cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

intel_display.c (314425B)


      1/*
      2 * Copyright © 2006-2007 Intel Corporation
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice (including the next
     12 * paragraph) shall be included in all copies or substantial portions of the
     13 * Software.
     14 *
     15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
     21 * DEALINGS IN THE SOFTWARE.
     22 *
     23 * Authors:
     24 *	Eric Anholt <eric@anholt.net>
     25 */
     26
     27#include <acpi/video.h>
     28#include <linux/i2c.h>
     29#include <linux/input.h>
     30#include <linux/intel-iommu.h>
     31#include <linux/kernel.h>
     32#include <linux/module.h>
     33#include <linux/dma-resv.h>
     34#include <linux/slab.h>
     35#include <linux/string_helpers.h>
     36#include <linux/vga_switcheroo.h>
     37
     38#include <drm/display/drm_dp_helper.h>
     39#include <drm/drm_atomic.h>
     40#include <drm/drm_atomic_helper.h>
     41#include <drm/drm_atomic_uapi.h>
     42#include <drm/drm_damage_helper.h>
     43#include <drm/drm_edid.h>
     44#include <drm/drm_fourcc.h>
     45#include <drm/drm_plane_helper.h>
     46#include <drm/drm_privacy_screen_consumer.h>
     47#include <drm/drm_probe_helper.h>
     48#include <drm/drm_rect.h>
     49
     50#include "display/intel_audio.h"
     51#include "display/intel_crt.h"
     52#include "display/intel_ddi.h"
     53#include "display/intel_display_debugfs.h"
     54#include "display/intel_display_power.h"
     55#include "display/intel_dp.h"
     56#include "display/intel_dp_mst.h"
     57#include "display/intel_dpll.h"
     58#include "display/intel_dpll_mgr.h"
     59#include "display/intel_drrs.h"
     60#include "display/intel_dsi.h"
     61#include "display/intel_dvo.h"
     62#include "display/intel_fb.h"
     63#include "display/intel_gmbus.h"
     64#include "display/intel_hdmi.h"
     65#include "display/intel_lvds.h"
     66#include "display/intel_sdvo.h"
     67#include "display/intel_snps_phy.h"
     68#include "display/intel_tv.h"
     69#include "display/intel_vdsc.h"
     70#include "display/intel_vrr.h"
     71
     72#include "gem/i915_gem_lmem.h"
     73#include "gem/i915_gem_object.h"
     74
     75#include "gt/gen8_ppgtt.h"
     76
     77#include "g4x_dp.h"
     78#include "g4x_hdmi.h"
     79#include "hsw_ips.h"
     80#include "i915_drv.h"
     81#include "i915_utils.h"
     82#include "icl_dsi.h"
     83#include "intel_acpi.h"
     84#include "intel_atomic.h"
     85#include "intel_atomic_plane.h"
     86#include "intel_bw.h"
     87#include "intel_cdclk.h"
     88#include "intel_color.h"
     89#include "intel_crtc.h"
     90#include "intel_de.h"
     91#include "intel_display_types.h"
     92#include "intel_dmc.h"
     93#include "intel_dp_link_training.h"
     94#include "intel_dpt.h"
     95#include "intel_fbc.h"
     96#include "intel_fbdev.h"
     97#include "intel_fdi.h"
     98#include "intel_fifo_underrun.h"
     99#include "intel_frontbuffer.h"
    100#include "intel_hdcp.h"
    101#include "intel_hotplug.h"
    102#include "intel_overlay.h"
    103#include "intel_panel.h"
    104#include "intel_pch_display.h"
    105#include "intel_pch_refclk.h"
    106#include "intel_pcode.h"
    107#include "intel_pipe_crc.h"
    108#include "intel_plane_initial.h"
    109#include "intel_pm.h"
    110#include "intel_pps.h"
    111#include "intel_psr.h"
    112#include "intel_quirks.h"
    113#include "intel_sprite.h"
    114#include "intel_tc.h"
    115#include "intel_vga.h"
    116#include "i9xx_plane.h"
    117#include "skl_scaler.h"
    118#include "skl_universal_plane.h"
    119#include "vlv_dsi.h"
    120#include "vlv_dsi_pll.h"
    121#include "vlv_dsi_regs.h"
    122#include "vlv_sideband.h"
    123
/*
 * Forward declarations for helpers defined later in this file; they are
 * referenced (directly or via intel_modeset_setup_hw_state()) before
 * their definitions.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
    133
    134/**
    135 * intel_update_watermarks - update FIFO watermark values based on current modes
    136 * @dev_priv: i915 device
    137 *
    138 * Calculate watermark values for the various WM regs based on current mode
    139 * and plane configuration.
    140 *
    141 * There are several cases to deal with here:
    142 *   - normal (i.e. non-self-refresh)
    143 *   - self-refresh (SR) mode
    144 *   - lines are large relative to FIFO size (buffer can hold up to 2)
    145 *   - lines are small relative to FIFO size (buffer can hold more than 2
    146 *     lines), so need to account for TLB latency
    147 *
    148 *   The normal calculation is:
    149 *     watermark = dotclock * bytes per pixel * latency
    150 *   where latency is platform & configuration dependent (we assume pessimal
    151 *   values here).
    152 *
    153 *   The SR calculation is:
    154 *     watermark = (trunc(latency/line time)+1) * surface width *
    155 *       bytes per pixel
    156 *   where
    157 *     line time = htotal / dotclock
    158 *     surface width = hdisplay for normal plane and 64 for cursor
    159 *   and latency is assumed to be high, as above.
    160 *
    161 * The final value programmed to the register should always be rounded up,
    162 * and include an extra 2 entries to account for clock crossings.
    163 *
    164 * We don't use the sprite, so we can ignore that.  And on Crestline we have
    165 * to set the non-SR watermarks to 8.
    166 */
    167static void intel_update_watermarks(struct drm_i915_private *dev_priv)
    168{
    169	if (dev_priv->wm_disp->update_wm)
    170		dev_priv->wm_disp->update_wm(dev_priv);
    171}
    172
    173static int intel_compute_pipe_wm(struct intel_atomic_state *state,
    174				 struct intel_crtc *crtc)
    175{
    176	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
    177	if (dev_priv->wm_disp->compute_pipe_wm)
    178		return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
    179	return 0;
    180}
    181
static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	/* Nothing to do if the platform has no intermediate WM stage. */
	if (!dev_priv->wm_disp->compute_intermediate_wm)
		return 0;
	/*
	 * An intermediate WM hook without a pipe WM hook makes no sense:
	 * the intermediate state is derived from the optimal pipe state.
	 */
	if (drm_WARN_ON(&dev_priv->drm,
			!dev_priv->wm_disp->compute_pipe_wm))
		return 0;
	return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
}
    193
    194static bool intel_initial_watermarks(struct intel_atomic_state *state,
    195				     struct intel_crtc *crtc)
    196{
    197	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
    198	if (dev_priv->wm_disp->initial_watermarks) {
    199		dev_priv->wm_disp->initial_watermarks(state, crtc);
    200		return true;
    201	}
    202	return false;
    203}
    204
    205static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
    206					   struct intel_crtc *crtc)
    207{
    208	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
    209	if (dev_priv->wm_disp->atomic_update_watermarks)
    210		dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
    211}
    212
    213static void intel_optimize_watermarks(struct intel_atomic_state *state,
    214				      struct intel_crtc *crtc)
    215{
    216	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
    217	if (dev_priv->wm_disp->optimize_watermarks)
    218		dev_priv->wm_disp->optimize_watermarks(state, crtc);
    219}
    220
    221static int intel_compute_global_watermarks(struct intel_atomic_state *state)
    222{
    223	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
    224	if (dev_priv->wm_disp->compute_global_watermarks)
    225		return dev_priv->wm_disp->compute_global_watermarks(state);
    226	return 0;
    227}
    228
    229/* returns HPLL frequency in kHz */
    230int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
    231{
    232	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
    233
    234	/* Obtain SKU information */
    235	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
    236		CCK_FUSE_HPLL_FREQ_MASK;
    237
    238	return vco_freq[hpll_freq] * 1000;
    239}
    240
/*
 * Read a CCK clock control register and return the resulting clock
 * frequency (same unit as @ref_freq, typically kHz).
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	/*
	 * The status field should mirror the requested divider once the
	 * clock has settled; warn if a change is still in flight.
	 */
	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	/* freq = 2 * ref / (divider + 1), rounded to nearest */
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
    256
/*
 * Like vlv_get_cck_clock(), but uses the (lazily cached) HPLL VCO as
 * the reference frequency and takes the CCK sideband lock.
 */
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	/* Cache the HPLL VCO frequency on first use. */
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}
    273
    274static void intel_update_czclk(struct drm_i915_private *dev_priv)
    275{
    276	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
    277		return;
    278
    279	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
    280						      CCK_CZ_CLOCK_CONTROL);
    281
    282	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
    283		dev_priv->czclk_freq);
    284}
    285
    286static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
    287{
    288	return (crtc_state->active_planes &
    289		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
    290}
    291
    292/* WA Display #0827: Gen9:all */
    293static void
    294skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
    295{
    296	if (enable)
    297		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
    298		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
    299	else
    300		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
    301		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
    302}
    303
    304/* Wa_2006604312:icl,ehl */
    305static void
    306icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
    307		       bool enable)
    308{
    309	if (enable)
    310		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
    311		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
    312	else
    313		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
    314		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
    315}
    316
/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	/* Set or clear the cursor clock-gating disable bit for @pipe. */
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}
    325
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	/* A port-sync slave has a valid master transcoder assigned. */
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
    331
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	/* A port-sync master has at least one slave transcoder attached. */
	return crtc_state->sync_mode_slaves_mask != 0;
}
    337
    338bool
    339is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
    340{
    341	return is_trans_port_sync_master(crtc_state) ||
    342		is_trans_port_sync_slave(crtc_state);
    343}
    344
static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
{
	/*
	 * The lowest pipe in the bigjoiner mask acts as master;
	 * ffs() is 1-based, hence the -1.
	 */
	return ffs(crtc_state->bigjoiner_pipes) - 1;
}
    349
    350u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
    351{
    352	if (crtc_state->bigjoiner_pipes)
    353		return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
    354	else
    355		return 0;
    356}
    357
    358bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
    359{
    360	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
    361
    362	return crtc_state->bigjoiner_pipes &&
    363		crtc->pipe != bigjoiner_master_pipe(crtc_state);
    364}
    365
    366bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
    367{
    368	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
    369
    370	return crtc_state->bigjoiner_pipes &&
    371		crtc->pipe == bigjoiner_master_pipe(crtc_state);
    372}
    373
static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
{
	/* Number of pipes ganged together by bigjoiner (0 if unused). */
	return hweight8(crtc_state->bigjoiner_pipes);
}
    378
    379struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
    380{
    381	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
    382
    383	if (intel_crtc_is_bigjoiner_slave(crtc_state))
    384		return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
    385	else
    386		return to_intel_crtc(crtc_state->uapi.crtc);
    387}
    388
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;

	/*
	 * Sample the display scanline counter twice, 5ms apart; if the
	 * pipe is running the counter will have advanced in between.
	 */
	line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
	msleep(5);
	line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;

	return line1 != line2;
}
    401
/*
 * Poll (up to 100ms) until the pipe's scanline counter is moving
 * (@state == true) or has stopped (@state == false); log on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), str_on_off(state));
}
    413
/* Wait for the scanline counter to stop advancing (pipe shut down). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
    418
/* Wait for the scanline counter to start advancing (pipe running). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
    423
/* Wait until the pipe has fully shut down after being disabled. */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder),
					    PIPECONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		/*
		 * Older hardware has no pipe-state bit; infer "off" from
		 * the scanline counter no longer advancing.
		 */
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
    441
/* Warn if the transcoder's hardware enable state differs from @state. */
void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	/*
	 * Only read the register if the transcoder's power domain is up;
	 * a powered-down transcoder is reported as disabled.
	 */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			str_on_off(state), str_on_off(cur_state));
}
    469
/* Warn if the plane's hardware state doesn't match the expected @state. */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, str_on_off(state),
			str_on_off(cur_state));
}
    482
#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

/* Warn unless every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
    494
/*
 * Wait (up to 1s) for the PHY status register to report the digital
 * port as ready, warning on timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	/* Each port's ready bits live in a port-specific register/field. */
	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* Port C's expected bits are shifted relative to port B's. */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		/* No other ports have PHY ready bits on this platform. */
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
    528
/*
 * Enable the transcoder/pipe for @new_crtc_state, after sanity-checking
 * that planes are off and the required PLLs are running.
 */
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	/* Already enabled: nothing to do (expected only on 830). */
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
    589
/*
 * Disable the transcoder/pipe for @old_crtc_state and wait until the
 * pipe has actually shut down (except on 830, which keeps pipes on).
 */
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	/* Already disabled: nothing to do. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	/* Only wait for shutdown if we really cleared the enable bit. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
    631
    632unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
    633{
    634	unsigned int size = 0;
    635	int i;
    636
    637	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
    638		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
    639
    640	return size;
    641}
    642
    643unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
    644{
    645	unsigned int size = 0;
    646	int i;
    647
    648	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
    649		unsigned int plane_size;
    650
    651		if (rem_info->plane[i].linear)
    652			plane_size = rem_info->plane[i].size;
    653		else
    654			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
    655
    656		if (plane_size == 0)
    657			continue;
    658
    659		if (rem_info->plane_alignment)
    660			size = ALIGN(size, rem_info->plane_alignment);
    661
    662		size += plane_size;
    663	}
    664
    665	return size;
    666}
    667
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	/*
	 * Pre-gen4 always uses a fence; on gen4+ only FBC-capable planes
	 * with a normal (non-remapped/rotated) GGTT view do.
	 */
	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc &&
		 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}
    677
    678/*
    679 * Convert the x/y offsets into a linear offset.
    680 * Only valid with 0/180 degree rotation, which is fine since linear
    681 * offset is only used with linear buffers on pre-hsw and tiled buffers
    682 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
    683 */
    684u32 intel_fb_xy_to_linear(int x, int y,
    685			  const struct intel_plane_state *state,
    686			  int color_plane)
    687{
    688	const struct drm_framebuffer *fb = state->hw.fb;
    689	unsigned int cpp = fb->format->cpp[color_plane];
    690	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
    691
    692	return y * pitch + x * cpp;
    693}
    694
    695/*
    696 * Add the x/y offsets derived from fb->offsets[] to the user
    697 * specified plane src x/y offsets. The resulting x/y offsets
    698 * specify the start of scanout from the beginning of the gtt mapping.
    699 */
    700void intel_add_fb_offsets(int *x, int *y,
    701			  const struct intel_plane_state *state,
    702			  int color_plane)
    703
    704{
    705	*x += state->view.color_plane[color_plane].x;
    706	*y += state->view.color_plane[color_plane].y;
    707}
    708
/*
 * Return the maximum framebuffer stride for the given format/modifier,
 * based on the first crtc's primary plane limits. Returns 0 when the
 * device has no display or no crtcs.
 */
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all,
	 * if in case pipe A is disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}
    732
    733static void
    734intel_set_plane_visible(struct intel_crtc_state *crtc_state,
    735			struct intel_plane_state *plane_state,
    736			bool visible)
    737{
    738	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
    739
    740	plane_state->uapi.visible = visible;
    741
    742	if (visible)
    743		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
    744	else
    745		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
    746}
    747
/* Rebuild enabled/active plane bitmasks from the uapi plane_mask. */
static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}
    767
/*
 * Disable @plane outside of a full atomic commit (e.g. during initial
 * hw state sanitization), updating the crtc state bookkeeping and
 * performing the vblank waits the hardware requires.
 */
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	/* Clear all per-plane bookkeeping for the disabled plane. */
	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_plane_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* If only the cursor remains, IPS may need to be turned off. */
	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}
    819
/*
 * Convert the plane's main surface offset back into an x/y pair (with
 * zero alignment) and return the y component, used for fence setup.
 */
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}
    830
/*
 * Re-program display hardware state after a reset/resume and, when
 * @state is non-NULL, re-commit the previously duplicated atomic state.
 * Returns 0 on success or a negative error code (never -EDEADLK).
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	/* Locks were already taken by the caller; -EDEADLK is unexpected. */
	drm_WARN_ON(dev, ret == -EDEADLK);
	return ret;
}
    869
    870static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
    871{
    872	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
    873		intel_has_gpu_reset(to_gt(dev_priv)));
    874}
    875
/*
 * Prepare the display for an impending GPU reset.
 *
 * If the reset will clobber the display (or the forced-reset test is
 * enabled), take all modeset locks, save a duplicate of the current
 * atomic state in dev_priv->modeset_restore_state and disable all
 * CRTCs. intel_display_finish_reset() is the counterpart that restores
 * the state and drops the locks; the error paths below intentionally
 * return with the locks still held for that function to release.
 */
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(to_gt(dev_priv));
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		/* Locks stay held; intel_display_finish_reset() drops them. */
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		/* Locks stay held; intel_display_finish_reset() drops them. */
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
    938
/*
 * Counterpart of intel_display_prepare_reset(): restore the display
 * state saved before the GPU reset (doing a full display HW re-init
 * first if the reset clobbered the display), then release the modeset
 * locks taken by the prepare step and clear I915_RESET_MODESET.
 */
void intel_display_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
		return;

	/* Claim the saved state; may be NULL if prepare bailed out early. */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);
		intel_hpd_init(dev_priv);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_poll_disable(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
}
    990
/*
 * Program the per-pipe PIPE_CHICKEN workaround bits for icl and later
 * platforms (read-modify-write of the whole register).
 */
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
   1029
   1030bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
   1031{
   1032	struct drm_crtc *crtc;
   1033	bool cleanup_done;
   1034
   1035	drm_for_each_crtc(crtc, &dev_priv->drm) {
   1036		struct drm_crtc_commit *commit;
   1037		spin_lock(&crtc->commit_lock);
   1038		commit = list_first_entry_or_null(&crtc->commit_list,
   1039						  struct drm_crtc_commit, commit_entry);
   1040		cleanup_done = commit ?
   1041			try_wait_for_completion(&commit->cleanup_done) : true;
   1042		spin_unlock(&crtc->commit_lock);
   1043
   1044		if (cleanup_done)
   1045			continue;
   1046
   1047		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));
   1048
   1049		return true;
   1050	}
   1051
   1052	return false;
   1053}
   1054
   1055/*
   1056 * Finds the encoder associated with the given CRTC. This can only be
   1057 * used when we know that the CRTC isn't feeding multiple encoders!
   1058 */
   1059struct intel_encoder *
   1060intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
   1061			   const struct intel_crtc_state *crtc_state)
   1062{
   1063	const struct drm_connector_state *connector_state;
   1064	const struct drm_connector *connector;
   1065	struct intel_encoder *encoder = NULL;
   1066	struct intel_crtc *master_crtc;
   1067	int num_encoders = 0;
   1068	int i;
   1069
   1070	master_crtc = intel_master_crtc(crtc_state);
   1071
   1072	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
   1073		if (connector_state->crtc != &master_crtc->base)
   1074			continue;
   1075
   1076		encoder = to_intel_encoder(connector_state->best_encoder);
   1077		num_encoders++;
   1078	}
   1079
   1080	drm_WARN(encoder->base.dev, num_encoders != 1,
   1081		 "%d encoders for pipe %c\n",
   1082		 num_encoders, pipe_name(master_crtc->pipe));
   1083
   1084	return encoder;
   1085}
   1086
/*
 * Sanity-check after a CPT modeset: poll the pipe's display scanline
 * register (PIPEDSL) and complain if its value never changes, which
 * indicates the pipe is stuck rather than actively scanning out.
 */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	/* Retry once before declaring the pipe stuck. */
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}
   1102
/*
 * Enable and program the PCH panel fitter for this pipe according to
 * the destination rectangle in crtc_state->pch_pfit. No-op when the
 * panel fitter is not enabled in the state.
 */
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	/* Window position/size registers pack x/width in the high 16 bits. */
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
}
   1130
   1131static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
   1132{
   1133	if (crtc->overlay)
   1134		(void) intel_overlay_switch_off(crtc->overlay);
   1135
   1136	/* Let userspace switch the overlay on again. In most cases userspace
   1137	 * has to recompute where to put it anyway.
   1138	 */
   1139}
   1140
   1141static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
   1142{
   1143	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   1144
   1145	if (!crtc_state->nv12_planes)
   1146		return false;
   1147
   1148	/* WA Display #0827: Gen9:all */
   1149	if (DISPLAY_VER(dev_priv) == 9)
   1150		return true;
   1151
   1152	return false;
   1153}
   1154
   1155static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
   1156{
   1157	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   1158
   1159	/* Wa_2006604312:icl,ehl */
   1160	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
   1161		return true;
   1162
   1163	return false;
   1164}
   1165
   1166static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
   1167{
   1168	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   1169
   1170	/* Wa_1604331009:icl,jsl,ehl */
   1171	if (is_hdr_mode(crtc_state) &&
   1172	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
   1173	    DISPLAY_VER(dev_priv) == 11)
   1174		return true;
   1175
   1176	return false;
   1177}
   1178
   1179static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
   1180				    enum pipe pipe, bool enable)
   1181{
   1182	if (DISPLAY_VER(i915) == 9) {
   1183		/*
   1184		 * "Plane N strech max must be programmed to 11b (x1)
   1185		 *  when Async flips are enabled on that plane."
   1186		 */
   1187		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
   1188			     SKL_PLANE1_STRETCH_MAX_MASK,
   1189			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
   1190	} else {
   1191		/* Also needed on HSW/BDW albeit undocumented */
   1192		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
   1193			     HSW_PRI_STRETCH_MAX_MASK,
   1194			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
   1195	}
   1196}
   1197
   1198static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
   1199{
   1200	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
   1201
   1202	return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
   1203		(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
   1204}
   1205
   1206static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
   1207			    const struct intel_crtc_state *new_crtc_state)
   1208{
   1209	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
   1210		new_crtc_state->active_planes;
   1211}
   1212
   1213static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
   1214			     const struct intel_crtc_state *new_crtc_state)
   1215{
   1216	return old_crtc_state->active_planes &&
   1217		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
   1218}
   1219
/*
 * Per-CRTC work done after the plane updates of an atomic commit:
 * flush frontbuffer tracking, program post-vblank watermarks, run the
 * IPS/FBC post-update hooks, and disarm any workarounds that are no
 * longer needed by the new state. Mirrors intel_pre_plane_update().
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	hsw_ips_post_update(state, crtc);
	intel_fbc_post_update(state, crtc);

	/* Each WA below is disabled only on a needed -> not-needed edge. */
	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	intel_drrs_activate(new_crtc_state);
}
   1256
   1257static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
   1258					struct intel_crtc *crtc)
   1259{
   1260	const struct intel_crtc_state *crtc_state =
   1261		intel_atomic_get_new_crtc_state(state, crtc);
   1262	u8 update_planes = crtc_state->update_planes;
   1263	const struct intel_plane_state *plane_state;
   1264	struct intel_plane *plane;
   1265	int i;
   1266
   1267	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
   1268		if (plane->pipe == crtc->pipe &&
   1269		    update_planes & BIT(plane->id))
   1270			plane->enable_flip_done(plane);
   1271	}
   1272}
   1273
   1274static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
   1275					 struct intel_crtc *crtc)
   1276{
   1277	const struct intel_crtc_state *crtc_state =
   1278		intel_atomic_get_new_crtc_state(state, crtc);
   1279	u8 update_planes = crtc_state->update_planes;
   1280	const struct intel_plane_state *plane_state;
   1281	struct intel_plane *plane;
   1282	int i;
   1283
   1284	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
   1285		if (plane->pipe == crtc->pipe &&
   1286		    update_planes & BIT(plane->id))
   1287			plane->disable_flip_done(plane);
   1288	}
   1289}
   1290
/*
 * WA for planes whose async flip enable bit is double buffered and only
 * latched at vblank start: re-program each such plane with async flip
 * disabled (preserving the rest of its old state) and wait a vblank so
 * the disable is actually latched before the commit proceeds.
 */
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}
   1321
/*
 * Per-CRTC work done before the plane updates of an atomic commit:
 * deactivate DRRS/PSR, run the IPS/FBC pre-update hooks, arm any
 * workarounds the new state requires, program intermediate watermarks,
 * and handle self-refresh/underrun quirks. The ordering of these steps
 * is significant; see the inline comments. Mirrored by
 * intel_post_plane_update().
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	/* Each WA below is enabled only on a not-needed -> needed edge. */
	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
		intel_crtc_async_flip_disable_wa(state, crtc);
}
   1427
/*
 * Disable (disarm) all planes on @crtc that are part of this commit's
 * update mask, switch off the legacy overlay, and flush frontbuffer
 * tracking for every plane that was visible before.
 */
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(plane, new_crtc_state);

		/* Only previously-visible planes need a frontbuffer flip. */
		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}
   1455
   1456/*
   1457 * intel_connector_primary_encoder - get the primary encoder for a connector
   1458 * @connector: connector for which to return the encoder
   1459 *
   1460 * Returns the primary encoder for a connector. There is a 1:1 mapping from
   1461 * all connectors to their encoder, except for DP-MST connectors which have
   1462 * both a virtual and a primary encoder. These DP-MST primary encoders can be
   1463 * pointed to by as many DP-MST connectors as there are pipes.
   1464 */
static struct intel_encoder *
intel_connector_primary_encoder(struct intel_connector *connector)
{
	struct intel_encoder *encoder;

	/* DP-MST: the primary encoder lives on the underlying digital port. */
	if (connector->mst_port)
		return &dp_to_dig_port(connector->mst_port)->base;

	encoder = intel_attached_encoder(connector);
	/* A non-MST connector is expected to always have an encoder. */
	drm_WARN_ON(connector->base.dev, !encoder);

	return encoder;
}
   1478
/*
 * Run the encoder ->update_prepare() hooks ahead of a modeset commit,
 * after first re-copying the DPLL state from the old CRTC states into
 * fastset (non-modeset) CRTC states so it is up-to-date.
 */
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
	if (i915->dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (intel_crtc_needs_modeset(new_crtc_state))
				continue;

			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
		}
	}

	/* The hooks themselves only matter for full modesets. */
	if (!state->modeset)
		return;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
					i) {
		struct intel_connector *intel_connector;
		struct intel_encoder *encoder;
		struct intel_crtc *crtc;

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		intel_connector = to_intel_connector(connector);
		encoder = intel_connector_primary_encoder(intel_connector);
		if (!encoder->update_prepare)
			continue;

		/* crtc may legitimately be NULL when disabling the connector. */
		crtc = new_conn_state->crtc ?
			to_intel_crtc(new_conn_state->crtc) : NULL;
		encoder->update_prepare(state, encoder, crtc);
	}
}
   1524
   1525static void intel_encoders_update_complete(struct intel_atomic_state *state)
   1526{
   1527	struct drm_connector_state *new_conn_state;
   1528	struct drm_connector *connector;
   1529	int i;
   1530
   1531	if (!state->modeset)
   1532		return;
   1533
   1534	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
   1535					i) {
   1536		struct intel_connector *intel_connector;
   1537		struct intel_encoder *encoder;
   1538		struct intel_crtc *crtc;
   1539
   1540		if (!intel_connector_needs_modeset(state, connector))
   1541			continue;
   1542
   1543		intel_connector = to_intel_connector(connector);
   1544		encoder = intel_connector_primary_encoder(intel_connector);
   1545		if (!encoder->update_complete)
   1546			continue;
   1547
   1548		crtc = new_conn_state->crtc ?
   1549			to_intel_crtc(new_conn_state->crtc) : NULL;
   1550		encoder->update_complete(state, encoder, crtc);
   1551	}
   1552}
   1553
   1554static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
   1555					  struct intel_crtc *crtc)
   1556{
   1557	const struct intel_crtc_state *crtc_state =
   1558		intel_atomic_get_new_crtc_state(state, crtc);
   1559	const struct drm_connector_state *conn_state;
   1560	struct drm_connector *conn;
   1561	int i;
   1562
   1563	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
   1564		struct intel_encoder *encoder =
   1565			to_intel_encoder(conn_state->best_encoder);
   1566
   1567		if (conn_state->crtc != &crtc->base)
   1568			continue;
   1569
   1570		if (encoder->pre_pll_enable)
   1571			encoder->pre_pll_enable(state, encoder,
   1572						crtc_state, conn_state);
   1573	}
   1574}
   1575
   1576static void intel_encoders_pre_enable(struct intel_atomic_state *state,
   1577				      struct intel_crtc *crtc)
   1578{
   1579	const struct intel_crtc_state *crtc_state =
   1580		intel_atomic_get_new_crtc_state(state, crtc);
   1581	const struct drm_connector_state *conn_state;
   1582	struct drm_connector *conn;
   1583	int i;
   1584
   1585	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
   1586		struct intel_encoder *encoder =
   1587			to_intel_encoder(conn_state->best_encoder);
   1588
   1589		if (conn_state->crtc != &crtc->base)
   1590			continue;
   1591
   1592		if (encoder->pre_enable)
   1593			encoder->pre_enable(state, encoder,
   1594					    crtc_state, conn_state);
   1595	}
   1596}
   1597
   1598static void intel_encoders_enable(struct intel_atomic_state *state,
   1599				  struct intel_crtc *crtc)
   1600{
   1601	const struct intel_crtc_state *crtc_state =
   1602		intel_atomic_get_new_crtc_state(state, crtc);
   1603	const struct drm_connector_state *conn_state;
   1604	struct drm_connector *conn;
   1605	int i;
   1606
   1607	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
   1608		struct intel_encoder *encoder =
   1609			to_intel_encoder(conn_state->best_encoder);
   1610
   1611		if (conn_state->crtc != &crtc->base)
   1612			continue;
   1613
   1614		if (encoder->enable)
   1615			encoder->enable(state, encoder,
   1616					crtc_state, conn_state);
   1617		intel_opregion_notify_encoder(encoder, true);
   1618	}
   1619}
   1620
   1621static void intel_encoders_disable(struct intel_atomic_state *state,
   1622				   struct intel_crtc *crtc)
   1623{
   1624	const struct intel_crtc_state *old_crtc_state =
   1625		intel_atomic_get_old_crtc_state(state, crtc);
   1626	const struct drm_connector_state *old_conn_state;
   1627	struct drm_connector *conn;
   1628	int i;
   1629
   1630	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
   1631		struct intel_encoder *encoder =
   1632			to_intel_encoder(old_conn_state->best_encoder);
   1633
   1634		if (old_conn_state->crtc != &crtc->base)
   1635			continue;
   1636
   1637		intel_opregion_notify_encoder(encoder, false);
   1638		if (encoder->disable)
   1639			encoder->disable(state, encoder,
   1640					 old_crtc_state, old_conn_state);
   1641	}
   1642}
   1643
   1644static void intel_encoders_post_disable(struct intel_atomic_state *state,
   1645					struct intel_crtc *crtc)
   1646{
   1647	const struct intel_crtc_state *old_crtc_state =
   1648		intel_atomic_get_old_crtc_state(state, crtc);
   1649	const struct drm_connector_state *old_conn_state;
   1650	struct drm_connector *conn;
   1651	int i;
   1652
   1653	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
   1654		struct intel_encoder *encoder =
   1655			to_intel_encoder(old_conn_state->best_encoder);
   1656
   1657		if (old_conn_state->crtc != &crtc->base)
   1658			continue;
   1659
   1660		if (encoder->post_disable)
   1661			encoder->post_disable(state, encoder,
   1662					      old_crtc_state, old_conn_state);
   1663	}
   1664}
   1665
   1666static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
   1667					    struct intel_crtc *crtc)
   1668{
   1669	const struct intel_crtc_state *old_crtc_state =
   1670		intel_atomic_get_old_crtc_state(state, crtc);
   1671	const struct drm_connector_state *old_conn_state;
   1672	struct drm_connector *conn;
   1673	int i;
   1674
   1675	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
   1676		struct intel_encoder *encoder =
   1677			to_intel_encoder(old_conn_state->best_encoder);
   1678
   1679		if (old_conn_state->crtc != &crtc->base)
   1680			continue;
   1681
   1682		if (encoder->post_pll_disable)
   1683			encoder->post_pll_disable(state, encoder,
   1684						  old_crtc_state, old_conn_state);
   1685	}
   1686}
   1687
   1688static void intel_encoders_update_pipe(struct intel_atomic_state *state,
   1689				       struct intel_crtc *crtc)
   1690{
   1691	const struct intel_crtc_state *crtc_state =
   1692		intel_atomic_get_new_crtc_state(state, crtc);
   1693	const struct drm_connector_state *conn_state;
   1694	struct drm_connector *conn;
   1695	int i;
   1696
   1697	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
   1698		struct intel_encoder *encoder =
   1699			to_intel_encoder(conn_state->best_encoder);
   1700
   1701		if (conn_state->crtc != &crtc->base)
   1702			continue;
   1703
   1704		if (encoder->update_pipe)
   1705			encoder->update_pipe(state, encoder,
   1706					     crtc_state, conn_state);
   1707	}
   1708}
   1709
   1710static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
   1711{
   1712	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   1713	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
   1714
   1715	plane->disable_arm(plane, crtc_state);
   1716}
   1717
/*
 * Program the CPU transcoder for an ILK-style pipe: set up the FDI or
 * DP M/N link values as appropriate, then the transcoder timings and
 * PIPECONF.
 */
static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		/* PCH encoders use the FDI link M/N values. */
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		/* DP encoders program both M1/N1 and the alternate M2/N2. */
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	ilk_set_pipeconf(crtc_state);
}
   1737
/*
 * Enable sequence for ILK-style (PCH based) CRTCs.
 *
 * The ordering follows the hardware requirements: the CPU transcoder
 * is configured first, encoders and (optionally) the PCH are brought
 * up, the LUTs are loaded while the clocks run but before the pipe is
 * enabled, and only then is the transcoder turned on.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already active CRTC is a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	ilk_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		ilk_pch_pre_enable(state, crtc);
	} else {
		/* Without a PCH encoder the FDI link must be fully off. */
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_crtc_wait_for_next_vblank(crtc);
		intel_crtc_wait_for_next_vblank(crtc);
	}
	/* Re-enable underrun reporting now that the spurious window is over. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
   1815
   1816static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
   1817					    enum pipe pipe, bool apply)
   1818{
   1819	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
   1820	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
   1821
   1822	if (apply)
   1823		val |= mask;
   1824	else
   1825		val &= ~mask;
   1826
   1827	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
   1828}
   1829
   1830static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
   1831{
   1832	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   1833	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   1834
   1835	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
   1836		       HSW_LINETIME(crtc_state->linetime) |
   1837		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
   1838}
   1839
   1840static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
   1841{
   1842	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   1843	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   1844	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
   1845	u32 val;
   1846
   1847	val = intel_de_read(dev_priv, reg);
   1848	val &= ~HSW_FRAME_START_DELAY_MASK;
   1849	val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
   1850	intel_de_write(dev_priv, reg, val);
   1851}
   1852
/*
 * Pre-enable handling for bigjoiner configurations: for a slave pipe
 * the encoder pre_pll_enable/pre_enable hooks are invoked on the
 * master CRTC, with the shared DPLL enabled in between.
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);

	/*
	 * Enable sequence steps 1-7 on bigjoiner master
	 */
	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_pll_enable(state, master_crtc);

	if (crtc_state->shared_dpll)
		intel_enable_shared_dpll(crtc_state);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_enable(state, master_crtc);
}
   1870
/*
 * Program the CPU transcoder for HSW+: M/N values, timings, pixel
 * multiplier, frame start delay and finally the transcoder config.
 */
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (crtc_state->has_pch_encoder) {
		/* FDI M/N values when a PCH encoder is in use */
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		/* DP M/N values, plus the alternate M2/N2 set */
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	/* PIPE_MULT is not programmed for the EDP transcoder. */
	if (cpu_transcoder != TRANSCODER_EDP)
		intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
			       crtc_state->pixel_multiplier - 1);

	hsw_set_frame_start_delay(crtc_state);

	hsw_set_transconf(crtc_state);
}
   1897
/*
 * Enable sequence for HSW+ (DDI based) CRTCs, including the bigjoiner
 * and DSC handling on platforms that have them.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* Enabling an already active CRTC is a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/* Bigjoiner configs route the encoder hooks via the master CRTC. */
	if (!new_crtc_state->bigjoiner_pipes) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_dsc_enable(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 13)
		intel_uncompressed_joiner_enable(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	/* Bigjoiner slaves and DSI transcoders skip the CPU transcoder setup. */
	if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
	    !transcoder_is_dsi(cpu_transcoder))
		hsw_configure_cpu_transcoder(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk */
	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (DISPLAY_VER(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);

	intel_initial_watermarks(state, crtc);

	if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* Undo the scaler clock gating workaround after the next vblank. */
	if (psl_clkgate_wa) {
		intel_crtc_wait_for_next_vblank(crtc);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		struct intel_crtc *wa_crtc;

		wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);

		intel_crtc_wait_for_next_vblank(wa_crtc);
		intel_crtc_wait_for_next_vblank(wa_crtc);
	}
}
   1988
   1989void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
   1990{
   1991	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
   1992	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   1993	enum pipe pipe = crtc->pipe;
   1994
   1995	/* To avoid upsetting the power well on haswell only disable the pfit if
   1996	 * it's in use. The hw state code will make sure we get this right. */
   1997	if (!old_crtc_state->pch_pfit.enabled)
   1998		return;
   1999
   2000	intel_de_write_fw(dev_priv, PF_CTL(pipe), 0);
   2001	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0);
   2002	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0);
   2003}
   2004
/*
 * Disable sequence for ILK-style (PCH based) CRTCs: encoders off,
 * vblanks off, transcoder off, pfit off, then the PCH side.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	/* The PCH side is only torn down when a PCH encoder was in use. */
	if (old_crtc_state->has_pch_encoder)
		ilk_pch_disable(state, crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_post_disable(state, crtc);

	/* Re-enable underrun reporting now that everything is off. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
   2040
   2041static void hsw_crtc_disable(struct intel_atomic_state *state,
   2042			     struct intel_crtc *crtc)
   2043{
   2044	const struct intel_crtc_state *old_crtc_state =
   2045		intel_atomic_get_old_crtc_state(state, crtc);
   2046
   2047	/*
   2048	 * FIXME collapse everything to one hook.
   2049	 * Need care with mst->ddi interactions.
   2050	 */
   2051	if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
   2052		intel_encoders_disable(state, crtc);
   2053		intel_encoders_post_disable(state, crtc);
   2054	}
   2055}
   2056
   2057static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
   2058{
   2059	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   2060	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   2061
   2062	if (!crtc_state->gmch_pfit.control)
   2063		return;
   2064
   2065	/*
   2066	 * The panel fitter should only be adjusted whilst the pipe is disabled,
   2067	 * according to register description and PRM.
   2068	 */
   2069	drm_WARN_ON(&dev_priv->drm,
   2070		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
   2071	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
   2072
   2073	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
   2074		       crtc_state->gmch_pfit.pgm_ratios);
   2075	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
   2076
   2077	/* Border color in case we don't scale up to the full screen. Black by
   2078	 * default, change to something else for debugging. */
   2079	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
   2080}
   2081
   2082bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
   2083{
   2084	if (phy == PHY_NONE)
   2085		return false;
   2086	else if (IS_DG2(dev_priv))
   2087		/*
   2088		 * DG2 outputs labelled as "combo PHY" in the bspec use
   2089		 * SNPS PHYs with completely different programming,
   2090		 * hence we always return false here.
   2091		 */
   2092		return false;
   2093	else if (IS_ALDERLAKE_S(dev_priv))
   2094		return phy <= PHY_E;
   2095	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
   2096		return phy <= PHY_D;
   2097	else if (IS_JSL_EHL(dev_priv))
   2098		return phy <= PHY_C;
   2099	else if (DISPLAY_VER(dev_priv) >= 11)
   2100		return phy <= PHY_B;
   2101	else
   2102		return false;
   2103}
   2104
   2105bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
   2106{
   2107	if (IS_DG2(dev_priv))
   2108		/* DG2's "TC1" output uses a SNPS PHY */
   2109		return false;
   2110	else if (IS_ALDERLAKE_P(dev_priv))
   2111		return phy >= PHY_F && phy <= PHY_I;
   2112	else if (IS_TIGERLAKE(dev_priv))
   2113		return phy >= PHY_D && phy <= PHY_I;
   2114	else if (IS_ICELAKE(dev_priv))
   2115		return phy >= PHY_C && phy <= PHY_F;
   2116	else
   2117		return false;
   2118}
   2119
   2120bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
   2121{
   2122	if (phy == PHY_NONE)
   2123		return false;
   2124	else if (IS_DG2(dev_priv))
   2125		/*
   2126		 * All four "combo" ports and the TC1 port (PHY E) use
   2127		 * Synopsis PHYs.
   2128		 */
   2129		return phy <= PHY_E;
   2130
   2131	return false;
   2132}
   2133
   2134enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
   2135{
   2136	if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
   2137		return PHY_D + port - PORT_D_XELPD;
   2138	else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
   2139		return PHY_F + port - PORT_TC1;
   2140	else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
   2141		return PHY_B + port - PORT_TC1;
   2142	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
   2143		return PHY_C + port - PORT_TC1;
   2144	else if (IS_JSL_EHL(i915) && port == PORT_D)
   2145		return PHY_A;
   2146
   2147	return PHY_A + port - PORT_A;
   2148}
   2149
   2150enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
   2151{
   2152	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
   2153		return TC_PORT_NONE;
   2154
   2155	if (DISPLAY_VER(dev_priv) >= 12)
   2156		return TC_PORT_1 + port - PORT_TC1;
   2157	else
   2158		return TC_PORT_1 + port - PORT_C;
   2159}
   2160
   2161enum intel_display_power_domain
   2162intel_aux_power_domain(struct intel_digital_port *dig_port)
   2163{
   2164	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
   2165
   2166	if (intel_tc_port_in_tbt_alt_mode(dig_port))
   2167		return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
   2168
   2169	return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
   2170}
   2171
/*
 * Compute into @mask the set of display power domains required by
 * @crtc_state: pipe, transcoder, panel fitter, every enabled encoder,
 * audio, the display core (for a shared DPLL) and DSC, as applicable.
 */
static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
				   struct intel_power_domain_mask *mask)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;

	bitmap_zero(mask->bits, POWER_DOMAIN_NUM);

	/* An inactive CRTC needs no power domains. */
	if (!crtc_state->hw.active)
		return;

	set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
	set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);

	/* Each encoder driven by this CRTC brings its own power domain. */
	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		set_bit(intel_encoder->power_domain, mask->bits);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);

	if (crtc_state->shared_dpll)
		set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);

	if (crtc_state->dsc.compression_enable)
		set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}
   2208
/*
 * Acquire references for the power domains the new CRTC state needs
 * but which are not held yet. Domains that are currently held but no
 * longer needed are returned in @old_domains so the caller can drop
 * them at the appropriate point.
 */
static void
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
			       struct intel_power_domain_mask *old_domains)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	struct intel_power_domain_mask domains, new_domains;

	get_crtc_power_domains(crtc_state, &domains);

	/* new_domains = needed but not yet held */
	bitmap_andnot(new_domains.bits,
		      domains.bits,
		      crtc->enabled_power_domains.mask.bits,
		      POWER_DOMAIN_NUM);
	/* old_domains = held but no longer needed */
	bitmap_andnot(old_domains->bits,
		      crtc->enabled_power_domains.mask.bits,
		      domains.bits,
		      POWER_DOMAIN_NUM);

	for_each_power_domain(domain, &new_domains)
		intel_display_power_get_in_set(dev_priv,
					       &crtc->enabled_power_domains,
					       domain);
}
   2234
   2235static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
   2236					   struct intel_power_domain_mask *domains)
   2237{
   2238	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
   2239					    &crtc->enabled_power_domains,
   2240					    domains);
   2241}
   2242
   2243static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
   2244{
   2245	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   2246	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
   2247
   2248	if (intel_crtc_has_dp_encoder(crtc_state)) {
   2249		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
   2250					       &crtc_state->dp_m_n);
   2251		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
   2252					       &crtc_state->dp_m2_n2);
   2253	}
   2254
   2255	intel_set_transcoder_timings(crtc_state);
   2256
   2257	i9xx_set_pipeconf(crtc_state);
   2258}
   2259
/* Enable sequence for VLV/CHV CRTCs. */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already active CRTC is a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	/* CHV and VLV have different PLL programming. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_enable_pll(new_crtc_state);
	else
		vlv_enable_pll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	/* LUTs are loaded before the transcoder is enabled. */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
   2308
/* Enable sequence for pre-VLV gmch CRTCs. */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Enabling an already active CRTC is a driver bug. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	/* Underrun reporting is not enabled on gen2. */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	/* LUTs are loaded before the transcoder is enabled. */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (!intel_initial_watermarks(state, crtc))
		intel_update_watermarks(dev_priv);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);
}
   2353
   2354static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
   2355{
   2356	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
   2357	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   2358
   2359	if (!old_crtc_state->gmch_pfit.control)
   2360		return;
   2361
   2362	assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
   2363
   2364	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
   2365		    intel_de_read(dev_priv, PFIT_CONTROL));
   2366	intel_de_write(dev_priv, PFIT_CONTROL, 0);
   2367}
   2368
/* Disable sequence for gmch (i9xx/VLV/CHV) CRTCs. */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* The PLL is only torn down for non-DSI outputs. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* Underrun reporting was never enabled on gen2. */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->wm_disp->initial_watermarks)
		intel_update_watermarks(dev_priv);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
   2415
/*
 * Force a CRTC off without going through a full atomic commit, then
 * clear all software state that tracked it: the staged crtc state,
 * encoder links, watermarks, power domains and the per-pipe bits of
 * the global cdclk/dbuf/bandwidth state.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* Turn off any still-visible planes first. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/* Build a minimal atomic state just to call the crtc_disable hook. */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Reset the software crtc state to "disabled". */
	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(dev_priv);
	intel_disable_shared_dpll(crtc_state);

	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);

	/* Clear this pipe from the global cdclk/dbuf/bandwidth state. */
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
   2499
   2500/*
   2501 * turn all crtc's off, but do not adjust state
   2502 * This has to be paired with a call to intel_modeset_setup_hw_state.
   2503 */
   2504int intel_display_suspend(struct drm_device *dev)
   2505{
   2506	struct drm_i915_private *dev_priv = to_i915(dev);
   2507	struct drm_atomic_state *state;
   2508	int ret;
   2509
   2510	if (!HAS_DISPLAY(dev_priv))
   2511		return 0;
   2512
   2513	state = drm_atomic_helper_suspend(dev);
   2514	ret = PTR_ERR_OR_ZERO(state);
   2515	if (ret)
   2516		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
   2517			ret);
   2518	else
   2519		dev_priv->modeset_restore_state = state;
   2520	return ret;
   2521}
   2522
   2523void intel_encoder_destroy(struct drm_encoder *encoder)
   2524{
   2525	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
   2526
   2527	drm_encoder_cleanup(encoder);
   2528	kfree(intel_encoder);
   2529}
   2530
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		/* The connector is enabled in hardware; it must have a crtc. */
		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* For MST (or no encoder) skip the encoder/crtc link checks. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Connector is off in hardware; no crtc/encoder should be set. */
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
   2569
   2570static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
   2571{
   2572	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   2573
   2574	/* GDG double wide on either pipe, otherwise pipe A only */
   2575	return DISPLAY_VER(dev_priv) < 4 &&
   2576		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
   2577}
   2578
   2579static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
   2580{
   2581	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
   2582	struct drm_rect src;
   2583
   2584	/*
   2585	 * We only use IF-ID interlacing. If we ever use
   2586	 * PF-ID we'll need to adjust the pixel_rate here.
   2587	 */
   2588
   2589	if (!crtc_state->pch_pfit.enabled)
   2590		return pixel_rate;
   2591
   2592	drm_rect_init(&src, 0, 0,
   2593		      drm_rect_width(&crtc_state->pipe_src) << 16,
   2594		      drm_rect_height(&crtc_state->pipe_src) << 16);
   2595
   2596	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
   2597				   pixel_rate);
   2598}
   2599
   2600static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
   2601					 const struct drm_display_mode *timings)
   2602{
   2603	mode->hdisplay = timings->crtc_hdisplay;
   2604	mode->htotal = timings->crtc_htotal;
   2605	mode->hsync_start = timings->crtc_hsync_start;
   2606	mode->hsync_end = timings->crtc_hsync_end;
   2607
   2608	mode->vdisplay = timings->crtc_vdisplay;
   2609	mode->vtotal = timings->crtc_vtotal;
   2610	mode->vsync_start = timings->crtc_vsync_start;
   2611	mode->vsync_end = timings->crtc_vsync_end;
   2612
   2613	mode->flags = timings->flags;
   2614	mode->type = DRM_MODE_TYPE_DRIVER;
   2615
   2616	mode->clock = timings->crtc_clock;
   2617
   2618	drm_mode_set_name(mode);
   2619}
   2620
   2621static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
   2622{
   2623	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   2624
   2625	if (HAS_GMCH(dev_priv))
   2626		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
   2627		crtc_state->pixel_rate =
   2628			crtc_state->hw.pipe_mode.crtc_clock;
   2629	else
   2630		crtc_state->pixel_rate =
   2631			ilk_pipe_pixel_rate(crtc_state);
   2632}
   2633
   2634static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state,
   2635					   struct drm_display_mode *mode)
   2636{
   2637	int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
   2638
   2639	if (num_pipes < 2)
   2640		return;
   2641
   2642	mode->crtc_clock /= num_pipes;
   2643	mode->crtc_hdisplay /= num_pipes;
   2644	mode->crtc_hblank_start /= num_pipes;
   2645	mode->crtc_hblank_end /= num_pipes;
   2646	mode->crtc_hsync_start /= num_pipes;
   2647	mode->crtc_hsync_end /= num_pipes;
   2648	mode->crtc_htotal /= num_pipes;
   2649}
   2650
   2651static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
   2652					  struct drm_display_mode *mode)
   2653{
   2654	int overlap = crtc_state->splitter.pixel_overlap;
   2655	int n = crtc_state->splitter.link_count;
   2656
   2657	if (!crtc_state->splitter.enable)
   2658		return;
   2659
   2660	/*
   2661	 * eDP MSO uses segment timings from EDID for transcoder
   2662	 * timings, but full mode for everything else.
   2663	 *
   2664	 * h_full = (h_segment - pixel_overlap) * link_count
   2665	 */
   2666	mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
   2667	mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
   2668	mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
   2669	mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
   2670	mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
   2671	mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
   2672	mode->crtc_clock *= n;
   2673}
   2674
/*
 * Derive hw.pipe_mode, hw.adjusted_mode normal timings, hw.mode and
 * pixel_rate from the transcoder timings that hardware readout placed
 * in hw.adjusted_mode's crtc_* fields. The stages below consume each
 * other's results, so their order matters.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/*
	 * We want the full numbers in adjusted_mode normal timings,
	 * adjusted_mode crtc timings are left with the raw transcoder
	 * timings.
	 */
	intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);

	/* Populate the "user" mode with full numbers */
	drm_mode_copy(mode, pipe_mode);
	intel_mode_from_crtc_timings(mode, mode);
	/* hdisplay spans all joined pipes; pipe_src is per-pipe */
	mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
		(intel_bigjoiner_num_pipes(crtc_state) ?: 1);
	mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);

	/* Derive per-pipe timings in case bigjoiner is used */
	intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	intel_crtc_compute_pixel_rate(crtc_state);
}
   2710
/*
 * Read out the encoder's contribution to @crtc_state, then derive
 * the dependent state (pipe_mode, mode, pixel_rate) from it.
 */
static void intel_encoder_get_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *crtc_state)
{
	/* Let the encoder fill in its part of the hw state... */
	encoder->get_config(encoder, crtc_state);

	/* ...then recompute everything derived from it. */
	intel_crtc_readout_derived_state(crtc_state);
}
   2718
   2719static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
   2720{
   2721	int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
   2722	int width, height;
   2723
   2724	if (num_pipes < 2)
   2725		return;
   2726
   2727	width = drm_rect_width(&crtc_state->pipe_src);
   2728	height = drm_rect_height(&crtc_state->pipe_src);
   2729
   2730	drm_rect_init(&crtc_state->pipe_src, 0, 0,
   2731		      width / num_pipes, height);
   2732}
   2733
   2734static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
   2735{
   2736	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   2737	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
   2738
   2739	intel_bigjoiner_compute_pipe_src(crtc_state);
   2740
   2741	/*
   2742	 * Pipe horizontal size must be even in:
   2743	 * - DVO ganged mode
   2744	 * - LVDS dual channel mode
   2745	 * - Double wide pipe
   2746	 */
   2747	if (drm_rect_width(&crtc_state->pipe_src) & 1) {
   2748		if (crtc_state->double_wide) {
   2749			drm_dbg_kms(&i915->drm,
   2750				    "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
   2751				    crtc->base.base.id, crtc->base.name);
   2752			return -EINVAL;
   2753		}
   2754
   2755		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
   2756		    intel_is_dual_link_lvds(i915)) {
   2757			drm_dbg_kms(&i915->drm,
   2758				    "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
   2759				    crtc->base.base.id, crtc->base.name);
   2760			return -EINVAL;
   2761		}
   2762	}
   2763
   2764	return 0;
   2765}
   2766
/*
 * Derive hw.pipe_mode from hw.adjusted_mode (expanding MSO segments,
 * splitting for bigjoiner) and validate its dotclock against the
 * platform limit, enabling double wide mode on pre-gen4 if needed.
 * Returns 0 on success, -EINVAL if the clock cannot be supported.
 */
static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	int clock_limit = i915->max_dotclk_freq;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/* Derive per-pipe timings in case bigjoiner is used */
	intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(i915) < 4) {
		/* Single wide is limited to 90% of cdclk on these platforms. */
		clock_limit = i915->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			/* Double wide restores the full dotclock limit. */
			clock_limit = i915->max_dotclk_freq;
			crtc_state->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    crtc->base.base.id, crtc->base.name,
			    pipe_mode->crtc_clock, clock_limit,
			    str_yes_no(crtc_state->double_wide));
		return -EINVAL;
	}

	return 0;
}
   2813
   2814static int intel_crtc_compute_config(struct intel_crtc *crtc,
   2815				     struct intel_crtc_state *crtc_state)
   2816{
   2817	int ret;
   2818
   2819	ret = intel_crtc_compute_pipe_src(crtc_state);
   2820	if (ret)
   2821		return ret;
   2822
   2823	ret = intel_crtc_compute_pipe_mode(crtc_state);
   2824	if (ret)
   2825		return ret;
   2826
   2827	intel_crtc_compute_pixel_rate(crtc_state);
   2828
   2829	if (crtc_state->has_pch_encoder)
   2830		return ilk_fdi_compute_config(crtc, crtc_state);
   2831
   2832	return 0;
   2833}
   2834
   2835static void
   2836intel_reduce_m_n_ratio(u32 *num, u32 *den)
   2837{
   2838	while (*num > DATA_LINK_M_N_MASK ||
   2839	       *den > DATA_LINK_M_N_MASK) {
   2840		*num >>= 1;
   2841		*den >>= 1;
   2842	}
   2843}
   2844
/*
 * Compute an M/N register pair approximating the ratio m/n.
 * With @constant_n the N value is pinned to DP_LINK_CONSTANT_N_VALUE;
 * otherwise N is n rounded up to a power of two, capped at
 * DATA_LINK_N_MAX. M is then scaled to preserve the ratio and the
 * pair is reduced to fit the register fields.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			u32 *ret_m, u32 *ret_n,
			bool constant_n)
{
	/*
	 * Several DP dongles in particular seem to be fussy about
	 * too large link M/N values. Give N value as 0x8000 that
	 * should be acceptable by specific devices. 0x8000 is the
	 * specified fixed N value for asynchronous clock mode,
	 * which the devices expect also in synchronous clock mode.
	 */
	if (constant_n)
		*ret_n = DP_LINK_CONSTANT_N_VALUE;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	/* m * ret_n may exceed 32 bits, hence the 64-bit intermediate. */
	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
   2864
/*
 * Fill @m_n with the data and link M/N values for a DP-style link:
 * data M/N is the ratio of the (FEC-adjusted) data rate to the total
 * link bandwidth (link_clock * nlanes * 8), link M/N is the ratio of
 * pixel clock to link clock. TU size is fixed at 64.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool constant_n, bool fec_enable)
{
	u32 data_clock = bits_per_pixel * pixel_clock;

	/* FEC adds overhead, so the effective data rate goes up. */
	if (fec_enable)
		data_clock = intel_dp_mode_to_fec_clock(data_clock);

	m_n->tu = 64;
	compute_m_n(data_clock,
		    link_clock * nlanes * 8,
		    &m_n->data_m, &m_n->data_n,
		    constant_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    constant_n);
}
   2886
/*
 * Reconcile the VBT's LVDS SSC setting with what the BIOS actually
 * programmed in PCH_DREF_CONTROL, preferring the BIOS state to avoid
 * flicker from a needless SSC transition.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    str_enabled_disabled(bios_lvds_use_ssc),
				    str_enabled_disabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
   2909
/*
 * Fill @m_n with the values that correspond to an all-zero set of
 * M/N registers (tu == 1 is the zero encoding of TU size).
 */
void intel_zero_m_n(struct intel_link_m_n *m_n)
{
	/* corresponds to 0 register value */
	memset(m_n, 0, sizeof(*m_n));
	m_n->tu = 1;
}
   2916
/*
 * Program one set of data/link M/N registers. The TU size is packed
 * into the data M register alongside the M value. Note the write
 * order below is deliberate and must not be changed.
 */
void intel_set_m_n(struct drm_i915_private *i915,
		   const struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
	intel_de_write(i915, data_n_reg, m_n->data_n);
	intel_de_write(i915, link_m_reg, m_n->link_m);
	/*
	 * On BDW+ writing LINK_N arms the double buffered update
	 * of all the M/N registers, so it must be written last.
	 */
	intel_de_write(i915, link_n_reg, m_n->link_n);
}
   2931
   2932bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
   2933				    enum transcoder transcoder)
   2934{
   2935	if (IS_HASWELL(dev_priv))
   2936		return transcoder == TRANSCODER_EDP;
   2937
   2938	return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
   2939}
   2940
/*
 * Program the primary (M1/N1) data/link M/N registers. Gen5+ index
 * these by transcoder; older (g4x-style) hardware indexes by pipe.
 */
void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(dev_priv) >= 5)
		intel_set_m_n(dev_priv, m_n,
			      PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
			      PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
	else
		intel_set_m_n(dev_priv, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}
   2957
/*
 * Program the secondary (M2/N2) data/link M/N registers. Silently a
 * no-op on transcoders that lack the M2/N2 set.
 */
void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
		return;

	intel_set_m_n(dev_priv, m_n,
		      PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
		      PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
}
   2971
/*
 * Program the CPU transcoder's H/V timing registers from the
 * adjusted mode. Interlaced modes need vtotal/vblank_end trimmed by
 * one halfline and a VSYNCSHIFT value; HSW EDP-on-pipe-B/C needs the
 * VTOTAL mirroring workaround at the end.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* Keep the shift within one scanline. */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* Gen2/3 lack the VSYNCSHIFT register. */
	if (DISPLAY_VER(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
		               vsyncshift);

	/* All timing registers hold (value - 1), end values in the high word. */
	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
		               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
   3029
/* Program PIPESRC with the pipe source size (stored as size - 1). */
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int width = drm_rect_width(&crtc_state->pipe_src);
	int height = drm_rect_height(&crtc_state->pipe_src);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	intel_de_write(dev_priv, PIPESRC(pipe),
		       PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}
   3044
   3045static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
   3046{
   3047	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   3048	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
   3049
   3050	if (DISPLAY_VER(dev_priv) == 2)
   3051		return false;
   3052
   3053	if (DISPLAY_VER(dev_priv) >= 9 ||
   3054	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
   3055		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
   3056	else
   3057		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
   3058}
   3059
/*
 * Read the transcoder H/V timing registers back into
 * hw.adjusted_mode's crtc_* fields. Registers hold (value - 1) with
 * the "end" half in the high 16 bits, hence the +1 decode. DSI
 * transcoders have no blank registers, so those fields are skipped.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
		pipe_config->hw.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* Undo the halfline trimming done when programming interlaced modes. */
	if (intel_pipe_is_interlaced(pipe_config)) {
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
	}
}
   3104
   3105static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
   3106{
   3107	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   3108	int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
   3109	enum pipe master_pipe, pipe = crtc->pipe;
   3110	int width;
   3111
   3112	if (num_pipes < 2)
   3113		return;
   3114
   3115	master_pipe = bigjoiner_master_pipe(crtc_state);
   3116	width = drm_rect_width(&crtc_state->pipe_src);
   3117
   3118	drm_rect_translate_to(&crtc_state->pipe_src,
   3119			      (pipe - master_pipe) * width, 0);
   3120}
   3121
/*
 * Read back the pipe source size from PIPESRC (stored as size - 1)
 * and re-apply the bigjoiner per-pipe offset.
 */
static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));

	drm_rect_init(&pipe_config->pipe_src, 0, 0,
		      REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
		      REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);

	intel_bigjoiner_adjust_pipe_src(pipe_config);
}
   3137
/*
 * Assemble and program the PIPECONF register for gmch-style (gen2-4,
 * VLV/CHV) hardware: enable/double-wide bits, bpc/dither (g4x+ only),
 * interlace mode, color range, gamma mode and frame start delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_BPC_6;
			break;
		case 24:
			pipeconf |= PIPECONF_BPC_8;
			break;
		case 30:
			pipeconf |= PIPECONF_BPC_10;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* SDVO and pre-gen4 need the field indication variant. */
		if (DISPLAY_VER(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE;
	}

	/* Limited color range selection only exists on VLV/CHV here. */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* Register holds (delay - 1). */
	pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
   3196
   3197static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
   3198{
   3199	if (IS_I830(dev_priv))
   3200		return false;
   3201
   3202	return DISPLAY_VER(dev_priv) >= 4 ||
   3203		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
   3204}
   3205
/*
 * Read back the gmch panel fitter state, but only when the pfit
 * exists, is enabled, and is attached to this crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (DISPLAY_VER(dev_priv) < 4) {
		/* Pre-gen4 the pfit is hardwired to pipe B. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
   3232
/*
 * Read back the VLV DPLL dividers via sideband (DPIO) and compute
 * the resulting port clock. No-op when the DPLL is off (DSI case).
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;	/* VLV DPLL reference clock, kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* Unpack the divider fields from PLL_DW3. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
   3259
/*
 * Read back the CHV DPLL dividers via sideband (DPIO) and compute
 * the resulting port clock. No-op when the DPLL is off (DSI case).
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;	/* CHV DPLL reference clock, kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* M2 is 10.22 fixed point: integer part in DW0, fraction in DW2. */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
   3293
   3294static enum intel_output_format
   3295bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
   3296{
   3297	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   3298	u32 tmp;
   3299
   3300	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
   3301
   3302	if (tmp & PIPEMISC_YUV420_ENABLE) {
   3303		/* We support 4:2:0 in full blend mode only */
   3304		drm_WARN_ON(&dev_priv->drm,
   3305			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
   3306
   3307		return INTEL_OUTPUT_FORMAT_YCBCR420;
   3308	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
   3309		return INTEL_OUTPUT_FORMAT_YCBCR444;
   3310	} else {
   3311		return INTEL_OUTPUT_FORMAT_RGB;
   3312	}
   3313}
   3314
/*
 * Read back gamma/CSC enablement from the primary plane's DSPCNTR
 * register (where these pipe-level bits live on this hardware).
 */
static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 tmp;

	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	if (tmp & DISP_PIPE_GAMMA_ENABLE)
		crtc_state->gamma_enable = true;

	/* GMCH platforms have no pipe CSC. */
	if (!HAS_GMCH(dev_priv) &&
	    tmp & DISP_PIPE_CSC_ENABLE)
		crtc_state->csc_enable = true;
}
   3332
/*
 * Read back the full pipe configuration on gmch-style hardware
 * (gen2-4, VLV/CHV) into @pipe_config. Takes a display power
 * reference for the pipe's domain around all register access.
 * Returns true if the pipe is enabled and the state was read out.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Bail without touching registers if the pipe's domain is off. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* bpc readout only exists on g4x and later. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_BPC_6:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_BPC_8:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_BPC_10:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			MISSING_CASE(tmp);
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);

	/* Register holds (delay - 1). */
	pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = intel_de_read(dev_priv,
						      CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (DISPLAY_VER(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(pipe_config);

	/* DPLL state / pixel multiplier readout. */
	if (DISPLAY_VER(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
							DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
							       FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
							       FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
   3456
/*
 * Program PIPECONF for an ILK-style (PCH display) pipe from the
 * committed crtc state: bpc, dithering, interlace mode, color
 * range/space, gamma mode, and frame start / MSA timing delays.
 */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	/* pipe_bpp is total bits per pixel, i.e. 3 * bits per component */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_BPC_6;
		break;
	case 24:
		val |= PIPECONF_BPC_8;
		break;
	case 30:
		val |= PIPECONF_BPC_10;
		break;
	case 36:
		val |= PIPECONF_BPC_12;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACE_IF_ID_ILK;
	else
		val |= PIPECONF_INTERLACE_PF_PD_ILK;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	/* Limited range is not selected at the pipe for SDVO outputs */
	if (crtc_state->limited_color_range &&
	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
		val |= PIPECONF_COLOR_RANGE_SELECT;

	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	/* framestart_delay is stored 1-based; the register field is 0-based */
	val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);
	val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	/* posting read to flush the write */
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
   3514
/*
 * Program the transcoder's PIPECONF on HSW+. Only dithering (HSW only),
 * interlace mode and (HSW only) output colorspace live here; bpc and the
 * remaining color bits are in PIPEMISC on BDW+ (see bdw_set_pipemisc()).
 */
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	if (IS_HASWELL(dev_priv) && crtc_state->dither)
		val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACE_IF_ID_ILK;
	else
		val |= PIPECONF_INTERLACE_PF_PD_ILK;

	if (IS_HASWELL(dev_priv) &&
	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;

	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
	/* posting read to flush the write */
	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
}
   3537
/*
 * Program PIPEMISC (BDW+): bpc, dithering, YCbCr output mode,
 * HDR precision (ICL+) and pixel rounding (TGL+), from the
 * committed crtc state.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	/* pipe_bpp is total bits per pixel, i.e. 3 * bits per component */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_BPC_6;
		break;
	case 24:
		val |= PIPEMISC_BPC_8;
		break;
	case 30:
		val |= PIPEMISC_BPC_10;
		break;
	case 36:
		/* Port output 12BPC defined for ADLP+ */
		if (DISPLAY_VER(dev_priv) > 12)
			val |= PIPEMISC_BPC_12_ADLP;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* 4:2:0 additionally needs the pipe's downsampler enabled */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (DISPLAY_VER(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
   3583
/*
 * Read back pipe_bpp (total bits per pixel, 3 * bpc) from the
 * PIPEMISC bpc field. Returns 0 for encodings the platform does
 * not define (with a MISSING_CASE warning).
 */
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	switch (tmp & PIPEMISC_BPC_MASK) {
	case PIPEMISC_BPC_6:
		return 18;
	case PIPEMISC_BPC_8:
		return 24;
	case PIPEMISC_BPC_10:
		return 30;
	/*
	 * PORT OUTPUT 12 BPC defined for ADLP+.
	 *
	 * TODO:
	 * For previous platforms with DSI interface, bits 5:7
	 * are used for storing pipe_bpp irrespective of dithering.
	 * Since the value of 12 BPC is not defined for these bits
	 * on older platforms, need to find a workaround for 12 BPC
	 * MIPI DSI HW readout.
	 */
	case PIPEMISC_BPC_12_ADLP:
		if (DISPLAY_VER(dev_priv) > 12)
			return 36;
		fallthrough;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}
   3617
   3618int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
   3619{
   3620	/*
   3621	 * Account for spread spectrum to avoid
   3622	 * oversubscribing the link. Max center spread
   3623	 * is 2.5%; use 5% for safety's sake.
   3624	 */
   3625	u32 bps = target_clock * bpp * 21 / 20;
   3626	return DIV_ROUND_UP(bps, link_bw * 8);
   3627}
   3628
/*
 * Read back one M/N link parameter set from the given registers.
 * The TU size shares the data M register; the hardware stores
 * TU size minus one, hence the +1.
 */
void intel_get_m_n(struct drm_i915_private *i915,
		   struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
	m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
	m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
	m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
	m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
}
   3640
/*
 * Read back the primary (M1/N1) link parameters. ILK+ (gen5+) has
 * per-transcoder registers; older g4x-style hardware uses per-pipe
 * registers instead.
 */
void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(dev_priv) >= 5)
		intel_get_m_n(dev_priv, m_n,
			      PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
			      PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
	else
		intel_get_m_n(dev_priv, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}
   3657
/*
 * Read back the alternate (M2/N2) link parameters. A no-op on
 * transcoders that lack an M2/N2 register set.
 */
void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
		return;

	intel_get_m_n(dev_priv, m_n,
		      PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
		      PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
}
   3671
/*
 * Decode a panel fitter window into pch_pfit.dst. The pos/size
 * registers pack x (resp. width) in the high 16 bits and
 * y (resp. height) in the low 16 bits.
 */
static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
				  u32 pos, u32 size)
{
	drm_rect_init(&crtc_state->pch_pfit.dst,
		      pos >> 16, pos & 0xffff,
		      size >> 16, size & 0xffff);
}
   3679
/*
 * Read out the pipe scaler (panel fitter) state on SKL+: find the
 * first scaler driving the pipe output, record its window, and
 * update the scaler_state bookkeeping accordingly.
 */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		/* must be enabled and not bound to any plane */
		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		break;
	}

	/* id < 0 means no scaler is attached to the pipe output */
	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
   3714
/*
 * Read out the PCH panel fitter state on ILK-style platforms:
 * enabled flag and destination window.
 */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignements of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
   3740
/*
 * Read back the full pipe configuration on ILK-style platforms.
 * Holds a pipe power domain reference for the duration of the
 * readout; returns false if the power well is off or the pipe
 * is disabled.
 */
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	/* on these platforms the cpu transcoder maps 1:1 to the pipe */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* pipe_bpp = 3 * bits per component */
	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_BPC_6:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_BPC_8:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_BPC_10:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_BPC_12:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		break;
	default:
		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
		break;
	}

	pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);

	/* register field is 0-based, state stores it 1-based */
	pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1;

	pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp);

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	/* fixed up later by the encoder's .get_config() if needed */
	pipe_config->pixel_multiplier = 1;

	ilk_pch_get_config(pipe_config);

	intel_get_transcoder_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ilk_get_pfit_config(pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
   3822
   3823static u8 bigjoiner_pipes(struct drm_i915_private *i915)
   3824{
   3825	if (DISPLAY_VER(i915) >= 12)
   3826		return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
   3827	else if (DISPLAY_VER(i915) >= 11)
   3828		return BIT(PIPE_B) | BIT(PIPE_C);
   3829	else
   3830		return 0;
   3831}
   3832
/*
 * Check whether the given cpu transcoder's DDI function is enabled.
 * Returns false as well when the transcoder's power domain is off
 * (the register is then never read).
 */
static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
					   enum transcoder cpu_transcoder)
{
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp = 0;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);

	with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

	return tmp & TRANS_DDI_FUNC_ENABLE;
}
   3847
/*
 * Read out which pipes are currently acting as bigjoiner masters
 * and slaves. Checks the compressed (DSC) joiner bits on all
 * platforms, and additionally the uncompressed joiner bits on
 * display version 13+.
 */
static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv,
				    u8 *master_pipes, u8 *slave_pipes)
{
	struct intel_crtc *crtc;

	*master_pipes = 0;
	*slave_pipes = 0;

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
					 bigjoiner_pipes(dev_priv)) {
		enum intel_display_power_domain power_domain;
		enum pipe pipe = crtc->pipe;
		intel_wakeref_t wakeref;

		/* compressed (DSC) bigjoiner state */
		power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (!(tmp & BIG_JOINER_ENABLE))
				continue;

			if (tmp & MASTER_BIG_JOINER_ENABLE)
				*master_pipes |= BIT(pipe);
			else
				*slave_pipes |= BIT(pipe);
		}

		/* uncompressed bigjoiner only exists on display version 13+ */
		if (DISPLAY_VER(dev_priv) < 13)
			continue;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
			u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));

			if (tmp & UNCOMPRESSED_JOINER_MASTER)
				*master_pipes |= BIT(pipe);
			if (tmp & UNCOMPRESSED_JOINER_SLAVE)
				*slave_pipes |= BIT(pipe);
		}
	}

	/* Bigjoiner pipes should always be consecutive master and slave */
	drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1,
		 "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
		 *master_pipes, *slave_pipes);
}
   3894
   3895static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
   3896{
   3897	if ((slave_pipes & BIT(pipe)) == 0)
   3898		return pipe;
   3899
   3900	/* ignore everything above our pipe */
   3901	master_pipes &= ~GENMASK(7, pipe);
   3902
   3903	/* highest remaining bit should be our master pipe */
   3904	return fls(master_pipes) - 1;
   3905}
   3906
   3907static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes)
   3908{
   3909	enum pipe master_pipe, next_master_pipe;
   3910
   3911	master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes);
   3912
   3913	if ((master_pipes & BIT(master_pipe)) == 0)
   3914		return 0;
   3915
   3916	/* ignore our master pipe and everything below it */
   3917	master_pipes &= ~GENMASK(master_pipe, 0);
   3918	/* make sure a high bit is set for the ffs() */
   3919	master_pipes |= BIT(7);
   3920	/* lowest remaining bit should be the next master pipe */
   3921	next_master_pipe = ffs(master_pipes) - 1;
   3922
   3923	return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe);
   3924}
   3925
   3926static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
   3927{
   3928	u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
   3929
   3930	if (DISPLAY_VER(i915) >= 11)
   3931		panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
   3932
   3933	return panel_transcoder_mask;
   3934}
   3935
/*
 * Determine the set of cpu transcoders currently feeding this
 * crtc: panel (eDP/DSI) transcoders whose DDI function points at
 * our pipe, the pipe's own transcoder, and — for a bigjoiner
 * slave — its master pipe's transcoder.
 */
static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
	enum transcoder cpu_transcoder;
	u8 master_pipes, slave_pipes;
	u8 enabled_transcoders = 0;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
				       panel_transcoder_mask) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;
		enum pipe trans_pipe;
		u32 tmp = 0;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
			tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));

		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/* decode which pipe this panel transcoder is driving */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(cpu_transcoder));
			fallthrough;
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe)
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	/* single pipe or bigjoiner master */
	cpu_transcoder = (enum transcoder) crtc->pipe;
	if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
		enabled_transcoders |= BIT(cpu_transcoder);

	/* bigjoiner slave -> consider the master pipe's transcoder as well */
	enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes);
	if (slave_pipes & BIT(crtc->pipe)) {
		cpu_transcoder = (enum transcoder)
			get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes);
		if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
			enabled_transcoders |= BIT(cpu_transcoder);
	}

	return enabled_transcoders;
}
   4004
   4005static bool has_edp_transcoders(u8 enabled_transcoders)
   4006{
   4007	return enabled_transcoders & BIT(TRANSCODER_EDP);
   4008}
   4009
   4010static bool has_dsi_transcoders(u8 enabled_transcoders)
   4011{
   4012	return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
   4013				      BIT(TRANSCODER_DSI_1));
   4014}
   4015
   4016static bool has_pipe_transcoders(u8 enabled_transcoders)
   4017{
   4018	return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
   4019				       BIT(TRANSCODER_DSI_0) |
   4020				       BIT(TRANSCODER_DSI_1));
   4021}
   4022
/*
 * Sanity check the readout: mixing transcoder types, or ganging
 * any transcoders other than DSI ones, indicates broken hardware
 * state.
 */
static void assert_enabled_transcoders(struct drm_i915_private *i915,
				       u8 enabled_transcoders)
{
	/* Only one type of transcoder please */
	drm_WARN_ON(&i915->drm,
		    has_edp_transcoders(enabled_transcoders) +
		    has_dsi_transcoders(enabled_transcoders) +
		    has_pipe_transcoders(enabled_transcoders) > 1);

	/* Only DSI transcoders can be ganged */
	drm_WARN_ON(&i915->drm,
		    !has_dsi_transcoders(enabled_transcoders) &&
		    !is_power_of_2(enabled_transcoders));
}
   4037
/*
 * Figure out which cpu transcoder drives this crtc and whether it
 * is enabled. Grabs the transcoder's power domain reference into
 * *power_domain_set (released by the caller). Returns true when
 * the transcoder's PIPECONF has its enable bit set.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long enabled_transcoders;
	u32 tmp;

	enabled_transcoders = hsw_enabled_transcoders(crtc);
	if (!enabled_transcoders)
		return false;

	assert_enabled_transcoders(dev_priv, enabled_transcoders);

	/*
	 * With the exception of DSI we should only ever have
	 * a single enabled transcoder. With DSI let's just
	 * pick the first one.
	 */
	pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	/* panel transcoder on pipe A "on/off" input -> pfit force-through */
	if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

		if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
			pipe_config->pch_pfit.force_thru = true;
	}

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
   4075
/*
 * Check whether a BXT/GLK DSI transcoder is driving this crtc.
 * Grabs the transcoder power domain reference(s) into
 * *power_domain_set (released by the caller). Returns true and
 * sets pipe_config->cpu_transcoder on a match.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 struct intel_display_power_domain_set *power_domain_set)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder;
	enum port port;
	u32 tmp;

	/* DSI ports A and C map to DSI transcoders A and C */
	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
			continue;

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* the DSI transcoder must be routed to our pipe */
		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
   4121
/*
 * Read out the bigjoiner topology for this crtc: bigjoiner_pipes
 * becomes the master pipe's bit plus all of its slaves' bits, or
 * stays untouched when the pipe is not part of a bigjoiner config.
 */
static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	u8 master_pipes, slave_pipes;
	enum pipe pipe = crtc->pipe;

	enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes);

	if (((master_pipes | slave_pipes) & BIT(pipe)) == 0)
		return;

	crtc_state->bigjoiner_pipes =
		BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) |
		get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes);
}
   4138
/*
 * Read back the full pipe configuration on HSW+ platforms.
 * Accumulates the required power domain references in a local set
 * and releases them all before returning. Returns true when an
 * active transcoder (DDI or BXT/GLK DSI) drives the pipe.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_display_power_domain_set power_domain_set = { };
	bool active;
	u32 tmp;

	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						       POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);

	/* a pipe can't be driven by both a DDI and a DSI transcoder */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	if (!active)
		goto out;

	intel_dsc_get_config(pipe_config);
	intel_bigjoiner_get_config(pipe_config);

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    DISPLAY_VER(dev_priv) >= 11)
		intel_get_transcoder_timings(crtc, pipe_config);

	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
		intel_vrr_get_config(crtc, pipe_config);

	intel_get_pipe_src_size(crtc, pipe_config);

	/* output format: HSW keeps it in PIPECONF, BDW+ in PIPEMISC */
	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (DISPLAY_VER(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* pfit registers live in a separately gated power well */
	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
		if (DISPLAY_VER(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	hsw_ips_get_config(pipe_config);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, CHICKEN_TRANS(pipe_config->cpu_transcoder));

		/* register field is 0-based, state stores it 1-based */
		pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
	} else {
		/* no idea if this is correct */
		pipe_config->framestart_delay = 1;
	}

out:
	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);

	return active;
}
   4248
   4249static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
   4250{
   4251	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   4252	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
   4253
   4254	if (!i915->display->get_pipe_config(crtc, crtc_state))
   4255		return false;
   4256
   4257	crtc_state->hw.active = true;
   4258
   4259	intel_crtc_readout_derived_state(crtc_state);
   4260
   4261	return true;
   4262}
   4263
/* VESA 640x480x72Hz mode to set on the pipe for load detection */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
   4269
   4270static int intel_modeset_disable_planes(struct drm_atomic_state *state,
   4271					struct drm_crtc *crtc)
   4272{
   4273	struct drm_plane *plane;
   4274	struct drm_plane_state *plane_state;
   4275	int ret, i;
   4276
   4277	ret = drm_atomic_add_affected_planes(state, crtc);
   4278	if (ret)
   4279		return ret;
   4280
   4281	for_each_new_plane_in_state(state, plane, plane_state, i) {
   4282		if (plane_state->crtc != crtc)
   4283			continue;
   4284
   4285		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
   4286		if (ret)
   4287			return ret;
   4288
   4289		drm_atomic_set_fb_for_plane(plane_state, NULL);
   4290	}
   4291
   4292	return 0;
   4293}
   4294
/*
 * Set up a pipe driving @connector with a fixed mode so that load detection
 * can be performed on it.
 *
 * On success, stores the state needed to undo the change in
 * old->restore_state (to be committed later by
 * intel_release_load_detect_pipe()) and returns true. On failure returns
 * false, except for lock contention where -EDEADLK is returned so the
 * caller can back off and retry. Note the historical int-encoded bool
 * return value.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct intel_crtc *possible_crtc;
	struct intel_crtc *crtc = NULL;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.base.id, encoder->base.name);

	old->restore_state = NULL;

	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = to_intel_crtc(connector->state->crtc);

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_intel_crtc(dev, possible_crtc) {
		/* Skip CRTCs the encoder cannot be routed to. */
		if (!(encoder->base.possible_crtcs &
		      drm_crtc_mask(&possible_crtc->base)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
		if (ret)
			goto fail;

		/* Already in use: drop the lock and keep looking. */
		if (possible_crtc->base.state->enable) {
			drm_modeset_unlock(&possible_crtc->base.mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	/* Build two states: one to enable the pipe, one to restore later. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	/* Load detection does not need any planes scanning out. */
	ret = intel_modeset_disable_planes(state, &crtc->base);
	if (ret)
		goto fail;

	/* Snapshot the current connector/crtc/plane state for restore. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	/* Hand the restore state to the caller for later release. */
	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_crtc_wait_for_next_vblank(crtc);

	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* Propagate deadlock so the caller can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
   4450
   4451void intel_release_load_detect_pipe(struct drm_connector *connector,
   4452				    struct intel_load_detect_pipe *old,
   4453				    struct drm_modeset_acquire_ctx *ctx)
   4454{
   4455	struct intel_encoder *intel_encoder =
   4456		intel_attached_encoder(to_intel_connector(connector));
   4457	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
   4458	struct drm_encoder *encoder = &intel_encoder->base;
   4459	struct drm_atomic_state *state = old->restore_state;
   4460	int ret;
   4461
   4462	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
   4463		    connector->base.id, connector->name,
   4464		    encoder->base.id, encoder->name);
   4465
   4466	if (!state)
   4467		return;
   4468
   4469	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
   4470	if (ret)
   4471		drm_dbg_kms(&i915->drm,
   4472			    "Couldn't release load detect pipe: %i\n", ret);
   4473	drm_atomic_state_put(state);
   4474}
   4475
   4476static int i9xx_pll_refclk(struct drm_device *dev,
   4477			   const struct intel_crtc_state *pipe_config)
   4478{
   4479	struct drm_i915_private *dev_priv = to_i915(dev);
   4480	u32 dpll = pipe_config->dpll_hw_state.dpll;
   4481
   4482	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
   4483		return dev_priv->vbt.lvds_ssc_freq;
   4484	else if (HAS_PCH_SPLIT(dev_priv))
   4485		return 120000;
   4486	else if (DISPLAY_VER(dev_priv) != 2)
   4487		return 96000;
   4488	else
   4489		return 48000;
   4490}
   4491
/* Returns the clock of the currently programmed mode of the given pipe. */
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
			 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP divisor register the DPLL currently selects. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode M1/N/M2; Pineview has its own field layout for N and M2. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (DISPLAY_VER(dev_priv) != 2) {
		/* P1 is encoded as a one-hot bitmask; ffs() recovers the divisor. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on whether the PLL drives a DAC or LVDS. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2 encodes the post dividers differently. */
		enum pipe lvds_pipe;

		if (IS_I85X(dev_priv) &&
		    intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
		    lvds_pipe == crtc->pipe) {
			u32 lvds = intel_de_read(dev_priv, LVDS);

			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			/* P2 follows the LVDS clock B power state. */
			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
   4584
   4585int intel_dotclock_calculate(int link_freq,
   4586			     const struct intel_link_m_n *m_n)
   4587{
   4588	/*
   4589	 * The calculation for the data clock is:
   4590	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
   4591	 * But we want to avoid losing precison if possible, so:
   4592	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
   4593	 *
   4594	 * and the link clock is simpler:
   4595	 * link_clock = (m * link_clock) / n
   4596	 */
   4597
   4598	if (!m_n->link_n)
   4599		return 0;
   4600
   4601	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
   4602}
   4603
   4604/* Returns the currently programmed mode of the given encoder. */
   4605struct drm_display_mode *
   4606intel_encoder_current_mode(struct intel_encoder *encoder)
   4607{
   4608	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
   4609	struct intel_crtc_state *crtc_state;
   4610	struct drm_display_mode *mode;
   4611	struct intel_crtc *crtc;
   4612	enum pipe pipe;
   4613
   4614	if (!encoder->get_hw_state(encoder, &pipe))
   4615		return NULL;
   4616
   4617	crtc = intel_crtc_for_pipe(dev_priv, pipe);
   4618
   4619	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
   4620	if (!mode)
   4621		return NULL;
   4622
   4623	crtc_state = intel_crtc_state_alloc(crtc);
   4624	if (!crtc_state) {
   4625		kfree(mode);
   4626		return NULL;
   4627	}
   4628
   4629	if (!intel_crtc_get_pipe_config(crtc_state)) {
   4630		kfree(crtc_state);
   4631		kfree(mode);
   4632		return NULL;
   4633	}
   4634
   4635	intel_encoder_get_config(encoder, crtc_state);
   4636
   4637	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
   4638
   4639	kfree(crtc_state);
   4640
   4641	return mode;
   4642}
   4643
   4644static bool encoders_cloneable(const struct intel_encoder *a,
   4645			       const struct intel_encoder *b)
   4646{
   4647	/* masks could be asymmetric, so check both ways */
   4648	return a == b || (a->cloneable & (1 << b->type) &&
   4649			  b->cloneable & (1 << a->type));
   4650}
   4651
   4652static bool check_single_encoder_cloning(struct intel_atomic_state *state,
   4653					 struct intel_crtc *crtc,
   4654					 struct intel_encoder *encoder)
   4655{
   4656	struct intel_encoder *source_encoder;
   4657	struct drm_connector *connector;
   4658	struct drm_connector_state *connector_state;
   4659	int i;
   4660
   4661	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
   4662		if (connector_state->crtc != &crtc->base)
   4663			continue;
   4664
   4665		source_encoder =
   4666			to_intel_encoder(connector_state->best_encoder);
   4667		if (!encoders_cloneable(encoder, source_encoder))
   4668			return false;
   4669	}
   4670
   4671	return true;
   4672}
   4673
   4674static int icl_add_linked_planes(struct intel_atomic_state *state)
   4675{
   4676	struct intel_plane *plane, *linked;
   4677	struct intel_plane_state *plane_state, *linked_plane_state;
   4678	int i;
   4679
   4680	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
   4681		linked = plane_state->planar_linked_plane;
   4682
   4683		if (!linked)
   4684			continue;
   4685
   4686		linked_plane_state = intel_atomic_get_plane_state(state, linked);
   4687		if (IS_ERR(linked_plane_state))
   4688			return PTR_ERR(linked_plane_state);
   4689
   4690		drm_WARN_ON(state->base.dev,
   4691			    linked_plane_state->planar_linked_plane != plane);
   4692		drm_WARN_ON(state->base.dev,
   4693			    linked_plane_state->planar_slave == plane_state->planar_slave);
   4694	}
   4695
   4696	return 0;
   4697}
   4698
/*
 * On gen11+ planar (NV12-family) formats need a second hardware plane to
 * scan out the Y component. Tear down all stale master/slave plane links
 * for this CRTC, then pair every planar plane in the state with a free
 * Y-capable plane and copy the scanout parameters over to the slave.
 * Returns 0 on success, -EINVAL if not enough Y planes are free, or the
 * error from adding a plane to the atomic state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (DISPLAY_VER(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			/* Former slave with no visible content: drop it entirely. */
			crtc_state->enabled_planes &= ~BIT(plane->id);
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
			crtc_state->data_rate[plane->id] = 0;
			crtc_state->rel_data_rate[plane->id] = 0;
		}

		plane_state->planar_slave = false;
	}

	/* No planar planes on this CRTC: nothing to pair up. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a Y-capable plane on this CRTC that is not in use. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		/* Establish the mutual master/slave link. */
		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->enabled_planes |= BIT(linked->id);
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		crtc_state->data_rate[linked->id] =
			crtc_state->data_rate_y[plane->id];
		crtc_state->rel_data_rate[linked->id] =
			crtc_state->rel_data_rate_y[plane->id];
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		linked_state->decrypt = plane_state->decrypt;

		intel_plane_copy_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR planes select their Y plane via the CUS control field. */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
   4803
   4804static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
   4805{
   4806	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
   4807	struct intel_atomic_state *state =
   4808		to_intel_atomic_state(new_crtc_state->uapi.state);
   4809	const struct intel_crtc_state *old_crtc_state =
   4810		intel_atomic_get_old_crtc_state(state, crtc);
   4811
   4812	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
   4813}
   4814
   4815static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
   4816{
   4817	const struct drm_display_mode *pipe_mode =
   4818		&crtc_state->hw.pipe_mode;
   4819	int linetime_wm;
   4820
   4821	if (!crtc_state->hw.enable)
   4822		return 0;
   4823
   4824	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
   4825					pipe_mode->crtc_clock);
   4826
   4827	return min(linetime_wm, 0x1ff);
   4828}
   4829
   4830static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
   4831			       const struct intel_cdclk_state *cdclk_state)
   4832{
   4833	const struct drm_display_mode *pipe_mode =
   4834		&crtc_state->hw.pipe_mode;
   4835	int linetime_wm;
   4836
   4837	if (!crtc_state->hw.enable)
   4838		return 0;
   4839
   4840	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
   4841					cdclk_state->logical.cdclk);
   4842
   4843	return min(linetime_wm, 0x1ff);
   4844}
   4845
   4846static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
   4847{
   4848	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   4849	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   4850	const struct drm_display_mode *pipe_mode =
   4851		&crtc_state->hw.pipe_mode;
   4852	int linetime_wm;
   4853
   4854	if (!crtc_state->hw.enable)
   4855		return 0;
   4856
   4857	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
   4858				   crtc_state->pixel_rate);
   4859
   4860	/* Display WA #1135: BXT:ALL GLK:ALL */
   4861	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
   4862	    dev_priv->ipc_enabled)
   4863		linetime_wm /= 2;
   4864
   4865	return min(linetime_wm, 0x1ff);
   4866}
   4867
   4868static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
   4869				   struct intel_crtc *crtc)
   4870{
   4871	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   4872	struct intel_crtc_state *crtc_state =
   4873		intel_atomic_get_new_crtc_state(state, crtc);
   4874	const struct intel_cdclk_state *cdclk_state;
   4875
   4876	if (DISPLAY_VER(dev_priv) >= 9)
   4877		crtc_state->linetime = skl_linetime_wm(crtc_state);
   4878	else
   4879		crtc_state->linetime = hsw_linetime_wm(crtc_state);
   4880
   4881	if (!hsw_crtc_supports_ips(crtc))
   4882		return 0;
   4883
   4884	cdclk_state = intel_atomic_get_cdclk_state(state);
   4885	if (IS_ERR(cdclk_state))
   4886		return PTR_ERR(cdclk_state);
   4887
   4888	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
   4889						       cdclk_state);
   4890
   4891	return 0;
   4892}
   4893
/*
 * Per-CRTC atomic check: validate and derive all CRTC-level state for a
 * commit (clocks/DPLL, color management, watermarks, scalers, IPS,
 * linetime, PSR2 selective fetch). Returns 0 on success or a negative
 * error code, aborting at the first failing step.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
	int ret;

	/* Pre-gen5 (except G4X): disabling the pipe needs a post-update WM pass. */
	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	if (mode_changed) {
		ret = intel_dpll_crtc_compute_clock(state, crtc);
		if (ret)
			return ret;

		ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	ret = intel_compute_pipe_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Target pipe watermarks are invalid\n");
		return ret;
	}

	/*
	 * Calculate 'intermediate' watermarks that satisfy both the
	 * old state and the new state.  We can program these
	 * immediately.
	 */
	ret = intel_compute_intermediate_wm(state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "No valid intermediate pipe watermarks are possible\n");
		return ret;
	}

	/* Gen9+: CRTC scaler assignment and setup. */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_ips_compute_config(state, crtc);
		if (ret)
			return ret;
	}

	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	ret = intel_psr2_sel_fetch_update(state, crtc);
	if (ret)
		return ret;

	return 0;
}
   4982
/*
 * Synchronize every connector's atomic state with the current encoder
 * bindings (used after hardware state readout). Also rebalances the
 * connector references: a reference held for a previous crtc assignment
 * is dropped, and a new one is taken when the connector is (re)bound.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state = connector->base.state;
		struct intel_encoder *encoder =
			to_intel_encoder(connector->base.encoder);

		/* Drop the reference held for the stale crtc assignment. */
		if (conn_state->crtc)
			drm_connector_put(&connector->base);

		if (encoder) {
			struct intel_crtc *crtc =
				to_intel_crtc(encoder->base.crtc);
			const struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			conn_state->best_encoder = &encoder->base;
			conn_state->crtc = &crtc->base;
			/* pipe_bpp of 0 falls back to 24 (8 bpc). */
			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;

			drm_connector_get(&connector->base);
		} else {
			conn_state->best_encoder = NULL;
			conn_state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
   5015
/*
 * Clamp pipe_config->pipe_bpp to what the sink behind @conn_state can
 * accept, mapping the connector's max_bpc (bits per component) onto the
 * supported 6/8/10/12 bpc pipe depths. Returns 0 on success, -EINVAL for
 * an out-of-range max_bpc.
 */
static int
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
		      struct intel_crtc_state *pipe_config)
{
	struct drm_connector *connector = conn_state->connector;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	const struct drm_display_info *info = &connector->display_info;
	int bpp;

	/* Round max_bpc down to a supported pipe depth (bpp = 3 * bpc). */
	switch (conn_state->max_bpc) {
	case 6 ... 7:
		bpp = 6 * 3;
		break;
	case 8 ... 9:
		bpp = 8 * 3;
		break;
	case 10 ... 11:
		bpp = 10 * 3;
		break;
	case 12 ... 16:
		bpp = 12 * 3;
		break;
	default:
		MISSING_CASE(conn_state->max_bpc);
		return -EINVAL;
	}

	/* Only ever lower the pipe bpp, never raise it. */
	if (bpp < pipe_config->pipe_bpp) {
		drm_dbg_kms(&i915->drm,
			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
			    connector->base.id, connector->name,
			    bpp, 3 * info->bpc,
			    3 * conn_state->max_requested_bpc,
			    pipe_config->pipe_bpp);

		pipe_config->pipe_bpp = bpp;
	}

	return 0;
}
   5057
   5058static int
   5059compute_baseline_pipe_bpp(struct intel_crtc *crtc,
   5060			  struct intel_crtc_state *pipe_config)
   5061{
   5062	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   5063	struct drm_atomic_state *state = pipe_config->uapi.state;
   5064	struct drm_connector *connector;
   5065	struct drm_connector_state *connector_state;
   5066	int bpp, i;
   5067
   5068	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
   5069	    IS_CHERRYVIEW(dev_priv)))
   5070		bpp = 10*3;
   5071	else if (DISPLAY_VER(dev_priv) >= 5)
   5072		bpp = 12*3;
   5073	else
   5074		bpp = 8*3;
   5075
   5076	pipe_config->pipe_bpp = bpp;
   5077
   5078	/* Clamp display bpp to connector max bpp */
   5079	for_each_new_connector_in_state(state, connector, connector_state, i) {
   5080		int ret;
   5081
   5082		if (connector_state->crtc != &crtc->base)
   5083			continue;
   5084
   5085		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
   5086		if (ret)
   5087			return ret;
   5088	}
   5089
   5090	return 0;
   5091}
   5092
/* Dump the hardware (crtc_*) timings of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
				    const struct drm_display_mode *mode)
{
	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
		    "type: 0x%x flags: 0x%x\n",
		    mode->crtc_clock,
		    mode->crtc_hdisplay, mode->crtc_hsync_start,
		    mode->crtc_hsync_end, mode->crtc_htotal,
		    mode->crtc_vdisplay, mode->crtc_vsync_start,
		    mode->crtc_vsync_end, mode->crtc_vtotal,
		    mode->type, mode->flags);
}
   5105
/* Dump a link M/N configuration to the KMS debug log, tagged with @id. */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    id, lane_count,
		    m_n->data_m, m_n->data_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
}
   5119
   5120static void
   5121intel_dump_infoframe(struct drm_i915_private *dev_priv,
   5122		     const union hdmi_infoframe *frame)
   5123{
   5124	if (!drm_debug_enabled(DRM_UT_KMS))
   5125		return;
   5126
   5127	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
   5128}
   5129
   5130static void
   5131intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
   5132		      const struct drm_dp_vsc_sdp *vsc)
   5133{
   5134	if (!drm_debug_enabled(DRM_UT_KMS))
   5135		return;
   5136
   5137	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
   5138}
   5139
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human-readable names for INTEL_OUTPUT_* types, indexed by type value. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
   5158
/*
 * Format the set bits of @output_types into @buf as a comma-separated
 * list of INTEL_OUTPUT_* names. Stops early if @len runs out (detected
 * via snprintf's would-have-written return value). Warns once if any
 * bit has no corresponding name in output_type_str.
 */
static void snprintf_output_types(char *buf, size_t len,
				  unsigned int output_types)
{
	char *str = buf;
	int i;

	str[0] = '\0';

	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
		int r;

		if ((output_types & BIT(i)) == 0)
			continue;

		/* Prefix a comma for every name after the first. */
		r = snprintf(str, len, "%s%s",
			     str != buf ? "," : "", output_type_str[i]);
		if (r >= len) /* output truncated: no room for more names */
			break;
		str += r;
		len -= r;

		output_types &= ~BIT(i);
	}

	/* Any bits still set have no entry in output_type_str. */
	WARN_ON_ONCE(output_types != 0);
}
   5185
/* Human-readable names for INTEL_OUTPUT_FORMAT_*, indexed by format. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
   5191
   5192static const char *output_formats(enum intel_output_format format)
   5193{
   5194	if (format >= ARRAY_SIZE(output_format_str))
   5195		return "invalid";
   5196	return output_format_str[format];
   5197}
   5198
/*
 * Dump a plane's state (framebuffer, rotation, scaler, src/dst rects)
 * to the KMS debug log. Planes with no fb get a short [NOFB] line.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    str_yes_no(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height, &fb->format->format,
		    fb->modifier, str_yes_no(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	/* src/dst rects only carry meaning when the plane is visible. */
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}
   5226
   5227static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
   5228				   struct intel_atomic_state *state,
   5229				   const char *context)
   5230{
   5231	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
   5232	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   5233	const struct intel_plane_state *plane_state;
   5234	struct intel_plane *plane;
   5235	char buf[64];
   5236	int i;
   5237
   5238	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
   5239		    crtc->base.base.id, crtc->base.name,
   5240		    str_yes_no(pipe_config->hw.enable), context);
   5241
   5242	if (!pipe_config->hw.enable)
   5243		goto dump_planes;
   5244
   5245	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
   5246	drm_dbg_kms(&dev_priv->drm,
   5247		    "active: %s, output_types: %s (0x%x), output format: %s\n",
   5248		    str_yes_no(pipe_config->hw.active),
   5249		    buf, pipe_config->output_types,
   5250		    output_formats(pipe_config->output_format));
   5251
   5252	drm_dbg_kms(&dev_priv->drm,
   5253		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
   5254		    transcoder_name(pipe_config->cpu_transcoder),
   5255		    pipe_config->pipe_bpp, pipe_config->dither);
   5256
   5257	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
   5258		    transcoder_name(pipe_config->mst_master_transcoder));
   5259
   5260	drm_dbg_kms(&dev_priv->drm,
   5261		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
   5262		    transcoder_name(pipe_config->master_transcoder),
   5263		    pipe_config->sync_mode_slaves_mask);
   5264
   5265	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s, pipes: 0x%x\n",
   5266		    intel_crtc_is_bigjoiner_slave(pipe_config) ? "slave" :
   5267		    intel_crtc_is_bigjoiner_master(pipe_config) ? "master" : "no",
   5268		    pipe_config->bigjoiner_pipes);
   5269
   5270	drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
   5271		    str_enabled_disabled(pipe_config->splitter.enable),
   5272		    pipe_config->splitter.link_count,
   5273		    pipe_config->splitter.pixel_overlap);
   5274
   5275	if (pipe_config->has_pch_encoder)
   5276		intel_dump_m_n_config(pipe_config, "fdi",
   5277				      pipe_config->fdi_lanes,
   5278				      &pipe_config->fdi_m_n);
   5279
   5280	if (intel_crtc_has_dp_encoder(pipe_config)) {
   5281		intel_dump_m_n_config(pipe_config, "dp m_n",
   5282				      pipe_config->lane_count,
   5283				      &pipe_config->dp_m_n);
   5284		intel_dump_m_n_config(pipe_config, "dp m2_n2",
   5285				      pipe_config->lane_count,
   5286				      &pipe_config->dp_m2_n2);
   5287	}
   5288
   5289	drm_dbg_kms(&dev_priv->drm, "framestart delay: %d, MSA timing delay: %d\n",
   5290		    pipe_config->framestart_delay, pipe_config->msa_timing_delay);
   5291
   5292	drm_dbg_kms(&dev_priv->drm,
   5293		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
   5294		    pipe_config->has_audio, pipe_config->has_infoframe,
   5295		    pipe_config->infoframes.enable);
   5296
   5297	if (pipe_config->infoframes.enable &
   5298	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
   5299		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
   5300			    pipe_config->infoframes.gcp);
   5301	if (pipe_config->infoframes.enable &
   5302	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
   5303		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
   5304	if (pipe_config->infoframes.enable &
   5305	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
   5306		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
   5307	if (pipe_config->infoframes.enable &
   5308	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
   5309		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
   5310	if (pipe_config->infoframes.enable &
   5311	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
   5312		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
   5313	if (pipe_config->infoframes.enable &
   5314	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
   5315		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
   5316	if (pipe_config->infoframes.enable &
   5317	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
   5318		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
   5319
   5320	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
   5321		    str_yes_no(pipe_config->vrr.enable),
   5322		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
   5323		    pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
   5324		    pipe_config->vrr.flipline,
   5325		    intel_vrr_vmin_vblank_start(pipe_config),
   5326		    intel_vrr_vmax_vblank_start(pipe_config));
   5327
   5328	drm_dbg_kms(&dev_priv->drm, "requested mode: " DRM_MODE_FMT "\n",
   5329		    DRM_MODE_ARG(&pipe_config->hw.mode));
   5330	drm_dbg_kms(&dev_priv->drm, "adjusted mode: " DRM_MODE_FMT "\n",
   5331		    DRM_MODE_ARG(&pipe_config->hw.adjusted_mode));
   5332	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
   5333	drm_dbg_kms(&dev_priv->drm, "pipe mode: " DRM_MODE_FMT "\n",
   5334		    DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
   5335	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
   5336	drm_dbg_kms(&dev_priv->drm,
   5337		    "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
   5338		    pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
   5339		    pipe_config->pixel_rate);
   5340
   5341	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
   5342		    pipe_config->linetime, pipe_config->ips_linetime);
   5343
   5344	if (DISPLAY_VER(dev_priv) >= 9)
   5345		drm_dbg_kms(&dev_priv->drm,
   5346			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
   5347			    crtc->num_scalers,
   5348			    pipe_config->scaler_state.scaler_users,
   5349			    pipe_config->scaler_state.scaler_id);
   5350
   5351	if (HAS_GMCH(dev_priv))
   5352		drm_dbg_kms(&dev_priv->drm,
   5353			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
   5354			    pipe_config->gmch_pfit.control,
   5355			    pipe_config->gmch_pfit.pgm_ratios,
   5356			    pipe_config->gmch_pfit.lvds_border_bits);
   5357	else
   5358		drm_dbg_kms(&dev_priv->drm,
   5359			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
   5360			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
   5361			    str_enabled_disabled(pipe_config->pch_pfit.enabled),
   5362			    str_yes_no(pipe_config->pch_pfit.force_thru));
   5363
   5364	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i, drrs: %i\n",
   5365		    pipe_config->ips_enabled, pipe_config->double_wide,
   5366		    pipe_config->has_drrs);
   5367
   5368	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
   5369
   5370	if (IS_CHERRYVIEW(dev_priv))
   5371		drm_dbg_kms(&dev_priv->drm,
   5372			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
   5373			    pipe_config->cgm_mode, pipe_config->gamma_mode,
   5374			    pipe_config->gamma_enable, pipe_config->csc_enable);
   5375	else
   5376		drm_dbg_kms(&dev_priv->drm,
   5377			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
   5378			    pipe_config->csc_mode, pipe_config->gamma_mode,
   5379			    pipe_config->gamma_enable, pipe_config->csc_enable);
   5380
   5381	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
   5382		    pipe_config->hw.degamma_lut ?
   5383		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
   5384		    pipe_config->hw.gamma_lut ?
   5385		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
   5386
   5387dump_planes:
   5388	if (!state)
   5389		return;
   5390
   5391	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
   5392		if (plane->pipe == crtc->pipe)
   5393			intel_dump_plane_state(plane_state);
   5394	}
   5395}
   5396
   5397static bool check_digital_port_conflicts(struct intel_atomic_state *state)
   5398{
   5399	struct drm_device *dev = state->base.dev;
   5400	struct drm_connector *connector;
   5401	struct drm_connector_list_iter conn_iter;
   5402	unsigned int used_ports = 0;
   5403	unsigned int used_mst_ports = 0;
   5404	bool ret = true;
   5405
   5406	/*
   5407	 * We're going to peek into connector->state,
   5408	 * hence connection_mutex must be held.
   5409	 */
   5410	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
   5411
   5412	/*
   5413	 * Walk the connector list instead of the encoder
   5414	 * list to detect the problem on ddi platforms
   5415	 * where there's just one encoder per digital port.
   5416	 */
   5417	drm_connector_list_iter_begin(dev, &conn_iter);
   5418	drm_for_each_connector_iter(connector, &conn_iter) {
   5419		struct drm_connector_state *connector_state;
   5420		struct intel_encoder *encoder;
   5421
   5422		connector_state =
   5423			drm_atomic_get_new_connector_state(&state->base,
   5424							   connector);
   5425		if (!connector_state)
   5426			connector_state = connector->state;
   5427
   5428		if (!connector_state->best_encoder)
   5429			continue;
   5430
   5431		encoder = to_intel_encoder(connector_state->best_encoder);
   5432
   5433		drm_WARN_ON(dev, !connector_state->crtc);
   5434
   5435		switch (encoder->type) {
   5436		case INTEL_OUTPUT_DDI:
   5437			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
   5438				break;
   5439			fallthrough;
   5440		case INTEL_OUTPUT_DP:
   5441		case INTEL_OUTPUT_HDMI:
   5442		case INTEL_OUTPUT_EDP:
   5443			/* the same port mustn't appear more than once */
   5444			if (used_ports & BIT(encoder->port))
   5445				ret = false;
   5446
   5447			used_ports |= BIT(encoder->port);
   5448			break;
   5449		case INTEL_OUTPUT_DP_MST:
   5450			used_mst_ports |=
   5451				1 << encoder->port;
   5452			break;
   5453		default:
   5454			break;
   5455		}
   5456	}
   5457	drm_connector_list_iter_end(&conn_iter);
   5458
   5459	/* can't mix MST and SST/HDMI on the same port */
   5460	if (used_ports & used_mst_ports)
   5461		return false;
   5462
   5463	return ret;
   5464}
   5465
   5466static void
   5467intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
   5468					   struct intel_crtc *crtc)
   5469{
   5470	struct intel_crtc_state *crtc_state =
   5471		intel_atomic_get_new_crtc_state(state, crtc);
   5472
   5473	WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
   5474
   5475	drm_property_replace_blob(&crtc_state->hw.degamma_lut,
   5476				  crtc_state->uapi.degamma_lut);
   5477	drm_property_replace_blob(&crtc_state->hw.gamma_lut,
   5478				  crtc_state->uapi.gamma_lut);
   5479	drm_property_replace_blob(&crtc_state->hw.ctm,
   5480				  crtc_state->uapi.ctm);
   5481}
   5482
   5483static void
   5484intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state,
   5485					 struct intel_crtc *crtc)
   5486{
   5487	struct intel_crtc_state *crtc_state =
   5488		intel_atomic_get_new_crtc_state(state, crtc);
   5489
   5490	WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));
   5491
   5492	crtc_state->hw.enable = crtc_state->uapi.enable;
   5493	crtc_state->hw.active = crtc_state->uapi.active;
   5494	drm_mode_copy(&crtc_state->hw.mode,
   5495		      &crtc_state->uapi.mode);
   5496	drm_mode_copy(&crtc_state->hw.adjusted_mode,
   5497		      &crtc_state->uapi.adjusted_mode);
   5498	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
   5499
   5500	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
   5501}
   5502
   5503static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
   5504{
   5505	if (intel_crtc_is_bigjoiner_slave(crtc_state))
   5506		return;
   5507
   5508	crtc_state->uapi.enable = crtc_state->hw.enable;
   5509	crtc_state->uapi.active = crtc_state->hw.active;
   5510	drm_WARN_ON(crtc_state->uapi.crtc->dev,
   5511		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
   5512
   5513	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
   5514	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
   5515
   5516	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
   5517				  crtc_state->hw.degamma_lut);
   5518	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
   5519				  crtc_state->hw.gamma_lut);
   5520	drm_property_replace_blob(&crtc_state->uapi.ctm,
   5521				  crtc_state->hw.ctm);
   5522}
   5523
   5524static void
   5525copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state,
   5526				    struct intel_crtc *slave_crtc)
   5527{
   5528	struct intel_crtc_state *slave_crtc_state =
   5529		intel_atomic_get_new_crtc_state(state, slave_crtc);
   5530	struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
   5531	const struct intel_crtc_state *master_crtc_state =
   5532		intel_atomic_get_new_crtc_state(state, master_crtc);
   5533
   5534	drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut,
   5535				  master_crtc_state->hw.degamma_lut);
   5536	drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut,
   5537				  master_crtc_state->hw.gamma_lut);
   5538	drm_property_replace_blob(&slave_crtc_state->hw.ctm,
   5539				  master_crtc_state->hw.ctm);
   5540
   5541	slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed;
   5542}
   5543
/*
 * Full modeset path: rebuild a bigjoiner slave's crtc state as a copy of
 * its master's state, while preserving the few items that remain genuinely
 * per-pipe (uapi state, scaler state, DPLL selection, CRC enablement).
 *
 * Returns 0 on success, -ENOMEM if the temporary copy cannot be allocated.
 */
static int
copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
				  struct intel_crtc *slave_crtc)
{
	struct intel_crtc_state *slave_crtc_state =
		intel_atomic_get_new_crtc_state(state, slave_crtc);
	struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state);
	const struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);
	struct intel_crtc_state *saved_state;

	/* Master and slave must already agree on which pipes are joined. */
	WARN_ON(master_crtc_state->bigjoiner_pipes !=
		slave_crtc_state->bigjoiner_pipes);

	/* Start from a byte-for-byte duplicate of the master's state. */
	saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* preserve some things from the slave's original crtc state */
	saved_state->uapi = slave_crtc_state->uapi;
	saved_state->scaler_state = slave_crtc_state->scaler_state;
	saved_state->shared_dpll = slave_crtc_state->shared_dpll;
	saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
	saved_state->crc_enabled = slave_crtc_state->crc_enabled;

	/*
	 * Release references held by the slave's old hw state before it is
	 * overwritten wholesale by the memcpy below.
	 */
	intel_crtc_free_hw_state(slave_crtc_state);
	memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
	kfree(saved_state);

	/* Re-init hw state */
	memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw));
	slave_crtc_state->hw.enable = master_crtc_state->hw.enable;
	slave_crtc_state->hw.active = master_crtc_state->hw.active;
	drm_mode_copy(&slave_crtc_state->hw.mode,
		      &master_crtc_state->hw.mode);
	drm_mode_copy(&slave_crtc_state->hw.pipe_mode,
		      &master_crtc_state->hw.pipe_mode);
	drm_mode_copy(&slave_crtc_state->hw.adjusted_mode,
		      &master_crtc_state->hw.adjusted_mode);
	slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;

	/* Color management blobs are shared with the nomodeset path. */
	copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);

	/* Propagate uapi change flags so both pipes modeset together. */
	slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
	slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed;
	slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed;

	/* Sanity check: the copy must have kept the pipe sets in agreement. */
	WARN_ON(master_crtc_state->bigjoiner_pipes !=
		slave_crtc_state->bigjoiner_pipes);

	return 0;
}
   5596
/*
 * Reset a crtc's state to a freshly-allocated (mostly zeroed) state
 * before recomputing it, preserving only the fields that are known to
 * survive across a modeset computation.
 *
 * Returns 0 on success, -ENOMEM if the fresh state cannot be allocated.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* Watermarks are recomputed lazily on these platforms; keep them. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* Replace the old state wholesale with the cleared one. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc);

	return 0;
}
   5636
/*
 * Compute the full pipe configuration for a modeset: baseline bpp, pipe
 * source size, and then the encoder/crtc negotiation loop that lets the
 * connectors adjust (or reject) the adjusted mode.
 *
 * Returns 0 on success, -EDEADLK for lock retries, or another negative
 * errno on failure.
 */
static int
intel_modeset_pipe_config(struct intel_atomic_state *state,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int pipe_src_w, pipe_src_h;
	int base_bpp, ret, i;
	/* Allow exactly one -EAGAIN retry of the encoder loop. */
	bool retry = true;

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	pipe_config->framestart_delay = 1;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the starting bpp; encoders may lower pipe_bpp below. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_src_w, &pipe_src_h);
	drm_rect_init(&pipe_config->pipe_src, 0, 0,
		      pipe_src_w, pipe_src_h);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		/* -EDEADLK means a modeset lock retry; pass it up untouched. */
		if (ret == -EDEADLK)
			return ret;
		if (ret < 0) {
			drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	/* -EAGAIN: bandwidth constrained, retry the whole encoder loop once. */
	if (ret == -EAGAIN) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);
		return ret;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
   5777
   5778static int
   5779intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
   5780{
   5781	struct intel_atomic_state *state =
   5782		to_intel_atomic_state(crtc_state->uapi.state);
   5783	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
   5784	struct drm_connector_state *conn_state;
   5785	struct drm_connector *connector;
   5786	int i;
   5787
   5788	intel_bigjoiner_adjust_pipe_src(crtc_state);
   5789
   5790	for_each_new_connector_in_state(&state->base, connector,
   5791					conn_state, i) {
   5792		struct intel_encoder *encoder =
   5793			to_intel_encoder(conn_state->best_encoder);
   5794		int ret;
   5795
   5796		if (conn_state->crtc != &crtc->base ||
   5797		    !encoder->compute_config_late)
   5798			continue;
   5799
   5800		ret = encoder->compute_config_late(encoder, crtc_state,
   5801						   conn_state);
   5802		if (ret)
   5803			return ret;
   5804	}
   5805
   5806	return 0;
   5807}
   5808
   5809bool intel_fuzzy_clock_check(int clock1, int clock2)
   5810{
   5811	int diff;
   5812
   5813	if (clock1 == clock2)
   5814		return true;
   5815
   5816	if (!clock1 || !clock2)
   5817		return false;
   5818
   5819	diff = abs(clock1 - clock2);
   5820
   5821	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
   5822		return true;
   5823
   5824	return false;
   5825}
   5826
   5827static bool
   5828intel_compare_m_n(unsigned int m, unsigned int n,
   5829		  unsigned int m2, unsigned int n2,
   5830		  bool exact)
   5831{
   5832	if (m == m2 && n == n2)
   5833		return true;
   5834
   5835	if (exact || !m || !n || !m2 || !n2)
   5836		return false;
   5837
   5838	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
   5839
   5840	if (n > n2) {
   5841		while (n > n2) {
   5842			m2 <<= 1;
   5843			n2 <<= 1;
   5844		}
   5845	} else if (n < n2) {
   5846		while (n < n2) {
   5847			m <<= 1;
   5848			n <<= 1;
   5849		}
   5850	}
   5851
   5852	if (n != n2)
   5853		return false;
   5854
   5855	return intel_fuzzy_clock_check(m, m2);
   5856}
   5857
   5858static bool
   5859intel_compare_link_m_n(const struct intel_link_m_n *m_n,
   5860		       const struct intel_link_m_n *m2_n2,
   5861		       bool exact)
   5862{
   5863	return m_n->tu == m2_n2->tu &&
   5864		intel_compare_m_n(m_n->data_m, m_n->data_n,
   5865				  m2_n2->data_m, m2_n2->data_n, exact) &&
   5866		intel_compare_m_n(m_n->link_m, m_n->link_n,
   5867				  m2_n2->link_m, m2_n2->link_n, exact);
   5868}
   5869
   5870static bool
   5871intel_compare_infoframe(const union hdmi_infoframe *a,
   5872			const union hdmi_infoframe *b)
   5873{
   5874	return memcmp(a, b, sizeof(*a)) == 0;
   5875}
   5876
   5877static bool
   5878intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
   5879			 const struct drm_dp_vsc_sdp *b)
   5880{
   5881	return memcmp(a, b, sizeof(*a)) == 0;
   5882}
   5883
   5884static void
   5885pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
   5886			       bool fastset, const char *name,
   5887			       const union hdmi_infoframe *a,
   5888			       const union hdmi_infoframe *b)
   5889{
   5890	if (fastset) {
   5891		if (!drm_debug_enabled(DRM_UT_KMS))
   5892			return;
   5893
   5894		drm_dbg_kms(&dev_priv->drm,
   5895			    "fastset mismatch in %s infoframe\n", name);
   5896		drm_dbg_kms(&dev_priv->drm, "expected:\n");
   5897		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
   5898		drm_dbg_kms(&dev_priv->drm, "found:\n");
   5899		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
   5900	} else {
   5901		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
   5902		drm_err(&dev_priv->drm, "expected:\n");
   5903		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
   5904		drm_err(&dev_priv->drm, "found:\n");
   5905		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
   5906	}
   5907}
   5908
   5909static void
   5910pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
   5911				bool fastset, const char *name,
   5912				const struct drm_dp_vsc_sdp *a,
   5913				const struct drm_dp_vsc_sdp *b)
   5914{
   5915	if (fastset) {
   5916		if (!drm_debug_enabled(DRM_UT_KMS))
   5917			return;
   5918
   5919		drm_dbg_kms(&dev_priv->drm,
   5920			    "fastset mismatch in %s dp sdp\n", name);
   5921		drm_dbg_kms(&dev_priv->drm, "expected:\n");
   5922		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
   5923		drm_dbg_kms(&dev_priv->drm, "found:\n");
   5924		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
   5925	} else {
   5926		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
   5927		drm_err(&dev_priv->drm, "expected:\n");
   5928		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
   5929		drm_err(&dev_priv->drm, "found:\n");
   5930		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
   5931	}
   5932}
   5933
   5934static void __printf(4, 5)
   5935pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
   5936		     const char *name, const char *format, ...)
   5937{
   5938	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
   5939	struct va_format vaf;
   5940	va_list args;
   5941
   5942	va_start(args, format);
   5943	vaf.fmt = format;
   5944	vaf.va = &args;
   5945
   5946	if (fastset)
   5947		drm_dbg_kms(&i915->drm,
   5948			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
   5949			    crtc->base.base.id, crtc->base.name, name, &vaf);
   5950	else
   5951		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
   5952			crtc->base.base.id, crtc->base.name, name, &vaf);
   5953
   5954	va_end(args);
   5955}
   5956
   5957static bool fastboot_enabled(struct drm_i915_private *dev_priv)
   5958{
   5959	if (dev_priv->params.fastboot != -1)
   5960		return dev_priv->params.fastboot;
   5961
   5962	/* Enable fastboot by default on Skylake and newer */
   5963	if (DISPLAY_VER(dev_priv) >= 9)
   5964		return true;
   5965
   5966	/* Enable fastboot by default on VLV and CHV */
   5967	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
   5968		return true;
   5969
   5970	/* Disabled by default on all others */
   5971	return false;
   5972}
   5973
   5974static bool
   5975intel_pipe_config_compare(const struct intel_crtc_state *current_config,
   5976			  const struct intel_crtc_state *pipe_config,
   5977			  bool fastset)
   5978{
   5979	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
   5980	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
   5981	bool ret = true;
   5982	u32 bp_gamma = 0;
   5983	bool fixup_inherited = fastset &&
   5984		current_config->inherited && !pipe_config->inherited;
   5985
   5986	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
   5987		drm_dbg_kms(&dev_priv->drm,
   5988			    "initial modeset and fastboot not set\n");
   5989		ret = false;
   5990	}
   5991
   5992#define PIPE_CONF_CHECK_X(name) do { \
   5993	if (current_config->name != pipe_config->name) { \
   5994		pipe_config_mismatch(fastset, crtc, __stringify(name), \
   5995				     "(expected 0x%08x, found 0x%08x)", \
   5996				     current_config->name, \
   5997				     pipe_config->name); \
   5998		ret = false; \
   5999	} \
   6000} while (0)
   6001
   6002#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
   6003	if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
   6004		pipe_config_mismatch(fastset, crtc, __stringify(name), \
   6005				     "(expected 0x%08x, found 0x%08x)", \
   6006				     current_config->name & (mask), \
   6007				     pipe_config->name & (mask)); \
   6008		ret = false; \
   6009	} \
   6010} while (0)
   6011
   6012#define PIPE_CONF_CHECK_I(name) do { \
   6013	if (current_config->name != pipe_config->name) { \
   6014		pipe_config_mismatch(fastset, crtc, __stringify(name), \
   6015				     "(expected %i, found %i)", \
   6016				     current_config->name, \
   6017				     pipe_config->name); \
   6018		ret = false; \
   6019	} \
   6020} while (0)
   6021
   6022#define PIPE_CONF_CHECK_BOOL(name) do { \
   6023	if (current_config->name != pipe_config->name) { \
   6024		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
   6025				     "(expected %s, found %s)", \
   6026				     str_yes_no(current_config->name), \
   6027				     str_yes_no(pipe_config->name)); \
   6028		ret = false; \
   6029	} \
   6030} while (0)
   6031
   6032/*
   6033 * Checks state where we only read out the enabling, but not the entire
   6034 * state itself (like full infoframes or ELD for audio). These states
   6035 * require a full modeset on bootup to fix up.
   6036 */
   6037#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
   6038	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
   6039		PIPE_CONF_CHECK_BOOL(name); \
   6040	} else { \
   6041		pipe_config_mismatch(fastset, crtc, __stringify(name), \
   6042				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
   6043				     str_yes_no(current_config->name), \
   6044				     str_yes_no(pipe_config->name)); \
   6045		ret = false; \
   6046	} \
   6047} while (0)
   6048
   6049#define PIPE_CONF_CHECK_P(name) do { \
   6050	if (current_config->name != pipe_config->name) { \
   6051		pipe_config_mismatch(fastset, crtc, __stringify(name), \
   6052				     "(expected %p, found %p)", \
   6053				     current_config->name, \
   6054				     pipe_config->name); \
   6055		ret = false; \
   6056	} \
   6057} while (0)
   6058
   6059#define PIPE_CONF_CHECK_M_N(name) do { \
   6060	if (!intel_compare_link_m_n(&current_config->name, \
   6061				    &pipe_config->name,\
   6062				    !fastset)) { \
   6063		pipe_config_mismatch(fastset, crtc, __stringify(name), \
   6064				     "(expected tu %i data %i/%i link %i/%i, " \
   6065				     "found tu %i, data %i/%i link %i/%i)", \
   6066				     current_config->name.tu, \
   6067				     current_config->name.data_m, \
   6068				     current_config->name.data_n, \
   6069				     current_config->name.link_m, \
   6070				     current_config->name.link_n, \
   6071				     pipe_config->name.tu, \
   6072				     pipe_config->name.data_m, \
   6073				     pipe_config->name.data_n, \
   6074				     pipe_config->name.link_m, \
   6075				     pipe_config->name.link_n); \
   6076		ret = false; \
   6077	} \
   6078} while (0)
   6079
   6080/* This is required for BDW+ where there is only one set of registers for
   6081 * switching between high and low RR.
   6082 * This macro can be used whenever a comparison has to be made between one
   6083 * hw state and multiple sw state variables.
   6084 */
   6085#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
   6086	if (!intel_compare_link_m_n(&current_config->name, \
   6087				    &pipe_config->name, !fastset) && \
   6088	    !intel_compare_link_m_n(&current_config->alt_name, \
   6089				    &pipe_config->name, !fastset)) { \
   6090		pipe_config_mismatch(fastset, crtc, __stringify(name), \
   6091				     "(expected tu %i data %i/%i link %i/%i, " \
   6092				     "or tu %i data %i/%i link %i/%i, " \
   6093				     "found tu %i, data %i/%i link %i/%i)", \
   6094				     current_config->name.tu, \
   6095				     current_config->name.data_m, \
   6096				     current_config->name.data_n, \
   6097				     current_config->name.link_m, \
   6098				     current_config->name.link_n, \
   6099				     current_config->alt_name.tu, \
   6100				     current_config->alt_name.data_m, \
   6101				     current_config->alt_name.data_n, \
   6102				     current_config->alt_name.link_m, \
   6103				     current_config->alt_name.link_n, \
   6104				     pipe_config->name.tu, \
   6105				     pipe_config->name.data_m, \
   6106				     pipe_config->name.data_n, \
   6107				     pipe_config->name.link_m, \
   6108				     pipe_config->name.link_n); \
   6109		ret = false; \
   6110	} \
   6111} while (0)
   6112
   6113#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
   6114	if ((current_config->name ^ pipe_config->name) & (mask)) { \
   6115		pipe_config_mismatch(fastset, crtc, __stringify(name), \
   6116				     "(%x) (expected %i, found %i)", \
   6117				     (mask), \
   6118				     current_config->name & (mask), \
   6119				     pipe_config->name & (mask)); \
   6120		ret = false; \
   6121	} \
   6122} while (0)
   6123
   6124#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
   6125	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
   6126		pipe_config_mismatch(fastset, crtc, __stringify(name), \
   6127				     "(expected %i, found %i)", \
   6128				     current_config->name, \
   6129				     pipe_config->name); \
   6130		ret = false; \
   6131	} \
   6132} while (0)
   6133
   6134#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
   6135	if (!intel_compare_infoframe(&current_config->infoframes.name, \
   6136				     &pipe_config->infoframes.name)) { \
   6137		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
   6138					       &current_config->infoframes.name, \
   6139					       &pipe_config->infoframes.name); \
   6140		ret = false; \
   6141	} \
   6142} while (0)
   6143
   6144#define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
   6145	if (!current_config->has_psr && !pipe_config->has_psr && \
   6146	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
   6147				      &pipe_config->infoframes.name)) { \
   6148		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
   6149						&current_config->infoframes.name, \
   6150						&pipe_config->infoframes.name); \
   6151		ret = false; \
   6152	} \
   6153} while (0)
   6154
   6155#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
   6156	if (current_config->name1 != pipe_config->name1) { \
   6157		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
   6158				"(expected %i, found %i, won't compare lut values)", \
   6159				current_config->name1, \
   6160				pipe_config->name1); \
   6161		ret = false;\
   6162	} else { \
   6163		if (!intel_color_lut_equal(current_config->name2, \
   6164					pipe_config->name2, pipe_config->name1, \
   6165					bit_precision)) { \
   6166			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
   6167					"hw_state doesn't match sw_state"); \
   6168			ret = false; \
   6169		} \
   6170	} \
   6171} while (0)
   6172
   6173#define PIPE_CONF_QUIRK(quirk) \
   6174	((current_config->quirks | pipe_config->quirks) & (quirk))
   6175
   6176	PIPE_CONF_CHECK_I(cpu_transcoder);
   6177
   6178	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
   6179	PIPE_CONF_CHECK_I(fdi_lanes);
   6180	PIPE_CONF_CHECK_M_N(fdi_m_n);
   6181
   6182	PIPE_CONF_CHECK_I(lane_count);
   6183	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
   6184
   6185	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) {
   6186		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
   6187	} else {
   6188		PIPE_CONF_CHECK_M_N(dp_m_n);
   6189		PIPE_CONF_CHECK_M_N(dp_m2_n2);
   6190	}
   6191
   6192	PIPE_CONF_CHECK_X(output_types);
   6193
   6194	PIPE_CONF_CHECK_I(framestart_delay);
   6195	PIPE_CONF_CHECK_I(msa_timing_delay);
   6196
   6197	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
   6198	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
   6199	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
   6200	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
   6201	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
   6202	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
   6203
   6204	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
   6205	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
   6206	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
   6207	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
   6208	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
   6209	PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
   6210
   6211	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
   6212	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
   6213	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
   6214	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
   6215	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
   6216	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
   6217
   6218	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
   6219	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
   6220	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
   6221	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
   6222	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
   6223	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
   6224
   6225	PIPE_CONF_CHECK_I(pixel_multiplier);
   6226
   6227	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
   6228			      DRM_MODE_FLAG_INTERLACE);
   6229
   6230	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
   6231		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
   6232				      DRM_MODE_FLAG_PHSYNC);
   6233		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
   6234				      DRM_MODE_FLAG_NHSYNC);
   6235		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
   6236				      DRM_MODE_FLAG_PVSYNC);
   6237		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
   6238				      DRM_MODE_FLAG_NVSYNC);
   6239	}
   6240
   6241	PIPE_CONF_CHECK_I(output_format);
   6242	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
   6243	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
   6244	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
   6245		PIPE_CONF_CHECK_BOOL(limited_color_range);
   6246
   6247	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
   6248	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
   6249	PIPE_CONF_CHECK_BOOL(has_infoframe);
   6250	PIPE_CONF_CHECK_BOOL(fec_enable);
   6251
   6252	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
   6253
   6254	PIPE_CONF_CHECK_X(gmch_pfit.control);
   6255	/* pfit ratios are autocomputed by the hw on gen4+ */
   6256	if (DISPLAY_VER(dev_priv) < 4)
   6257		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
   6258	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
   6259
   6260	/*
   6261	 * Changing the EDP transcoder input mux
   6262	 * (A_ONOFF vs. A_ON) requires a full modeset.
   6263	 */
   6264	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
   6265
   6266	if (!fastset) {
   6267		PIPE_CONF_CHECK_I(pipe_src.x1);
   6268		PIPE_CONF_CHECK_I(pipe_src.y1);
   6269		PIPE_CONF_CHECK_I(pipe_src.x2);
   6270		PIPE_CONF_CHECK_I(pipe_src.y2);
   6271
   6272		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
   6273		if (current_config->pch_pfit.enabled) {
   6274			PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
   6275			PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
   6276			PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
   6277			PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
   6278		}
   6279
   6280		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
   6281		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
   6282
   6283		PIPE_CONF_CHECK_X(gamma_mode);
   6284		if (IS_CHERRYVIEW(dev_priv))
   6285			PIPE_CONF_CHECK_X(cgm_mode);
   6286		else
   6287			PIPE_CONF_CHECK_X(csc_mode);
   6288		PIPE_CONF_CHECK_BOOL(gamma_enable);
   6289		PIPE_CONF_CHECK_BOOL(csc_enable);
   6290
   6291		PIPE_CONF_CHECK_I(linetime);
   6292		PIPE_CONF_CHECK_I(ips_linetime);
   6293
   6294		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
   6295		if (bp_gamma)
   6296			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
   6297
   6298		if (current_config->active_planes) {
   6299			PIPE_CONF_CHECK_BOOL(has_psr);
   6300			PIPE_CONF_CHECK_BOOL(has_psr2);
   6301			PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
   6302			PIPE_CONF_CHECK_I(dc3co_exitline);
   6303		}
   6304	}
   6305
   6306	PIPE_CONF_CHECK_BOOL(double_wide);
   6307
   6308	if (dev_priv->dpll.mgr) {
   6309		PIPE_CONF_CHECK_P(shared_dpll);
   6310
   6311		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
   6312		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
   6313		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
   6314		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
   6315		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
   6316		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
   6317		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
   6318		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
   6319		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
   6320		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
   6321		PIPE_CONF_CHECK_X(dpll_hw_state.div0);
   6322		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
   6323		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
   6324		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
   6325		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
   6326		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
   6327		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
   6328		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
   6329		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
   6330		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
   6331		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
   6332		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
   6333		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
   6334		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
   6335		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
   6336		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
   6337		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
   6338		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
   6339		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
   6340		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
   6341		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
   6342		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
   6343	}
   6344
   6345	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
   6346	PIPE_CONF_CHECK_X(dsi_pll.div);
   6347
   6348	if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
   6349		PIPE_CONF_CHECK_I(pipe_bpp);
   6350
   6351	PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
   6352	PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
   6353	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
   6354
   6355	PIPE_CONF_CHECK_I(min_voltage_level);
   6356
   6357	if (current_config->has_psr || pipe_config->has_psr)
   6358		PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
   6359					    ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
   6360	else
   6361		PIPE_CONF_CHECK_X(infoframes.enable);
   6362
   6363	PIPE_CONF_CHECK_X(infoframes.gcp);
   6364	PIPE_CONF_CHECK_INFOFRAME(avi);
   6365	PIPE_CONF_CHECK_INFOFRAME(spd);
   6366	PIPE_CONF_CHECK_INFOFRAME(hdmi);
   6367	PIPE_CONF_CHECK_INFOFRAME(drm);
   6368	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
   6369
   6370	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
   6371	PIPE_CONF_CHECK_I(master_transcoder);
   6372	PIPE_CONF_CHECK_X(bigjoiner_pipes);
   6373
   6374	PIPE_CONF_CHECK_I(dsc.compression_enable);
   6375	PIPE_CONF_CHECK_I(dsc.dsc_split);
   6376	PIPE_CONF_CHECK_I(dsc.compressed_bpp);
   6377
   6378	PIPE_CONF_CHECK_BOOL(splitter.enable);
   6379	PIPE_CONF_CHECK_I(splitter.link_count);
   6380	PIPE_CONF_CHECK_I(splitter.pixel_overlap);
   6381
   6382	PIPE_CONF_CHECK_I(mst_master_transcoder);
   6383
   6384	PIPE_CONF_CHECK_BOOL(vrr.enable);
   6385	PIPE_CONF_CHECK_I(vrr.vmin);
   6386	PIPE_CONF_CHECK_I(vrr.vmax);
   6387	PIPE_CONF_CHECK_I(vrr.flipline);
   6388	PIPE_CONF_CHECK_I(vrr.pipeline_full);
   6389	PIPE_CONF_CHECK_I(vrr.guardband);
   6390
   6391#undef PIPE_CONF_CHECK_X
   6392#undef PIPE_CONF_CHECK_I
   6393#undef PIPE_CONF_CHECK_BOOL
   6394#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
   6395#undef PIPE_CONF_CHECK_P
   6396#undef PIPE_CONF_CHECK_FLAGS
   6397#undef PIPE_CONF_CHECK_CLOCK_FUZZY
   6398#undef PIPE_CONF_CHECK_COLOR_LUT
   6399#undef PIPE_CONF_QUIRK
   6400
   6401	return ret;
   6402}
   6403
   6404static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
   6405					   const struct intel_crtc_state *pipe_config)
   6406{
   6407	if (pipe_config->has_pch_encoder) {
   6408		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
   6409							    &pipe_config->fdi_m_n);
   6410		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
   6411
   6412		/*
   6413		 * FDI already provided one idea for the dotclock.
   6414		 * Yell if the encoder disagrees.
   6415		 */
   6416		drm_WARN(&dev_priv->drm,
   6417			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
   6418			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
   6419			 fdi_dotclock, dotclock);
   6420	}
   6421}
   6422
/*
 * Cross-check the software watermark and DDB state of @new_crtc_state
 * against what the hardware actually has programmed (display ver 9+).
 * Any mismatch is reported with drm_err() but not corrected.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* Scratch buffer for the hw readout; heap-allocated via kzalloc below. */
	struct skl_hw_state {
		struct skl_ddb_entry ddb[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
	int level, max_level = ilk_wm_max_level(dev_priv);
	struct intel_plane *plane;
	u8 hw_enabled_slices;

	/* SKL-style watermarks only exist on display version 9+, and only
	 * an active pipe has hw state worth comparing. */
	if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	/* Read back the programmed watermark and DDB allocation state. */
	skl_pipe_wm_get_hw_state(crtc, &hw->wm);

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

	/* DBUF slice tracking only exists on display version 11+. */
	if (DISPLAY_VER(dev_priv) >= 11 &&
	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
		drm_err(&dev_priv->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			dev_priv->dbuf.enabled_slices,
			hw_enabled_slices);

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
		const struct skl_wm_level *hw_wm_level, *sw_wm_level;

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);

			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
				continue;

			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name, level,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* Transition watermark. */
		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);

		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* SAGV WM0, only on platforms with dedicated SAGV watermarks. */
		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;

		if (HAS_HW_SAGV_WM(dev_priv) &&
		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* SAGV transition watermark. */
		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;

		if (HAS_HW_SAGV_WM(dev_priv) &&
		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				plane->base.base.id, plane->base.name,
				sw_wm_level->enable,
				sw_wm_level->blocks,
				sw_wm_level->lines,
				hw_wm_level->enable,
				hw_wm_level->blocks,
				hw_wm_level->lines);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
				plane->base.base.id, plane->base.name,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
   6542
/*
 * Verify the connector states in @state that target @crtc.
 *
 * NOTE(review): this is also called with @crtc == NULL (disabled
 * verification); the "new_conn_state->crtc != &crtc->base" comparison
 * then presumably degenerates to a NULL check, which relies on 'base'
 * being intel_crtc's first member — confirm against the struct layout.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct intel_crtc_state *crtc_state = NULL;

		/* Only look at connectors routed to the crtc being verified. */
		if (new_conn_state->crtc != &crtc->base)
			continue;

		if (crtc)
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		/* The atomic best_encoder must agree with the legacy pointer. */
		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
   6567
/*
 * For every encoder touched by @state, verify that its software
 * enabled/disabled tracking matches both the connector states and the
 * actual hardware state.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/*
		 * found: some connector in @state referenced this encoder
		 *        (old or new state) - i.e. the encoder is affected.
		 * enabled: a *new* connector state still uses this encoder.
		 */
		bool enabled = false, found = false;
		enum pipe pipe;

		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
			    encoder->base.base.id,
			    encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			/* The connector and its encoder must agree on the crtc. */
			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* Skip encoders not touched by this atomic state. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		/* A detached encoder must also be off in hardware. */
		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
   6616
/*
 * Read the crtc's hardware state back and compare it against the
 * software state in @new_crtc_state. @old_crtc_state is no longer
 * needed at this point, so it is reset and reused as scratch storage
 * for the hardware readout ("pipe_config" below).
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	struct intel_crtc *master_crtc;

	/* Wipe the old state but preserve its drm_atomic_state backpointer. */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	pipe_config->hw.enable = new_crtc_state->hw.enable;

	intel_crtc_get_pipe_config(pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv) && pipe_config->hw.active)
		pipe_config->hw.active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, pipe_config->hw.active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* Encoders hang off the master crtc in a bigjoiner configuration. */
	master_crtc = intel_master_crtc(new_crtc_state);

	for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
		enum pipe pipe;
		bool active;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && master_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Fold the encoder's view of the config into the readout. */
		if (active)
			intel_encoder_get_config(encoder, pipe_config);
	}

	/* Nothing more to compare for an inactive crtc. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
   6687
   6688static void
   6689intel_verify_planes(struct intel_atomic_state *state)
   6690{
   6691	struct intel_plane *plane;
   6692	const struct intel_plane_state *plane_state;
   6693	int i;
   6694
   6695	for_each_new_intel_plane_in_state(state, plane,
   6696					  plane_state, i)
   6697		assert_plane(plane, plane_state->planar_slave ||
   6698			     plane_state->uapi.visible);
   6699}
   6700
/*
 * Verify a single shared DPLL's software tracking against its
 * hardware state. When @crtc is NULL only the global bookkeeping
 * (active_mask vs. pipe_mask) is checked; otherwise the crtc's own
 * membership in those masks is verified as well.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs have no meaningful on/off tracking to verify. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active pipe\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* Active pipes must be a subset of the pipes holding a reference. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	/* The crtc's presence in active_mask must match its active state. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & pipe_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

	/* If the PLL is on, the tracked hw state must match the readout. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
   6755
   6756static void
   6757verify_shared_dpll_state(struct intel_crtc *crtc,
   6758			 struct intel_crtc_state *old_crtc_state,
   6759			 struct intel_crtc_state *new_crtc_state)
   6760{
   6761	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   6762
   6763	if (new_crtc_state->shared_dpll)
   6764		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
   6765
   6766	if (old_crtc_state->shared_dpll &&
   6767	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
   6768		u8 pipe_mask = BIT(crtc->pipe);
   6769		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
   6770
   6771		I915_STATE_WARN(pll->active_mask & pipe_mask,
   6772				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
   6773				pipe_name(crtc->pipe), pll->active_mask);
   6774		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
   6775				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
   6776				pipe_name(crtc->pipe), pll->state.pipe_mask);
   6777	}
   6778}
   6779
/*
 * Verify the software MPLLB state against the hardware readout.
 * DG2 (SNPS PHY) only; mismatches are reported via
 * pipe_config_mismatch() but do not affect the commit.
 */
static void
verify_mpllb_state(struct intel_atomic_state *state,
		   struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_mpllb_state mpllb_hw_state = { 0 };
	struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct intel_encoder *encoder;

	if (!IS_DG2(i915))
		return;

	if (!new_crtc_state->hw.active)
		return;

	encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
	intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);

/* Compare one MPLLB register field between sw and hw state. */
#define MPLLB_CHECK(name) do { \
	if (mpllb_sw_state->name != mpllb_hw_state.name) { \
		pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     mpllb_sw_state->name, \
				     mpllb_hw_state.name); \
	} \
} while (0)

	MPLLB_CHECK(mpllb_cp);
	MPLLB_CHECK(mpllb_div);
	MPLLB_CHECK(mpllb_div2);
	MPLLB_CHECK(mpllb_fracn1);
	MPLLB_CHECK(mpllb_fracn2);
	MPLLB_CHECK(mpllb_sscen);
	MPLLB_CHECK(mpllb_sscstep);

	/*
	 * ref_control is handled by the hardware/firmware and never
	 * programmed by the software, but the proper values are supplied
	 * in the bspec for verification purposes.
	 */
	MPLLB_CHECK(ref_control);

#undef MPLLB_CHECK
}
   6825
   6826static void
   6827intel_modeset_verify_crtc(struct intel_crtc *crtc,
   6828			  struct intel_atomic_state *state,
   6829			  struct intel_crtc_state *old_crtc_state,
   6830			  struct intel_crtc_state *new_crtc_state)
   6831{
   6832	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
   6833		return;
   6834
   6835	verify_wm_state(crtc, new_crtc_state);
   6836	verify_connector_state(state, crtc);
   6837	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
   6838	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
   6839	verify_mpllb_state(state, new_crtc_state);
   6840}
   6841
   6842static void
   6843verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
   6844{
   6845	int i;
   6846
   6847	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
   6848		verify_single_dpll_state(dev_priv,
   6849					 &dev_priv->dpll.shared_dplls[i],
   6850					 NULL, NULL);
   6851}
   6852
/*
 * Verify everything that is *not* tied to a specific enabled crtc:
 * encoder on/off tracking, connectors without a crtc, and the
 * shared DPLL bookkeeping.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
   6861
   6862int intel_modeset_all_pipes(struct intel_atomic_state *state)
   6863{
   6864	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   6865	struct intel_crtc *crtc;
   6866
   6867	/*
   6868	 * Add all pipes to the state, and force
   6869	 * a modeset on all the active ones.
   6870	 */
   6871	for_each_intel_crtc(&dev_priv->drm, crtc) {
   6872		struct intel_crtc_state *crtc_state;
   6873		int ret;
   6874
   6875		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
   6876		if (IS_ERR(crtc_state))
   6877			return PTR_ERR(crtc_state);
   6878
   6879		if (!crtc_state->hw.active ||
   6880		    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
   6881			continue;
   6882
   6883		crtc_state->uapi.mode_changed = true;
   6884
   6885		ret = drm_atomic_add_affected_connectors(&state->base,
   6886							 &crtc->base);
   6887		if (ret)
   6888			return ret;
   6889
   6890		ret = intel_atomic_add_affected_planes(state, crtc);
   6891		if (ret)
   6892			return ret;
   6893
   6894		crtc_state->update_planes |= crtc_state->active_planes;
   6895	}
   6896
   6897	return 0;
   6898}
   6899
/*
 * Update the crtc's vblank timestamping constants and scanline offset
 * from @crtc_state. With VRR enabled the timings are first stretched
 * to the vmax-based worst case so the timestamping constants stay
 * valid for the whole VRR range.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode adjusted_mode;

	drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);

	/* For VRR, base the constants on the maximum vertical timings. */
	if (crtc_state->vrr.enable) {
		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
	}

	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (DISPLAY_VER(dev_priv) == 2) {
		int vtotal;

		vtotal = adjusted_mode.crtc_vtotal;
		/* Interlaced modes count half the frame's lines. */
		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
   6962
   6963static void intel_modeset_clear_plls(struct intel_atomic_state *state)
   6964{
   6965	struct intel_crtc_state *new_crtc_state;
   6966	struct intel_crtc *crtc;
   6967	int i;
   6968
   6969	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
   6970		if (!intel_crtc_needs_modeset(new_crtc_state))
   6971			continue;
   6972
   6973		intel_release_shared_dplls(state, crtc);
   6974	}
   6975}
   6976
   6977/*
   6978 * This implements the workaround described in the "notes" section of the mode
   6979 * set sequence documentation. When going from no pipes or single pipe to
   6980 * multiple pipes, and planes are enabled after the pipe, we need to wait at
   6981 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
   6982 */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during the modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !intel_crtc_needs_modeset(crtc_state))
			continue;

		/* remember the first two crtcs being enabled */
		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/* pull every crtc into the state so it can be inspected */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    intel_crtc_needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * Exactly one crtc stays enabled: the first newly enabled pipe
	 * must wait on it. If no pipe stays enabled, the second newly
	 * enabled pipe (if any) waits on the first one instead.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
   7037
   7038u8 intel_calc_active_pipes(struct intel_atomic_state *state,
   7039			   u8 active_pipes)
   7040{
   7041	const struct intel_crtc_state *crtc_state;
   7042	struct intel_crtc *crtc;
   7043	int i;
   7044
   7045	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
   7046		if (crtc_state->hw.active)
   7047			active_pipes |= BIT(crtc->pipe);
   7048		else
   7049			active_pipes &= ~BIT(crtc->pipe);
   7050	}
   7051
   7052	return active_pipes;
   7053}
   7054
   7055static int intel_modeset_checks(struct intel_atomic_state *state)
   7056{
   7057	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   7058
   7059	state->modeset = true;
   7060
   7061	if (IS_HASWELL(dev_priv))
   7062		return hsw_mode_set_planes_workaround(state);
   7063
   7064	return 0;
   7065}
   7066
   7067static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
   7068				     struct intel_crtc_state *new_crtc_state)
   7069{
   7070	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
   7071		return;
   7072
   7073	new_crtc_state->uapi.mode_changed = false;
   7074	new_crtc_state->update_pipe = true;
   7075}
   7076
   7077static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
   7078				    struct intel_crtc_state *new_crtc_state)
   7079{
   7080	/*
   7081	 * If we're not doing the full modeset we want to
   7082	 * keep the current M/N values as they may be
   7083	 * sufficiently different to the computed values
   7084	 * to cause problems.
   7085	 *
   7086	 * FIXME: should really copy more fuzzy state here
   7087	 */
   7088	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
   7089	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
   7090	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
   7091	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
   7092}
   7093
   7094static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
   7095					  struct intel_crtc *crtc,
   7096					  u8 plane_ids_mask)
   7097{
   7098	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   7099	struct intel_plane *plane;
   7100
   7101	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
   7102		struct intel_plane_state *plane_state;
   7103
   7104		if ((plane_ids_mask & BIT(plane->id)) == 0)
   7105			continue;
   7106
   7107		plane_state = intel_atomic_get_plane_state(state, plane);
   7108		if (IS_ERR(plane_state))
   7109			return PTR_ERR(plane_state);
   7110	}
   7111
   7112	return 0;
   7113}
   7114
   7115int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
   7116				     struct intel_crtc *crtc)
   7117{
   7118	const struct intel_crtc_state *old_crtc_state =
   7119		intel_atomic_get_old_crtc_state(state, crtc);
   7120	const struct intel_crtc_state *new_crtc_state =
   7121		intel_atomic_get_new_crtc_state(state, crtc);
   7122
   7123	return intel_crtc_add_planes_to_state(state, crtc,
   7124					      old_crtc_state->enabled_planes |
   7125					      new_crtc_state->enabled_planes);
   7126}
   7127
   7128static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
   7129{
   7130	/* See {hsw,vlv,ivb}_plane_ratio() */
   7131	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
   7132		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
   7133		IS_IVYBRIDGE(dev_priv);
   7134}
   7135
   7136static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
   7137					   struct intel_crtc *crtc,
   7138					   struct intel_crtc *other)
   7139{
   7140	const struct intel_plane_state *plane_state;
   7141	struct intel_plane *plane;
   7142	u8 plane_ids = 0;
   7143	int i;
   7144
   7145	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
   7146		if (plane->pipe == crtc->pipe)
   7147			plane_ids |= BIT(plane->id);
   7148	}
   7149
   7150	return intel_crtc_add_planes_to_state(state, other, plane_ids);
   7151}
   7152
   7153static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
   7154{
   7155	struct drm_i915_private *i915 = to_i915(state->base.dev);
   7156	const struct intel_crtc_state *crtc_state;
   7157	struct intel_crtc *crtc;
   7158	int i;
   7159
   7160	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
   7161		struct intel_crtc *other;
   7162
   7163		for_each_intel_crtc_in_pipe_mask(&i915->drm, other,
   7164						 crtc_state->bigjoiner_pipes) {
   7165			int ret;
   7166
   7167			if (crtc == other)
   7168				continue;
   7169
   7170			ret = intel_crtc_add_bigjoiner_planes(state, crtc, other);
   7171			if (ret)
   7172				return ret;
   7173		}
   7174	}
   7175
   7176	return 0;
   7177}
   7178
/*
 * Plane related atomic checks: add linked/bigjoiner planes to the state,
 * run the per-plane checks, and on platforms where the number of active
 * planes affects the minimum cdclk, add the affected planes to the state
 * before the cdclk is computed.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	ret = intel_bigjoiner_add_affected_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* the cursor plane does not count for the plane ratios */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/* min cdclk only depends on the plane count, not which planes */
		if (hweight8(old_active_planes) == hweight8(new_active_planes))
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
   7235
   7236static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
   7237{
   7238	struct intel_crtc_state *crtc_state;
   7239	struct intel_crtc *crtc;
   7240	int i;
   7241
   7242	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
   7243		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
   7244		int ret;
   7245
   7246		ret = intel_crtc_atomic_check(state, crtc);
   7247		if (ret) {
   7248			drm_dbg_atomic(&i915->drm,
   7249				       "[CRTC:%d:%s] atomic driver check failed\n",
   7250				       crtc->base.base.id, crtc->base.name);
   7251			return ret;
   7252		}
   7253	}
   7254
   7255	return 0;
   7256}
   7257
   7258static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
   7259					       u8 transcoders)
   7260{
   7261	const struct intel_crtc_state *new_crtc_state;
   7262	struct intel_crtc *crtc;
   7263	int i;
   7264
   7265	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
   7266		if (new_crtc_state->hw.enable &&
   7267		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
   7268		    intel_crtc_needs_modeset(new_crtc_state))
   7269			return true;
   7270	}
   7271
   7272	return false;
   7273}
   7274
   7275static bool intel_pipes_need_modeset(struct intel_atomic_state *state,
   7276				     u8 pipes)
   7277{
   7278	const struct intel_crtc_state *new_crtc_state;
   7279	struct intel_crtc *crtc;
   7280	int i;
   7281
   7282	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
   7283		if (new_crtc_state->hw.enable &&
   7284		    pipes & BIT(crtc->pipe) &&
   7285		    intel_crtc_needs_modeset(new_crtc_state))
   7286			return true;
   7287	}
   7288
   7289	return false;
   7290}
   7291
/*
 * Validate and set up the bigjoiner configuration of @master_crtc:
 * sanity check the pipe masks, pull each slave crtc into the state,
 * reject slaves that userspace has enabled as normal crtcs, and copy
 * the master's modeset state onto the slaves.
 */
static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
					struct intel_crtc *master_crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);
	struct intel_crtc *slave_crtc;

	/* nothing to do without a bigjoiner configuration */
	if (!master_crtc_state->bigjoiner_pipes)
		return 0;

	/* sanity check */
	if (drm_WARN_ON(&i915->drm,
			master_crtc->pipe != bigjoiner_master_pipe(master_crtc_state)))
		return -EINVAL;

	if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Cannot act as big joiner master "
			    "(need 0x%x as pipes, only 0x%x possible)\n",
			    master_crtc->base.base.id, master_crtc->base.name,
			    master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915));
		return -EINVAL;
	}

	for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
					 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
		struct intel_crtc_state *slave_crtc_state;
		int ret;

		slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
		if (IS_ERR(slave_crtc_state))
			return PTR_ERR(slave_crtc_state);

		/* master being enabled, slave was already configured? */
		if (slave_crtc_state->uapi.enable) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
				    "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
				    slave_crtc->base.base.id, slave_crtc->base.name,
				    master_crtc->base.base.id, master_crtc->base.name);
			return -EINVAL;
		}

		/*
		 * The state copy logic assumes the master crtc gets processed
		 * before the slave crtc during the main compute_config loop.
		 * This works because the crtcs are created in pipe order,
		 * and the hardware requires master pipe < slave pipe as well.
		 * Should that change we need to rethink the logic.
		 */
		if (WARN_ON(drm_crtc_index(&master_crtc->base) >
			    drm_crtc_index(&slave_crtc->base)))
			return -EINVAL;

		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
			    slave_crtc->base.base.id, slave_crtc->base.name,
			    master_crtc->base.base.id, master_crtc->base.name);

		/* the slave inherits the master's bigjoiner pipe mask */
		slave_crtc_state->bigjoiner_pipes =
			master_crtc_state->bigjoiner_pipes;

		ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
		if (ret)
			return ret;
	}

	return 0;
}
   7362
/*
 * Sever the bigjoiner link of @master_crtc: clear the bigjoiner pipe
 * masks on the master and all of its slaves, and re-sync each slave's
 * hw state from its uapi state.
 */
static void kill_bigjoiner_slave(struct intel_atomic_state *state,
				 struct intel_crtc *master_crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *master_crtc_state =
		intel_atomic_get_new_crtc_state(state, master_crtc);
	struct intel_crtc *slave_crtc;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
					 intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
		struct intel_crtc_state *slave_crtc_state =
			intel_atomic_get_new_crtc_state(state, slave_crtc);

		slave_crtc_state->bigjoiner_pipes = 0;

		intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
	}

	master_crtc_state->bigjoiner_pipes = 0;
}
   7383
   7384/**
   7385 * DOC: asynchronous flip implementation
   7386 *
   7387 * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
   7388 * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
   7389 * Correspondingly, support is currently added for primary plane only.
   7390 *
   7391 * Async flip can only change the plane surface address, so anything else
   7392 * changing is rejected from the intel_async_flip_check_hw() function.
   7393 * Once this check is cleared, flip done interrupt is enabled using
   7394 * the intel_crtc_enable_flip_done() function.
   7395 *
   7396 * As soon as the surface address register is written, flip done interrupt is
 * generated and the requested events are sent to userspace in the interrupt
   7398 * handler itself. The timestamp and sequence sent during the flip done event
   7399 * correspond to the last vblank and have no relation to the actual time when
   7400 * the flip done event was sent.
   7401 */
/*
 * Check the uapi state of an async flip request, before any hw state
 * computation: the crtc must be active without requiring a modeset, and
 * every plane in the state on this pipe must support async flips and
 * have both an old and a new framebuffer.
 */
static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state;
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	if (!new_crtc_state->uapi.async_flip)
		return 0;

	if (!new_crtc_state->uapi.active) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] not active\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	if (intel_crtc_needs_modeset(new_crtc_state)) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] modeset required\n",
			    crtc->base.base.id, crtc->base.name);
		return -EINVAL;
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		if (plane->pipe != crtc->pipe)
			continue;

		/*
		 * TODO: Async flip is only supported through the page flip
		 * IOCTL as of now. So support is currently added for the
		 * primary plane only. Support for other planes on platforms
		 * that support this (vlv/chv and icl+) should be added when
		 * async flip is enabled in the atomic IOCTL path.
		 */
		if (!plane->async_flip) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] async flip not supported\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}

		if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] no old or new framebuffer\n",
				    plane->base.base.id, plane->base.name);
			return -EINVAL;
		}
	}

	return 0;
}
   7459
   7460static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc)
   7461{
   7462	struct drm_i915_private *i915 = to_i915(state->base.dev);
   7463	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
   7464	const struct intel_plane_state *new_plane_state, *old_plane_state;
   7465	struct intel_plane *plane;
   7466	int i;
   7467
   7468	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
   7469	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
   7470
   7471	if (!new_crtc_state->uapi.async_flip)
   7472		return 0;
   7473
   7474	if (!new_crtc_state->hw.active) {
   7475		drm_dbg_kms(&i915->drm,
   7476			    "[CRTC:%d:%s] not active\n",
   7477			    crtc->base.base.id, crtc->base.name);
   7478		return -EINVAL;
   7479	}
   7480
   7481	if (intel_crtc_needs_modeset(new_crtc_state)) {
   7482		drm_dbg_kms(&i915->drm,
   7483			    "[CRTC:%d:%s] modeset required\n",
   7484			    crtc->base.base.id, crtc->base.name);
   7485		return -EINVAL;
   7486	}
   7487
   7488	if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
   7489		drm_dbg_kms(&i915->drm,
   7490			    "[CRTC:%d:%s] Active planes cannot be in async flip\n",
   7491			    crtc->base.base.id, crtc->base.name);
   7492		return -EINVAL;
   7493	}
   7494
   7495	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
   7496					     new_plane_state, i) {
   7497		if (plane->pipe != crtc->pipe)
   7498			continue;
   7499
   7500		/*
   7501		 * Only async flip capable planes should be in the state
   7502		 * if we're really about to ask the hardware to perform
   7503		 * an async flip. We should never get this far otherwise.
   7504		 */
   7505		if (drm_WARN_ON(&i915->drm,
   7506				new_crtc_state->do_async_flip && !plane->async_flip))
   7507			return -EINVAL;
   7508
   7509		/*
   7510		 * Only check async flip capable planes other planes
   7511		 * may be involved in the initial commit due to
   7512		 * the wm0/ddb optimization.
   7513		 *
   7514		 * TODO maybe should track which planes actually
   7515		 * were requested to do the async flip...
   7516		 */
   7517		if (!plane->async_flip)
   7518			continue;
   7519
   7520		/*
   7521		 * FIXME: This check is kept generic for all platforms.
   7522		 * Need to verify this for all gen9 platforms to enable
   7523		 * this selectively if required.
   7524		 */
   7525		switch (new_plane_state->hw.fb->modifier) {
   7526		case I915_FORMAT_MOD_X_TILED:
   7527		case I915_FORMAT_MOD_Y_TILED:
   7528		case I915_FORMAT_MOD_Yf_TILED:
   7529		case I915_FORMAT_MOD_4_TILED:
   7530			break;
   7531		default:
   7532			drm_dbg_kms(&i915->drm,
   7533				    "[PLANE:%d:%s] Modifier does not support async flips\n",
   7534				    plane->base.base.id, plane->base.name);
   7535			return -EINVAL;
   7536		}
   7537
   7538		if (new_plane_state->hw.fb->format->num_planes > 1) {
   7539			drm_dbg_kms(&i915->drm,
   7540				    "[PLANE:%d:%s] Planar formats do not support async flips\n",
   7541				    plane->base.base.id, plane->base.name);
   7542			return -EINVAL;
   7543		}
   7544
   7545		if (old_plane_state->view.color_plane[0].mapping_stride !=
   7546		    new_plane_state->view.color_plane[0].mapping_stride) {
   7547			drm_dbg_kms(&i915->drm,
   7548				    "[PLANE:%d:%s] Stride cannot be changed in async flip\n",
   7549				    plane->base.base.id, plane->base.name);
   7550			return -EINVAL;
   7551		}
   7552
   7553		if (old_plane_state->hw.fb->modifier !=
   7554		    new_plane_state->hw.fb->modifier) {
   7555			drm_dbg_kms(&i915->drm,
   7556				    "[PLANE:%d:%s] Modifier cannot be changed in async flip\n",
   7557				    plane->base.base.id, plane->base.name);
   7558			return -EINVAL;
   7559		}
   7560
   7561		if (old_plane_state->hw.fb->format !=
   7562		    new_plane_state->hw.fb->format) {
   7563			drm_dbg_kms(&i915->drm,
   7564				    "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n",
   7565				    plane->base.base.id, plane->base.name);
   7566			return -EINVAL;
   7567		}
   7568
   7569		if (old_plane_state->hw.rotation !=
   7570		    new_plane_state->hw.rotation) {
   7571			drm_dbg_kms(&i915->drm,
   7572				    "[PLANE:%d:%s] Rotation cannot be changed in async flip\n",
   7573				    plane->base.base.id, plane->base.name);
   7574			return -EINVAL;
   7575		}
   7576
   7577		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
   7578		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
   7579			drm_dbg_kms(&i915->drm,
   7580				    "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n",
   7581				    plane->base.base.id, plane->base.name);
   7582			return -EINVAL;
   7583		}
   7584
   7585		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
   7586			drm_dbg_kms(&i915->drm,
   7587				    "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n",
   7588				    plane->base.base.id, plane->base.name);
   7589			return -EINVAL;
   7590		}
   7591
   7592		if (old_plane_state->hw.pixel_blend_mode !=
   7593		    new_plane_state->hw.pixel_blend_mode) {
   7594			drm_dbg_kms(&i915->drm,
   7595				    "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n",
   7596				    plane->base.base.id, plane->base.name);
   7597			return -EINVAL;
   7598		}
   7599
   7600		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
   7601			drm_dbg_kms(&i915->drm,
   7602				    "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n",
   7603				    plane->base.base.id, plane->base.name);
   7604			return -EINVAL;
   7605		}
   7606
   7607		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
   7608			drm_dbg_kms(&i915->drm,
   7609				    "[PLANE:%d:%s] Color range cannot be changed in async flip\n",
   7610				    plane->base.base.id, plane->base.name);
   7611			return -EINVAL;
   7612		}
   7613
   7614		/* plane decryption is allow to change only in synchronous flips */
   7615		if (old_plane_state->decrypt != new_plane_state->decrypt) {
   7616			drm_dbg_kms(&i915->drm,
   7617				    "[PLANE:%d:%s] Decryption cannot be changed in async flip\n",
   7618				    plane->base.base.id, plane->base.name);
   7619			return -EINVAL;
   7620		}
   7621	}
   7622
   7623	return 0;
   7624}
   7625
/*
 * Pull into the state every crtc that shares a bigjoiner configuration
 * with a crtc already in the state, force a full modeset on all pipes
 * of a configuration whose member needs one, and sever the old
 * bigjoiner links of modesetting masters (they may be re-established
 * later during config computation).
 */
static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	u8 affected_pipes = 0;
	u8 modeset_pipes = 0;
	int i;

	/* collect the bigjoiner pipe masks of all crtcs in the state */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		affected_pipes |= crtc_state->bigjoiner_pipes;
		if (intel_crtc_needs_modeset(crtc_state))
			modeset_pipes |= crtc_state->bigjoiner_pipes;
	}

	/* add all affected crtcs to the state */
	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) {
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	/* force a full modeset on every pipe of a modesetting configuration */
	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) {
		int ret;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = intel_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		/* Kill old bigjoiner link, we may re-establish afterwards */
		if (intel_crtc_needs_modeset(crtc_state) &&
		    intel_crtc_is_bigjoiner_master(crtc_state))
			kill_bigjoiner_slave(state, crtc);
	}

	return 0;
}
   7672
   7673/**
   7674 * intel_atomic_check - validate state object
   7675 * @dev: drm device
   7676 * @_state: state to validate
   7677 */
   7678static int intel_atomic_check(struct drm_device *dev,
   7679			      struct drm_atomic_state *_state)
   7680{
   7681	struct drm_i915_private *dev_priv = to_i915(dev);
   7682	struct intel_atomic_state *state = to_intel_atomic_state(_state);
   7683	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
   7684	struct intel_crtc *crtc;
   7685	int ret, i;
   7686	bool any_ms = false;
   7687
   7688	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   7689					    new_crtc_state, i) {
   7690		if (new_crtc_state->inherited != old_crtc_state->inherited)
   7691			new_crtc_state->uapi.mode_changed = true;
   7692
   7693		if (new_crtc_state->uapi.scaling_filter !=
   7694		    old_crtc_state->uapi.scaling_filter)
   7695			new_crtc_state->uapi.mode_changed = true;
   7696	}
   7697
   7698	intel_vrr_check_modeset(state);
   7699
   7700	ret = drm_atomic_helper_check_modeset(dev, &state->base);
   7701	if (ret)
   7702		goto fail;
   7703
   7704	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
   7705		ret = intel_async_flip_check_uapi(state, crtc);
   7706		if (ret)
   7707			return ret;
   7708	}
   7709
   7710	ret = intel_bigjoiner_add_affected_crtcs(state);
   7711	if (ret)
   7712		goto fail;
   7713
   7714	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   7715					    new_crtc_state, i) {
   7716		if (!intel_crtc_needs_modeset(new_crtc_state)) {
   7717			if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
   7718				copy_bigjoiner_crtc_state_nomodeset(state, crtc);
   7719			else
   7720				intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
   7721			continue;
   7722		}
   7723
   7724		if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
   7725			drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
   7726			continue;
   7727		}
   7728
   7729		ret = intel_crtc_prepare_cleared_state(state, crtc);
   7730		if (ret)
   7731			goto fail;
   7732
   7733		if (!new_crtc_state->hw.enable)
   7734			continue;
   7735
   7736		ret = intel_modeset_pipe_config(state, new_crtc_state);
   7737		if (ret)
   7738			goto fail;
   7739
   7740		ret = intel_atomic_check_bigjoiner(state, crtc);
   7741		if (ret)
   7742			goto fail;
   7743	}
   7744
   7745	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   7746					    new_crtc_state, i) {
   7747		if (!intel_crtc_needs_modeset(new_crtc_state))
   7748			continue;
   7749
   7750		ret = intel_modeset_pipe_config_late(new_crtc_state);
   7751		if (ret)
   7752			goto fail;
   7753
   7754		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
   7755	}
   7756
   7757	/**
   7758	 * Check if fastset is allowed by external dependencies like other
   7759	 * pipes and transcoders.
   7760	 *
   7761	 * Right now it only forces a fullmodeset when the MST master
   7762	 * transcoder did not changed but the pipe of the master transcoder
   7763	 * needs a fullmodeset so all slaves also needs to do a fullmodeset or
   7764	 * in case of port synced crtcs, if one of the synced crtcs
   7765	 * needs a full modeset, all other synced crtcs should be
   7766	 * forced a full modeset.
   7767	 */
   7768	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
   7769		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
   7770			continue;
   7771
   7772		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
   7773			enum transcoder master = new_crtc_state->mst_master_transcoder;
   7774
   7775			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
   7776				new_crtc_state->uapi.mode_changed = true;
   7777				new_crtc_state->update_pipe = false;
   7778			}
   7779		}
   7780
   7781		if (is_trans_port_sync_mode(new_crtc_state)) {
   7782			u8 trans = new_crtc_state->sync_mode_slaves_mask;
   7783
   7784			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
   7785				trans |= BIT(new_crtc_state->master_transcoder);
   7786
   7787			if (intel_cpu_transcoders_need_modeset(state, trans)) {
   7788				new_crtc_state->uapi.mode_changed = true;
   7789				new_crtc_state->update_pipe = false;
   7790			}
   7791		}
   7792
   7793		if (new_crtc_state->bigjoiner_pipes) {
   7794			if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
   7795				new_crtc_state->uapi.mode_changed = true;
   7796				new_crtc_state->update_pipe = false;
   7797			}
   7798		}
   7799	}
   7800
   7801	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   7802					    new_crtc_state, i) {
   7803		if (intel_crtc_needs_modeset(new_crtc_state)) {
   7804			any_ms = true;
   7805			continue;
   7806		}
   7807
   7808		if (!new_crtc_state->update_pipe)
   7809			continue;
   7810
   7811		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
   7812	}
   7813
   7814	if (any_ms && !check_digital_port_conflicts(state)) {
   7815		drm_dbg_kms(&dev_priv->drm,
   7816			    "rejecting conflicting digital port configuration\n");
   7817		ret = -EINVAL;
   7818		goto fail;
   7819	}
   7820
   7821	ret = drm_dp_mst_atomic_check(&state->base);
   7822	if (ret)
   7823		goto fail;
   7824
   7825	ret = intel_atomic_check_planes(state);
   7826	if (ret)
   7827		goto fail;
   7828
   7829	ret = intel_compute_global_watermarks(state);
   7830	if (ret)
   7831		goto fail;
   7832
   7833	ret = intel_bw_atomic_check(state);
   7834	if (ret)
   7835		goto fail;
   7836
   7837	ret = intel_cdclk_atomic_check(state, &any_ms);
   7838	if (ret)
   7839		goto fail;
   7840
   7841	if (intel_any_crtc_needs_modeset(state))
   7842		any_ms = true;
   7843
   7844	if (any_ms) {
   7845		ret = intel_modeset_checks(state);
   7846		if (ret)
   7847			goto fail;
   7848
   7849		ret = intel_modeset_calc_cdclk(state);
   7850		if (ret)
   7851			return ret;
   7852
   7853		intel_modeset_clear_plls(state);
   7854	}
   7855
   7856	ret = intel_atomic_check_crtcs(state);
   7857	if (ret)
   7858		goto fail;
   7859
   7860	ret = intel_fbc_atomic_check(state);
   7861	if (ret)
   7862		goto fail;
   7863
   7864	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   7865					    new_crtc_state, i) {
   7866		ret = intel_async_flip_check_hw(state, crtc);
   7867		if (ret)
   7868			goto fail;
   7869
   7870		if (!intel_crtc_needs_modeset(new_crtc_state) &&
   7871		    !new_crtc_state->update_pipe)
   7872			continue;
   7873
   7874		intel_dump_pipe_config(new_crtc_state, state,
   7875				       intel_crtc_needs_modeset(new_crtc_state) ?
   7876				       "[modeset]" : "[fastset]");
   7877	}
   7878
   7879	return 0;
   7880
   7881 fail:
   7882	if (ret == -EDEADLK)
   7883		return ret;
   7884
   7885	/*
   7886	 * FIXME would probably be nice to know which crtc specifically
   7887	 * caused the failure, in cases where we can pinpoint it.
   7888	 */
   7889	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   7890					    new_crtc_state, i)
   7891		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
   7892
   7893	return ret;
   7894}
   7895
   7896static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
   7897{
   7898	struct intel_crtc_state *crtc_state;
   7899	struct intel_crtc *crtc;
   7900	int i, ret;
   7901
   7902	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
   7903	if (ret < 0)
   7904		return ret;
   7905
   7906	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
   7907		bool mode_changed = intel_crtc_needs_modeset(crtc_state);
   7908
   7909		if (mode_changed || crtc_state->update_pipe ||
   7910		    crtc_state->uapi.color_mgmt_changed) {
   7911			intel_dsb_prepare(crtc_state);
   7912		}
   7913	}
   7914
   7915	return 0;
   7916}
   7917
   7918void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
   7919				  struct intel_crtc_state *crtc_state)
   7920{
   7921	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
   7922
   7923	if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
   7924		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
   7925
   7926	if (crtc_state->has_pch_encoder) {
   7927		enum pipe pch_transcoder =
   7928			intel_crtc_pch_transcoder(crtc);
   7929
   7930		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
   7931	}
   7932}
   7933
/*
 * Reprogram the pipe src size, panel fitter and linetime watermark for
 * a fastset, i.e. an update of an already running pipe that does not go
 * through a full modeset.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
			       const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */
	intel_set_pipe_src_size(new_crtc_state);

	/* on skylake this is done by detaching scalers */
	if (DISPLAY_VER(dev_priv) >= 9) {
		if (new_crtc_state->pch_pfit.enabled)
			skl_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ILK-BDW: enable, or disable a previously enabled, pfit */
		if (new_crtc_state->pch_pfit.enabled)
			ilk_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ilk_pfit_disable(old_crtc_state);
	}

	/*
	 * The register is supposedly single buffered so perhaps
	 * not 100% correct to do this here. But SKL+ calculate
	 * this based on the adjust pixel rate so pfit changes do
	 * affect it and so it must be updated for fastsets.
	 * HSW/BDW only really need this here for fastboot, after
	 * that the value should not change without a full modeset.
	 */
	if (DISPLAY_VER(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_set_linetime_wm(new_crtc_state);
}
   7973
/*
 * Per-pipe programming that must happen before the planes are armed.
 * Called from intel_update_crtc() between intel_pipe_update_start() and
 * intel_pipe_update_end(), i.e. inside the vblank evasion critical
 * section.
 */
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/*
	 * During modesets pipe configuration was programmed as the
	 * CRTC was enabled.
	 */
	if (!modeset) {
		/* Arm the double buffered color management registers. */
		if (new_crtc_state->uapi.color_mgmt_changed ||
		    new_crtc_state->update_pipe)
			intel_color_commit_arm(new_crtc_state);

		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
			bdw_set_pipemisc(new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_pipe_fastset(old_crtc_state, new_crtc_state);
	}

	intel_psr2_program_trans_man_trk_ctl(new_crtc_state);

	intel_atomic_update_watermarks(state, crtc);
}
   8004
   8005static void commit_pipe_post_planes(struct intel_atomic_state *state,
   8006				    struct intel_crtc *crtc)
   8007{
   8008	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   8009	const struct intel_crtc_state *new_crtc_state =
   8010		intel_atomic_get_new_crtc_state(state, crtc);
   8011
   8012	/*
   8013	 * Disable the scaler(s) after the plane(s) so that we don't
   8014	 * get a catastrophic underrun even if the two operations
   8015	 * end up happening in two different frames.
   8016	 */
   8017	if (DISPLAY_VER(dev_priv) >= 9 &&
   8018	    !intel_crtc_needs_modeset(new_crtc_state))
   8019		skl_detach_scalers(new_crtc_state);
   8020}
   8021
   8022static void intel_enable_crtc(struct intel_atomic_state *state,
   8023			      struct intel_crtc *crtc)
   8024{
   8025	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
   8026	const struct intel_crtc_state *new_crtc_state =
   8027		intel_atomic_get_new_crtc_state(state, crtc);
   8028
   8029	if (!intel_crtc_needs_modeset(new_crtc_state))
   8030		return;
   8031
   8032	intel_crtc_update_active_timings(new_crtc_state);
   8033
   8034	dev_priv->display->crtc_enable(state, crtc);
   8035
   8036	if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
   8037		return;
   8038
   8039	/* vblanks work again, re-enable pipe CRC. */
   8040	intel_crtc_enable_pipe_crc(crtc);
   8041}
   8042
/*
 * Perform the plane/pipe update for one crtc: fastset-only preparation
 * first, then the "noarm" programming, and finally the register arming
 * inside the vblank evasion critical section
 * (intel_pipe_update_start()/end()).
 */
static void intel_update_crtc(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = intel_crtc_needs_modeset(new_crtc_state);

	/* Fastset-only preparation; modesets did all this on enable. */
	if (!modeset) {
		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);

		intel_pre_plane_update(state, crtc);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(state, crtc);

		if (DISPLAY_VER(i915) >= 11 &&
		    new_crtc_state->update_pipe)
			icl_set_pipe_chicken(new_crtc_state);
	}

	intel_fbc_update(state, crtc);

	/* Non-arming color management programming, outside the evasion. */
	if (!modeset &&
	    (new_crtc_state->uapi.color_mgmt_changed ||
	     new_crtc_state->update_pipe))
		intel_color_commit_noarm(new_crtc_state);

	intel_crtc_planes_update_noarm(state, crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	commit_pipe_pre_planes(state, crtc);

	intel_crtc_planes_update_arm(state, crtc);

	commit_pipe_post_planes(state, crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * We usually enable FIFO underrun interrupts as part of the
	 * CRTC enable sequence during modesets.  But when we inherit a
	 * valid pipe configuration from the BIOS we need to take care
	 * of enabling them on the CRTC's first fastset.
	 */
	if (new_crtc_state->update_pipe && !modeset &&
	    old_crtc_state->inherited)
		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
   8099
/*
 * Disable one crtc as part of a modeset: pipe CRC first (to avoid the
 * race against vblank off), then the pipe itself, then FBC and the
 * crtc's shared DPLL reference.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
					  struct intel_crtc_state *new_crtc_state,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	/*
	 * We need to disable pipe CRC before disabling the pipe,
	 * or we race against vblank off.
	 */
	intel_crtc_disable_pipe_crc(crtc);

	dev_priv->display->crtc_disable(state, crtc);
	crtc->active = false;
	intel_fbc_disable(crtc);
	intel_disable_shared_dpll(old_crtc_state);

	/* FIXME unify this for all platforms */
	if (!new_crtc_state->hw.active &&
	    !HAS_GMCH(dev_priv))
		intel_initial_watermarks(state, crtc);
}
   8123
/*
 * Disable every active crtc that needs a modeset, in the required
 * order: planes first for all of them, then the port sync / MST /
 * bigjoiner slave crtcs, and finally everything else (the masters).
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u32 handled = 0;	/* mask of pipes disabled in the slave pass */
	int i;

	/* First pass: tear down the planes on all affected pipes. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_pre_plane_update(state, crtc);
		intel_crtc_disable_planes(state, crtc);
	}

	/* Only disable port sync and MST slaves */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		/* In case of Transcoder port Sync master slave CRTCs can be
		 * assigned in any order and we need to make sure that
		 * slave CRTCs are disabled first and then master CRTC since
		 * Slave vblanks are masked till Master Vblanks.
		 */
		if (!is_trans_port_sync_slave(old_crtc_state) &&
		    !intel_dp_mst_is_slave_trans(old_crtc_state) &&
		    !intel_crtc_is_bigjoiner_slave(old_crtc_state))
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
		handled |= BIT(crtc->pipe);
	}

	/* Disable everything else left on */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state) ||
		    (handled & BIT(crtc->pipe)))
			continue;

		if (!old_crtc_state->hw.active)
			continue;

		intel_old_crtc_state_disables(state, old_crtc_state,
					      new_crtc_state, crtc);
	}
}
   8181
   8182static void intel_commit_modeset_enables(struct intel_atomic_state *state)
   8183{
   8184	struct intel_crtc_state *new_crtc_state;
   8185	struct intel_crtc *crtc;
   8186	int i;
   8187
   8188	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
   8189		if (!new_crtc_state->hw.active)
   8190			continue;
   8191
   8192		intel_enable_crtc(state, crtc);
   8193		intel_update_crtc(state, crtc);
   8194	}
   8195}
   8196
/*
 * skl+ modeset enable path: orders the per-pipe updates such that the
 * DDB allocations of different pipes never overlap between individual
 * crtc updates (see the comments below for the exact ordering rules).
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
	u8 update_pipes = 0, modeset_pipes = 0;
	int i;

	/* Classify active pipes into fastset (update) vs full modeset. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if (!new_crtc_state->hw.active)
			continue;

		/* ignore allocations for crtc's that have been turned off. */
		if (!intel_crtc_needs_modeset(new_crtc_state)) {
			entries[pipe] = old_crtc_state->wm.skl.ddb;
			update_pipes |= BIT(pipe);
		} else {
			modeset_pipes |= BIT(pipe);
		}
	}

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other between CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 *
	 * So first lets enable all pipes that do not need a fullmodeset as
	 * those don't have any external dependency.
	 */
	while (update_pipes) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
						    new_crtc_state, i) {
			enum pipe pipe = crtc->pipe;

			if ((update_pipes & BIT(pipe)) == 0)
				continue;

			/* defer until the new ddb no longer overlaps others */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries, I915_MAX_PIPES, pipe))
				continue;

			entries[pipe] = new_crtc_state->wm.skl.ddb;
			update_pipes &= ~BIT(pipe);

			intel_update_crtc(state, crtc);

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    (update_pipes | modeset_pipes))
				intel_crtc_wait_for_next_vblank(crtc);
		}
	}

	update_pipes = modeset_pipes;

	/*
	 * Enable all pipes that needs a modeset and do not depends on other
	 * pipes
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
		    is_trans_port_sync_master(new_crtc_state) ||
		    intel_crtc_is_bigjoiner_master(new_crtc_state))
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Then we enable all remaining pipes that depend on other
	 * pipes: MST slaves and port sync masters, big joiner master
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((modeset_pipes & BIT(pipe)) == 0)
			continue;

		modeset_pipes &= ~BIT(pipe);

		intel_enable_crtc(state, crtc);
	}

	/*
	 * Finally we do the plane updates/etc. for all pipes that got enabled.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		enum pipe pipe = crtc->pipe;

		if ((update_pipes & BIT(pipe)) == 0)
			continue;

		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
									entries, I915_MAX_PIPES, pipe));

		entries[pipe] = new_crtc_state->wm.skl.ddb;
		update_pipes &= ~BIT(pipe);

		intel_update_crtc(state, crtc);
	}

	/* All pipes should have been handled by the loops above. */
	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
	drm_WARN_ON(&dev_priv->drm, update_pipes);
}
   8318
   8319static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
   8320{
   8321	struct intel_atomic_state *state, *next;
   8322	struct llist_node *freed;
   8323
   8324	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
   8325	llist_for_each_entry_safe(state, next, freed, freed)
   8326		drm_atomic_state_put(&state->base);
   8327}
   8328
   8329static void intel_atomic_helper_free_state_worker(struct work_struct *work)
   8330{
   8331	struct drm_i915_private *dev_priv =
   8332		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
   8333
   8334	intel_atomic_helper_free_state(dev_priv);
   8335}
   8336
/*
 * Sleep until either the commit_ready fence has signalled or a
 * modeset-triggering GPU reset (I915_RESET_MODESET) has begun,
 * whichever comes first. Waits on both waitqueues simultaneously so
 * a reset can wake us even while the fence is still pending.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* queue on both waitqueues before re-checking the conditions */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
					      I915_RESET_MODESET),
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready) ||
		    test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
				  I915_RESET_MODESET),
		    &wait_reset);
}
   8363
   8364static void intel_cleanup_dsbs(struct intel_atomic_state *state)
   8365{
   8366	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
   8367	struct intel_crtc *crtc;
   8368	int i;
   8369
   8370	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
   8371					    new_crtc_state, i)
   8372		intel_dsb_cleanup(old_crtc_state);
   8373}
   8374
   8375static void intel_atomic_cleanup_work(struct work_struct *work)
   8376{
   8377	struct intel_atomic_state *state =
   8378		container_of(work, struct intel_atomic_state, base.commit_work);
   8379	struct drm_i915_private *i915 = to_i915(state->base.dev);
   8380
   8381	intel_cleanup_dsbs(state);
   8382	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
   8383	drm_atomic_helper_commit_cleanup_done(&state->base);
   8384	drm_atomic_state_put(&state->base);
   8385
   8386	intel_atomic_helper_free_state(i915);
   8387}
   8388
/*
 * For each plane whose fb carries a CCS clear color plane, read the
 * hardware-native fast clear color value back from the fb object into
 * plane_state->ccval so it can be programmed during the commit.
 */
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct drm_framebuffer *fb = plane_state->hw.fb;
		int cc_plane;
		int ret;

		if (!fb)
			continue;

		/* skip fbs without a render-compression clear color plane */
		cc_plane = intel_fb_rc_ccs_cc_plane(fb);
		if (cc_plane < 0)
			continue;

		/*
		 * The layout of the fast clear color value expected by HW
		 * (the DRM ABI requiring this value to be located in fb at
		 * offset 0 of cc plane, plane #2 previous generations or
		 * plane #1 for flat ccs):
		 * - 4 x 4 bytes per-channel value
		 *   (in surface type specific float/int format provided by the fb user)
		 * - 8 bytes native color value used by the display
		 *   (converted/written by GPU during a fast clear operation using the
		 *    above per-channel values)
		 *
		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
		 * caller made sure that the object is synced wrt. the related color clear value
		 * GPU write on it.
		 */
		/* +16 skips the 4 x 4 byte per-channel values described above */
		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
						     fb->offsets[cc_plane] + 16,
						     &plane_state->ccval,
						     sizeof(plane_state->ccval));
		/* The above could only fail if the FB obj has an unexpected backing store type. */
		drm_WARN_ON(&i915->drm, ret);
	}
}
   8431
/*
 * Second half of an atomic commit: program the hardware for all crtcs
 * (disables first, then enables/updates), wait for the flips to
 * complete, verify the result, and hand the final state cleanup off to
 * a worker. Nonblocking commits run this from
 * intel_atomic_commit_work().
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	/* wait for the commit_ready fence (or a pending GPU reset) */
	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	intel_atomic_prepare_plane_clear_colors(state);

	/*
	 * Collect the power domain references each updated pipe needs;
	 * they are released via modeset_put_crtc_power_domains() in the
	 * post-plane-update loop further down.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {
			modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		intel_set_cdclk_pre_plane_update(state);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	intel_sagv_pre_plane_update(state);

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = intel_crtc_needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	intel_encoders_update_prepare(state);

	intel_dbuf_pre_plane_update(state);
	intel_mbus_dbox_update(state);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_enable_flip_done(state, crtc);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display->commit_modeset_enables(state);

	intel_encoders_update_complete(state);

	if (state->modeset)
		intel_set_cdclk_post_plane_update(state);

	intel_wait_for_vblank_workers(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->do_async_flip)
			intel_crtc_disable_flip_done(state, crtc);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		intel_optimize_watermarks(state, crtc);
	}

	intel_dbuf_post_plane_update(state);
	intel_psr_post_plane_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		/* release the references taken at the top of this function */
		modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

		/*
		 * DSB cleanup is done in cleanup_work aligning with framebuffer
		 * cleanup. So copy and reset the dsb structure to sync with
		 * commit_done and later do dsb cleanup in cleanup_work.
		 */
		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	intel_sagv_post_plane_update(state);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
   8601
   8602static void intel_atomic_commit_work(struct work_struct *work)
   8603{
   8604	struct intel_atomic_state *state =
   8605		container_of(work, struct intel_atomic_state, base.commit_work);
   8606
   8607	intel_atomic_commit_tail(state);
   8608}
   8609
   8610static int
   8611intel_atomic_commit_ready(struct i915_sw_fence *fence,
   8612			  enum i915_sw_fence_notify notify)
   8613{
   8614	struct intel_atomic_state *state =
   8615		container_of(fence, struct intel_atomic_state, commit_ready);
   8616
   8617	switch (notify) {
   8618	case FENCE_COMPLETE:
   8619		/* we do blocking waits in the worker, nothing to do here */
   8620		break;
   8621	case FENCE_FREE:
   8622		{
   8623			struct intel_atomic_helper *helper =
   8624				&to_i915(state->base.dev)->atomic_helper;
   8625
   8626			if (llist_add(&state->freed, &helper->free_list))
   8627				schedule_work(&helper->free_work);
   8628			break;
   8629		}
   8630	}
   8631
   8632	return NOTIFY_DONE;
   8633}
   8634
   8635static void intel_atomic_track_fbs(struct intel_atomic_state *state)
   8636{
   8637	struct intel_plane_state *old_plane_state, *new_plane_state;
   8638	struct intel_plane *plane;
   8639	int i;
   8640
   8641	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
   8642					     new_plane_state, i)
   8643		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
   8644					to_intel_frontbuffer(new_plane_state->hw.fb),
   8645					plane->frontbuffer_bit);
   8646}
   8647
/*
 * Main atomic commit entry point (drm_mode_config_funcs.atomic_commit).
 *
 * Prepares the commit (plane pinning, sw fence setup), swaps the new
 * state into place, and then either runs the commit tail synchronously
 * or queues it on a workqueue for nonblocking commits.
 *
 * Returns 0 on success or a negative error code; on failure the runtime
 * pm wakeref, the commit_ready fence and any prepared plane state are
 * all released before returning.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Keep the device awake for the duration of the commit. */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		/* Release the fence so nothing waits on it forever. */
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		i915_sw_fence_commit(&state->commit_ready);

		/* Free the DSBs allocated for the new crtc states. */
		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* This reference is dropped by the commit work/tail. */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		/* Plane-only updates go on the dedicated flip workqueue. */
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/*
		 * Wait for any nonblocking modeset still queued on the
		 * modeset workqueue before committing synchronously.
		 */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
   8738
   8739/**
   8740 * intel_plane_destroy - destroy a plane
   8741 * @plane: plane to destroy
   8742 *
   8743 * Common destruction function for all types of planes (primary, cursor,
   8744 * sprite).
   8745 */
   8746void intel_plane_destroy(struct drm_plane *plane)
   8747{
   8748	drm_plane_cleanup(plane);
   8749	kfree(to_intel_plane(plane));
   8750}
   8751
   8752static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
   8753{
   8754	struct intel_plane *plane;
   8755
   8756	for_each_intel_plane(&dev_priv->drm, plane) {
   8757		struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
   8758							      plane->pipe);
   8759
   8760		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
   8761	}
   8762}
   8763
   8764
   8765int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
   8766				      struct drm_file *file)
   8767{
   8768	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
   8769	struct drm_crtc *drmmode_crtc;
   8770	struct intel_crtc *crtc;
   8771
   8772	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
   8773	if (!drmmode_crtc)
   8774		return -ENOENT;
   8775
   8776	crtc = to_intel_crtc(drmmode_crtc);
   8777	pipe_from_crtc_id->pipe = crtc->pipe;
   8778
   8779	return 0;
   8780}
   8781
   8782static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
   8783{
   8784	struct drm_device *dev = encoder->base.dev;
   8785	struct intel_encoder *source_encoder;
   8786	u32 possible_clones = 0;
   8787
   8788	for_each_intel_encoder(dev, source_encoder) {
   8789		if (encoders_cloneable(encoder, source_encoder))
   8790			possible_clones |= drm_encoder_mask(&source_encoder->base);
   8791	}
   8792
   8793	return possible_clones;
   8794}
   8795
   8796static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
   8797{
   8798	struct drm_device *dev = encoder->base.dev;
   8799	struct intel_crtc *crtc;
   8800	u32 possible_crtcs = 0;
   8801
   8802	for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask)
   8803		possible_crtcs |= drm_crtc_mask(&crtc->base);
   8804
   8805	return possible_crtcs;
   8806}
   8807
   8808static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
   8809{
   8810	if (!IS_MOBILE(dev_priv))
   8811		return false;
   8812
   8813	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
   8814		return false;
   8815
   8816	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
   8817		return false;
   8818
   8819	return true;
   8820}
   8821
   8822static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
   8823{
   8824	if (DISPLAY_VER(dev_priv) >= 9)
   8825		return false;
   8826
   8827	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
   8828		return false;
   8829
   8830	if (HAS_PCH_LPT_H(dev_priv) &&
   8831	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
   8832		return false;
   8833
   8834	/* DDI E can't be used if DDI A requires 4 lanes */
   8835	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
   8836		return false;
   8837
   8838	if (!dev_priv->vbt.int_crt_support)
   8839		return false;
   8840
   8841	return true;
   8842}
   8843
/*
 * Probe and register every display output encoder appropriate for this
 * platform (DDI, DSI, LVDS, CRT, SDVO, HDMI, DP, TV, DVO), based on the
 * display version, hardware strap registers and the VBT. Afterwards the
 * possible_crtcs/possible_clones masks of all registered encoders are
 * filled in. Registration order matters on several paths (see the LVDS
 * vs. eDP comment below).
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_unlock_regs_wa(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Newest platforms first: each branch lists that platform's ports. */
	if (IS_DG2(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D_XELPD);
		intel_ddi_init(dev_priv, PORT_TC1);
	} else if (IS_ALDERLAKE_P(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		icl_dsi_init(dev_priv);
	} else if (IS_ALDERLAKE_S(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_TC1);
		intel_ddi_init(dev_priv, PORT_TC2);
		intel_ddi_init(dev_priv, PORT_TC3);
		intel_ddi_init(dev_priv, PORT_TC4);
		intel_ddi_init(dev_priv, PORT_TC5);
		intel_ddi_init(dev_priv, PORT_TC6);
		icl_dsi_init(dev_priv);
	} else if (IS_JSL_EHL(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 11) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		icl_dsi_init(dev_priv);
	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		vlv_dsi_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
	} else if (HAS_DDI(dev_priv)) {
		u32 found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/* Haswell uses DDI functions to detect digital outputs. */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		if (found)
			intel_ddi_init(dev_priv, PORT_A);

		found = intel_de_read(dev_priv, SFUSE_STRAP);
		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			g4x_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				g4x_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			g4x_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (DISPLAY_VER(dev_priv) == 2) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	/* All encoders exist now; compute their crtc/clone masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
   9084
   9085static enum drm_mode_status
   9086intel_mode_valid(struct drm_device *dev,
   9087		 const struct drm_display_mode *mode)
   9088{
   9089	struct drm_i915_private *dev_priv = to_i915(dev);
   9090	int hdisplay_max, htotal_max;
   9091	int vdisplay_max, vtotal_max;
   9092
   9093	/*
   9094	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
   9095	 * of DBLSCAN modes to the output's mode list when they detect
   9096	 * the scaling mode property on the connector. And they don't
   9097	 * ask the kernel to validate those modes in any way until
   9098	 * modeset time at which point the client gets a protocol error.
   9099	 * So in order to not upset those clients we silently ignore the
   9100	 * DBLSCAN flag on such connectors. For other connectors we will
   9101	 * reject modes with the DBLSCAN flag in encoder->compute_config().
   9102	 * And we always reject DBLSCAN modes in connector->mode_valid()
   9103	 * as we never want such modes on the connector's mode list.
   9104	 */
   9105
   9106	if (mode->vscan > 1)
   9107		return MODE_NO_VSCAN;
   9108
   9109	if (mode->flags & DRM_MODE_FLAG_HSKEW)
   9110		return MODE_H_ILLEGAL;
   9111
   9112	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
   9113			   DRM_MODE_FLAG_NCSYNC |
   9114			   DRM_MODE_FLAG_PCSYNC))
   9115		return MODE_HSYNC;
   9116
   9117	if (mode->flags & (DRM_MODE_FLAG_BCAST |
   9118			   DRM_MODE_FLAG_PIXMUX |
   9119			   DRM_MODE_FLAG_CLKDIV2))
   9120		return MODE_BAD;
   9121
   9122	/* Transcoder timing limits */
   9123	if (DISPLAY_VER(dev_priv) >= 11) {
   9124		hdisplay_max = 16384;
   9125		vdisplay_max = 8192;
   9126		htotal_max = 16384;
   9127		vtotal_max = 8192;
   9128	} else if (DISPLAY_VER(dev_priv) >= 9 ||
   9129		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
   9130		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
   9131		vdisplay_max = 4096;
   9132		htotal_max = 8192;
   9133		vtotal_max = 8192;
   9134	} else if (DISPLAY_VER(dev_priv) >= 3) {
   9135		hdisplay_max = 4096;
   9136		vdisplay_max = 4096;
   9137		htotal_max = 8192;
   9138		vtotal_max = 8192;
   9139	} else {
   9140		hdisplay_max = 2048;
   9141		vdisplay_max = 2048;
   9142		htotal_max = 4096;
   9143		vtotal_max = 4096;
   9144	}
   9145
   9146	if (mode->hdisplay > hdisplay_max ||
   9147	    mode->hsync_start > htotal_max ||
   9148	    mode->hsync_end > htotal_max ||
   9149	    mode->htotal > htotal_max)
   9150		return MODE_H_ILLEGAL;
   9151
   9152	if (mode->vdisplay > vdisplay_max ||
   9153	    mode->vsync_start > vtotal_max ||
   9154	    mode->vsync_end > vtotal_max ||
   9155	    mode->vtotal > vtotal_max)
   9156		return MODE_V_ILLEGAL;
   9157
   9158	if (DISPLAY_VER(dev_priv) >= 5) {
   9159		if (mode->hdisplay < 64 ||
   9160		    mode->htotal - mode->hdisplay < 32)
   9161			return MODE_H_ILLEGAL;
   9162
   9163		if (mode->vtotal - mode->vdisplay < 5)
   9164			return MODE_V_ILLEGAL;
   9165	} else {
   9166		if (mode->htotal - mode->hdisplay < 32)
   9167			return MODE_H_ILLEGAL;
   9168
   9169		if (mode->vtotal - mode->vdisplay < 3)
   9170			return MODE_V_ILLEGAL;
   9171	}
   9172
   9173	/*
   9174	 * Cantiga+ cannot handle modes with a hsync front porch of 0.
   9175	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
   9176	 */
   9177	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
   9178	    mode->hsync_start == mode->hdisplay)
   9179		return MODE_H_ILLEGAL;
   9180
   9181	return MODE_OK;
   9182}
   9183
   9184enum drm_mode_status
   9185intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
   9186				const struct drm_display_mode *mode,
   9187				bool bigjoiner)
   9188{
   9189	int plane_width_max, plane_height_max;
   9190
   9191	/*
   9192	 * intel_mode_valid() should be
   9193	 * sufficient on older platforms.
   9194	 */
   9195	if (DISPLAY_VER(dev_priv) < 9)
   9196		return MODE_OK;
   9197
   9198	/*
   9199	 * Most people will probably want a fullscreen
   9200	 * plane so let's not advertize modes that are
   9201	 * too big for that.
   9202	 */
   9203	if (DISPLAY_VER(dev_priv) >= 11) {
   9204		plane_width_max = 5120 << bigjoiner;
   9205		plane_height_max = 4320;
   9206	} else {
   9207		plane_width_max = 5120;
   9208		plane_height_max = 4096;
   9209	}
   9210
   9211	if (mode->hdisplay > plane_width_max)
   9212		return MODE_H_ILLEGAL;
   9213
   9214	if (mode->vdisplay > plane_height_max)
   9215		return MODE_V_ILLEGAL;
   9216
   9217	return MODE_OK;
   9218}
   9219
/* Mode-config callbacks shared by all platforms. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_fb_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
   9231
/* Display vtables, selected by intel_init_display_hooks(). */

/* Gen9+ (Skylake and newer). */
static const struct drm_i915_display_funcs skl_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = skl_commit_modeset_enables,
	.get_initial_plane_config = skl_get_initial_plane_config,
};

/* Pre-gen9 DDI platforms (Haswell/Broadwell). */
static const struct drm_i915_display_funcs ddi_display_funcs = {
	.get_pipe_config = hsw_get_pipe_config,
	.crtc_enable = hsw_crtc_enable,
	.crtc_disable = hsw_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

/* PCH-split platforms without DDI (Ironlake..Ivybridge). */
static const struct drm_i915_display_funcs pch_split_display_funcs = {
	.get_pipe_config = ilk_get_pipe_config,
	.crtc_enable = ilk_crtc_enable,
	.crtc_disable = ilk_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

/* Valleyview/Cherryview. */
static const struct drm_i915_display_funcs vlv_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = valleyview_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};

/* Everything older. */
static const struct drm_i915_display_funcs i9xx_display_funcs = {
	.get_pipe_config = i9xx_get_pipe_config,
	.crtc_enable = i9xx_crtc_enable,
	.crtc_disable = i9xx_crtc_disable,
	.commit_modeset_enables = intel_commit_modeset_enables,
	.get_initial_plane_config = i9xx_get_initial_plane_config,
};
   9271
   9272/**
   9273 * intel_init_display_hooks - initialize the display modesetting hooks
   9274 * @dev_priv: device private
   9275 */
   9276void intel_init_display_hooks(struct drm_i915_private *dev_priv)
   9277{
   9278	if (!HAS_DISPLAY(dev_priv))
   9279		return;
   9280
   9281	intel_init_cdclk_hooks(dev_priv);
   9282	intel_audio_hooks_init(dev_priv);
   9283
   9284	intel_dpll_init_clock_hook(dev_priv);
   9285
   9286	if (DISPLAY_VER(dev_priv) >= 9) {
   9287		dev_priv->display = &skl_display_funcs;
   9288	} else if (HAS_DDI(dev_priv)) {
   9289		dev_priv->display = &ddi_display_funcs;
   9290	} else if (HAS_PCH_SPLIT(dev_priv)) {
   9291		dev_priv->display = &pch_split_display_funcs;
   9292	} else if (IS_CHERRYVIEW(dev_priv) ||
   9293		   IS_VALLEYVIEW(dev_priv)) {
   9294		dev_priv->display = &vlv_display_funcs;
   9295	} else {
   9296		dev_priv->display = &i9xx_display_funcs;
   9297	}
   9298
   9299	intel_fdi_init_hook(dev_priv);
   9300}
   9301
   9302void intel_modeset_init_hw(struct drm_i915_private *i915)
   9303{
   9304	struct intel_cdclk_state *cdclk_state;
   9305
   9306	if (!HAS_DISPLAY(i915))
   9307		return;
   9308
   9309	cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
   9310
   9311	intel_update_cdclk(i915);
   9312	intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK");
   9313	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
   9314}
   9315
   9316static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
   9317{
   9318	struct drm_plane *plane;
   9319	struct intel_crtc *crtc;
   9320
   9321	for_each_intel_crtc(state->dev, crtc) {
   9322		struct intel_crtc_state *crtc_state;
   9323
   9324		crtc_state = intel_atomic_get_crtc_state(state, crtc);
   9325		if (IS_ERR(crtc_state))
   9326			return PTR_ERR(crtc_state);
   9327
   9328		if (crtc_state->hw.active) {
   9329			/*
   9330			 * Preserve the inherited flag to avoid
   9331			 * taking the full modeset path.
   9332			 */
   9333			crtc_state->inherited = true;
   9334		}
   9335	}
   9336
   9337	drm_for_each_plane(plane, state->dev) {
   9338		struct drm_plane_state *plane_state;
   9339
   9340		plane_state = drm_atomic_get_plane_state(state, plane);
   9341		if (IS_ERR(plane_state))
   9342			return PTR_ERR(plane_state);
   9343	}
   9344
   9345	return 0;
   9346}
   9347
   9348/*
   9349 * Calculate what we think the watermarks should be for the state we've read
   9350 * out of the hardware and then immediately program those watermarks so that
   9351 * we ensure the hardware settings match our internal state.
   9352 *
   9353 * We can calculate what we think WM's should be by creating a duplicate of the
   9354 * current state (which was constructed during hardware readout) and running it
   9355 * through the atomic check code to calculate new watermark values in the
   9356 * state object.
   9357 */
   9358static void sanitize_watermarks(struct drm_i915_private *dev_priv)
   9359{
   9360	struct drm_atomic_state *state;
   9361	struct intel_atomic_state *intel_state;
   9362	struct intel_crtc *crtc;
   9363	struct intel_crtc_state *crtc_state;
   9364	struct drm_modeset_acquire_ctx ctx;
   9365	int ret;
   9366	int i;
   9367
   9368	/* Only supported on platforms that use atomic watermark design */
   9369	if (!dev_priv->wm_disp->optimize_watermarks)
   9370		return;
   9371
   9372	state = drm_atomic_state_alloc(&dev_priv->drm);
   9373	if (drm_WARN_ON(&dev_priv->drm, !state))
   9374		return;
   9375
   9376	intel_state = to_intel_atomic_state(state);
   9377
   9378	drm_modeset_acquire_init(&ctx, 0);
   9379
   9380retry:
   9381	state->acquire_ctx = &ctx;
   9382
   9383	/*
   9384	 * Hardware readout is the only time we don't want to calculate
   9385	 * intermediate watermarks (since we don't trust the current
   9386	 * watermarks).
   9387	 */
   9388	if (!HAS_GMCH(dev_priv))
   9389		intel_state->skip_intermediate_wm = true;
   9390
   9391	ret = sanitize_watermarks_add_affected(state);
   9392	if (ret)
   9393		goto fail;
   9394
   9395	ret = intel_atomic_check(&dev_priv->drm, state);
   9396	if (ret)
   9397		goto fail;
   9398
   9399	/* Write calculated watermark values back */
   9400	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
   9401		crtc_state->wm.need_postvbl_update = true;
   9402		intel_optimize_watermarks(intel_state, crtc);
   9403
   9404		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
   9405	}
   9406
   9407fail:
   9408	if (ret == -EDEADLK) {
   9409		drm_atomic_state_clear(state);
   9410		drm_modeset_backoff(&ctx);
   9411		goto retry;
   9412	}
   9413
   9414	/*
   9415	 * If we fail here, it means that the hardware appears to be
   9416	 * programmed in a way that shouldn't be possible, given our
   9417	 * understanding of watermark requirements.  This might mean a
   9418	 * mistake in the hardware readout code or a mistake in the
   9419	 * watermark calculations for a given platform.  Raise a WARN
   9420	 * so that this is noticeable.
   9421	 *
   9422	 * If this actually happens, we'll have to just leave the
   9423	 * BIOS-programmed watermarks untouched and hope for the best.
   9424	 */
   9425	drm_WARN(&dev_priv->drm, ret,
   9426		 "Could not determine valid watermarks for inherited state\n");
   9427
   9428	drm_atomic_state_put(state);
   9429
   9430	drm_modeset_drop_locks(&ctx);
   9431	drm_modeset_acquire_fini(&ctx);
   9432}
   9433
/*
 * Perform the first atomic commit after hardware state readout, to
 * sanitize the state taken over from the BIOS. Keeps the 'inherited'
 * flag on active CRTCs so this does not force a full modeset, unless
 * an encoder's initial_fastset_check() demands one.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	/* Deadlock backoff re-enters here (see -EDEADLK handling below). */
retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			/*
			 * We've not yet detected sink capabilities
			 * (audio,infoframes,etc.) and thus we don't want to
			 * force a full state recomputation yet. We want that to
			 * happen only for the first real commit from userspace.
			 * So preserve the inherited flag for the time being.
			 */
			crtc_state->inherited = true;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Standard modeset-lock deadlock dance: clear, back off, retry. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
   9512
   9513static void intel_mode_config_init(struct drm_i915_private *i915)
   9514{
   9515	struct drm_mode_config *mode_config = &i915->drm.mode_config;
   9516
   9517	drm_mode_config_init(&i915->drm);
   9518	INIT_LIST_HEAD(&i915->global_obj_list);
   9519
   9520	mode_config->min_width = 0;
   9521	mode_config->min_height = 0;
   9522
   9523	mode_config->preferred_depth = 24;
   9524	mode_config->prefer_shadow = 1;
   9525
   9526	mode_config->funcs = &intel_mode_funcs;
   9527
   9528	mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
   9529
   9530	/*
   9531	 * Maximum framebuffer dimensions, chosen to match
   9532	 * the maximum render engine surface size on gen4+.
   9533	 */
   9534	if (DISPLAY_VER(i915) >= 7) {
   9535		mode_config->max_width = 16384;
   9536		mode_config->max_height = 16384;
   9537	} else if (DISPLAY_VER(i915) >= 4) {
   9538		mode_config->max_width = 8192;
   9539		mode_config->max_height = 8192;
   9540	} else if (DISPLAY_VER(i915) == 3) {
   9541		mode_config->max_width = 4096;
   9542		mode_config->max_height = 4096;
   9543	} else {
   9544		mode_config->max_width = 2048;
   9545		mode_config->max_height = 2048;
   9546	}
   9547
   9548	if (IS_I845G(i915) || IS_I865G(i915)) {
   9549		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
   9550		mode_config->cursor_height = 1023;
   9551	} else if (IS_I830(i915) || IS_I85X(i915) ||
   9552		   IS_I915G(i915) || IS_I915GM(i915)) {
   9553		mode_config->cursor_width = 64;
   9554		mode_config->cursor_height = 64;
   9555	} else {
   9556		mode_config->cursor_width = 256;
   9557		mode_config->cursor_height = 256;
   9558	}
   9559}
   9560
/* Tear down what intel_mode_config_init() set up. */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
   9566
/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
	int ret;

	/* Allow probe-failure fault injection for testing. */
	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			return ret;
	}

	/* Undone via intel_bios_driver_remove() in the error path below. */
	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto cleanup_bios;

	/* FIXME: completely on the wrong abstraction layer */
	intel_power_domains_init_hw(i915, false);

	/* Headless device: nothing below applies. */
	if (!HAS_DISPLAY(i915))
		return 0;

	intel_dmc_ucode_init(i915);

	/*
	 * NOTE(review): alloc_ordered_workqueue()/alloc_workqueue() can
	 * return NULL on OOM and the results are not checked here — TODO
	 * confirm downstream users tolerate a NULL workqueue or add
	 * -ENOMEM handling.
	 */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	i915->window2_delay = 0; /* No DSB so no window2 delay */

	intel_mode_config_init(i915);

	/* The three global-state inits below share one unwind label. */
	ret = intel_cdclk_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_dbuf_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	ret = intel_bw_init(i915);
	if (ret)
		goto cleanup_vga_client_pw_domain_dmc;

	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	return 0;

cleanup_vga_client_pw_domain_dmc:
	intel_dmc_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	intel_vga_unregister(i915);
cleanup_bios:
	intel_bios_driver_remove(i915);

	return ret;
}
   9635
/* part #2: call after irq install, but before gem init */
int intel_modeset_init_nogem(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_pps_setup(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* A crtc init failure unwinds the whole mode config. */
	for_each_pipe(i915, pipe) {
		ret = intel_crtc_init(i915, pipe);
		if (ret) {
			intel_mode_config_cleanup(i915);
			return ret;
		}
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(i915);
	intel_fdi_pll_freq_update(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);
	intel_dpll_update_ref_clks(i915);

	intel_hdcp_component_init(i915);

	/* 0 means "not yet determined" — compute it now. */
	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/*
	 * If the platform has HTI, we need to find out whether it has reserved
	 * any display resources before we create our display outputs.
	 */
	if (INTEL_INFO(i915)->display.has_hti)
		i915->hti_state = intel_de_read(i915, HDPORT_STATE);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read out and sanitize BIOS hw state under all modeset locks. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	intel_acpi_assign_connector_fwnodes(i915);
	drm_modeset_unlock_all(dev);

	/* Take over the BIOS framebuffer on pipes left active. */
	for_each_intel_crtc(dev, crtc) {
		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
			continue;
		intel_crtc_initial_plane_config(crtc);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	return 0;
}
   9712
/* part #3: call after gem init */
int intel_modeset_init(struct drm_i915_private *i915)
{
	int ret;

	if (!HAS_DISPLAY(i915))
		return 0;

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(&i915->drm);
	if (ret)
		/* Non-fatal: log and continue with the rest of init. */
		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);

	intel_overlay_setup(i915);

	/* fbdev init failure, in contrast, is fatal for the probe. */
	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		return ret;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);
	intel_hpd_poll_disable(i915);

	intel_init_ipc(i915);

	return 0;
}
   9745
/*
 * Force-enable @pipe with a fixed 640x480@60Hz DVO-2x timing set, used
 * by the i830 "force quirk" (see the debug message below). Programs the
 * pipe timings, FP dividers and DPLL by hand, then waits for the
 * scanline counter to start moving to confirm the pipe is running.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check: these dividers must yield ~25.154 MHz dotclock. */
	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	/* Hand-coded 640x480 timings: active | total-1 packed per register. */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}
   9817
/*
 * Counterpart of i830_enable_pipe(): turn the force-quirk pipe back off.
 * Warns if any display plane or cursor is still enabled, disables the
 * pipe, waits for the scanline counter to stop, and finally shuts the
 * DPLL down (leaving only DPLL_VGA_MODE_DIS set).
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* All planes/cursors must already be off before the pipe goes down. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
   9844
   9845static void
   9846intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
   9847{
   9848	struct intel_crtc *crtc;
   9849
   9850	if (DISPLAY_VER(dev_priv) >= 4)
   9851		return;
   9852
   9853	for_each_intel_crtc(&dev_priv->drm, crtc) {
   9854		struct intel_plane *plane =
   9855			to_intel_plane(crtc->base.primary);
   9856		struct intel_crtc *plane_crtc;
   9857		enum pipe pipe;
   9858
   9859		if (!plane->get_hw_state(plane, &pipe))
   9860			continue;
   9861
   9862		if (pipe == crtc->pipe)
   9863			continue;
   9864
   9865		drm_dbg_kms(&dev_priv->drm,
   9866			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
   9867			    plane->base.base.id, plane->base.name);
   9868
   9869		plane_crtc = intel_crtc_for_pipe(dev_priv, pipe);
   9870		intel_plane_disable_noatomic(plane_crtc, plane);
   9871	}
   9872}
   9873
/*
 * Return true if at least one encoder is attached to @crtc. The loop
 * body runs once per attached encoder, so returning from the first
 * iteration answers the "any attached?" question.
 */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}
   9884
/*
 * Return the first connector currently using @encoder, or NULL if none
 * does. Returning from the first loop iteration picks the first match.
 */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}
   9895
/*
 * Sanitize one crtc's hardware-readout state: turn off everything but
 * the primary plane, drop BIOS color settings, disable the pipe if it
 * has no attached encoder, and set up FIFO underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/* Disable any background color/etc. set by the BIOS */
		intel_color_commit_noarm(crtc_state);
		intel_color_commit_arm(crtc_state);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. Bigjoiner slaves are handled
	 * via their master, hence the exemption. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
	    !intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (intel_has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
   9955
   9956static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
   9957{
   9958	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
   9959
   9960	/*
   9961	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
   9962	 * the hardware when a high res displays plugged in. DPLL P
   9963	 * divider is zero, and the pipe timings are bonkers. We'll
   9964	 * try to disable everything in that case.
   9965	 *
   9966	 * FIXME would be nice to be able to sanitize this state
   9967	 * without several WARNs, but for now let's take the easy
   9968	 * road.
   9969	 */
   9970	return IS_SANDYBRIDGE(dev_priv) &&
   9971		crtc_state->hw.active &&
   9972		crtc_state->shared_dpll &&
   9973		crtc_state->port_clock == 0;
   9974}
   9975
/*
 * Sanitize one encoder's hardware-readout state. If the encoder has an
 * active connector but no active pipe (typical fallout of resume
 * register restore), disable it manually and clamp the connector state
 * to off. Finally notify opregion of the result and, on DDI platforms,
 * fix up the encoder's PLL mapping.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* Treat the known-bogus SNB BIOS state as "no active pipe". */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			/* FIXME NULL atomic state passed! */
			if (encoder->disable)
				encoder->disable(NULL, encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(NULL, encoder, crtc_state,
						      connector->base.state);

			/* Restore the temporarily overridden best_encoder. */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (HAS_DDI(dev_priv))
		intel_ddi_sanitize_encoder_pll_mapping(encoder);
}
  10046
/* FIXME read out full plane state for all planes */
/*
 * Read out every plane's enable state from hardware, record visibility
 * in the crtc state of whichever pipe the plane is actually attached
 * to, then recompute each crtc's plane bitmasks from the result.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		/* Default in case get_hw_state() doesn't set a pipe. */
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		/* Attribute the plane to the pipe hardware says it is on. */
		crtc = intel_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			    plane->base.base.id, plane->base.name,
			    str_enabled_disabled(visible), pipe_name(pipe));
	}

	/* Now that visibility is known, fix up the per-crtc bitmasks. */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_plane_bitmasks(crtc_state);
	}
}
  10080
/*
 * Read the full display hardware state (crtcs, planes, dplls, encoders,
 * connectors) out into the corresponding software state structures, and
 * derive the cdclk/dbuf/bw global state from it. Called with all
 * modeset locks held during driver load and resume.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	/* Pass 1: reset each crtc state and read out its pipe config. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		intel_crtc_get_pipe_config(crtc_state);

		crtc_state->hw.enable = crtc_state->hw.active;

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    str_enabled_disabled(crtc_state->hw.active));
	}

	cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	/* Pass 2: encoders — link each active encoder to its crtc. */
	for_each_intel_encoder(dev, encoder) {
		struct intel_crtc_state *crtc_state = NULL;

		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = intel_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			intel_encoder_get_config(encoder, crtc_state);

			/* read out to slave crtc as well for bigjoiner */
			if (crtc_state->bigjoiner_pipes) {
				struct intel_crtc *slave_crtc;

				/* encoder should read be linked to bigjoiner master */
				WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state));

				for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc,
								 intel_crtc_bigjoiner_slave_pipes(crtc_state)) {
					struct intel_crtc_state *slave_crtc_state;

					slave_crtc_state = to_intel_crtc_state(slave_crtc->base.state);
					intel_encoder_get_config(encoder, slave_crtc_state);
				}
			}
		} else {
			encoder->base.crtc = NULL;
		}

		/* crtc_state may be NULL here for inactive encoders. */
		if (encoder->sync_state)
			encoder->sync_state(encoder, crtc_state);

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    str_enabled_disabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	intel_dpll_readout_hw_state(dev_priv);

	/* Pass 3: connectors — set dpms and encoder/connector masks. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    str_enabled_disabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Pass 4: derive per-crtc min cdclk, voltage level and bw state. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->hw.active) {
			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->inherited = true;

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
  10271
  10272static void
  10273get_encoder_power_domains(struct drm_i915_private *dev_priv)
  10274{
  10275	struct intel_encoder *encoder;
  10276
  10277	for_each_intel_encoder(&dev_priv->drm, encoder) {
  10278		struct intel_crtc_state *crtc_state;
  10279
  10280		if (!encoder->get_power_domains)
  10281			continue;
  10282
  10283		/*
  10284		 * MST-primary and inactive encoders don't have a crtc state
  10285		 * and neither of these require any power domain references.
  10286		 */
  10287		if (!encoder->base.crtc)
  10288			continue;
  10289
  10290		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
  10291		encoder->get_power_domains(encoder, crtc_state);
  10292	}
  10293}
  10294
/*
 * Apply display workarounds that must be in place early, before the hw
 * state readout / plane disabling done in intel_modeset_setup_hw_state()
 * (see the WaRsPkgCStateDisplayPMReq note below).
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_DISPLAY_VER(dev_priv, 10, 12))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}

	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
		/* Display WA #1142:kbl,cfl,cml */
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
			     KBL_ARB_FILL_SPARE_14);
	}
}
  10323
  10324
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state: read out the full hw state,
 * then fix up anything the BIOS (or a previous driver instance) left
 * in a shape we cannot handle. Holds a POWER_DOMAIN_INIT wakeref for
 * the duration. Called with all modeset locks held (@ctx).
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	/* Early workarounds must precede the readout below. */
	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	intel_pch_sanitize(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_fbc_sanitize(dev_priv);

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/* Per-platform watermark readout (and sanitation where needed). */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
		skl_wm_sanitize(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/*
	 * Balance the power domain references taken for the readout:
	 * warn (and drop) if any crtc still holds domains here.
	 */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_power_domain_mask put_domains;

		modeset_get_crtc_power_domains(crtc_state, &put_domains);
		if (drm_WARN_ON(dev, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
			modeset_put_crtc_power_domains(crtc, &put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	intel_power_domains_sanitize_state(dev_priv);
}
  10407
/*
 * Restore the display state saved at suspend time (if any): take all
 * modeset locks (backing off and retrying on deadlock), replay the
 * saved atomic state via __intel_display_resume(), and re-enable IPC.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Consume the saved state; it is replayed at most once. */
	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Standard w/w locking dance: back off and retry on -EDEADLK. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&dev_priv->drm,
			"Restoring old state failed with %i\n", ret);
	/* Drop the reference held since suspend. */
	if (state)
		drm_atomic_state_put(state);
}
  10445
/*
 * Cancel all outstanding per-connector work items (modeset retries and
 * HDCP check/property work) queued by hotplug handling, synchronously.
 */
static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* A NULL func means the work was never initialized. */
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		/* HDCP work only exists for connectors with an hdcp shim. */
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
  10463
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/* Drain work still queued on the flip and modeset workqueues. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/*
	 * Make sure the deferred atomic-state freeing has run and nothing
	 * remains on the free list before teardown continues.
	 */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
  10476
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	/*
	 * NOTE: the teardown order below is deliberate; see the individual
	 * comments before reordering anything.
	 */

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* All queued work was flushed above; now the queues can go. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup(i915);
}
  10517
/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	/* Tear down DMC firmware state. */
	intel_dmc_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}
  10529
  10530bool intel_modeset_probe_defer(struct pci_dev *pdev)
  10531{
  10532	struct drm_privacy_screen *privacy_screen;
  10533
  10534	/*
  10535	 * apple-gmux is needed on dual GPU MacBook Pro
  10536	 * to probe the panel if we're the inactive GPU.
  10537	 */
  10538	if (vga_switcheroo_client_probe_defer(pdev))
  10539		return true;
  10540
  10541	/* If the LCD panel has a privacy-screen, wait for it */
  10542	privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
  10543	if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
  10544		return true;
  10545
  10546	drm_privacy_screen_put(privacy_screen);
  10547
  10548	return false;
  10549}
  10550
/**
 * intel_display_driver_register - register display-related userspace
 * interfaces (debugfs, opregion/ACPI, audio, fbdev, poll helper)
 * @i915: i915 device instance
 *
 * The registration order below matters; see the inline comments.
 */
void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	acpi_video_register();

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (leading to ghost connected
	 * connector status), e.g. VGA on gm45.  Hence we can only set
	 * up the initial fbdev config after hpd irqs are fully
	 * enabled. We do it last so that the async config cannot run
	 * before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}
  10581
/**
 * intel_display_driver_unregister - undo intel_display_driver_register()
 * @i915: i915 device instance
 *
 * Unregisters in (roughly) the reverse order of registration; fbdev must
 * be flushed before hotplug polling is shut down (see below).
 */
void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), then flush
	 * the hotplug events.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}
  10601
  10602bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
  10603{
  10604	return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
  10605}