cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dpu_crtc.c (43624B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

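/*
 * drm_color_ctm coefficients are S31.32 sign-magnitude fixed point:
 * clearing bit 63 drops the sign, shifting right by 17 reduces the 32
 * fractional bits to 15, and the mask keeps 3 integer + 15 fractional
 * bits. E.g. a CTM entry of 1.0 (1ULL << 32) becomes 0x8000 (1 << 15).
 */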
#define	CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
{
	if (!src_name ||
	    !strcmp(src_name, "none"))
		return DPU_CRTC_CRC_SOURCE_NONE;
	if (!strcmp(src_name, "auto") ||
	    !strcmp(src_name, "lm"))
		return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;

	return DPU_CRTC_CRC_SOURCE_INVALID;
}

static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
		const char *src_name, size_t *values_cnt)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);

	if (source < 0) {
		DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
		*values_cnt = crtc_state->num_mixers;

	return 0;
}

static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	enum dpu_crtc_crc_source current_source;
	struct dpu_crtc_state *crtc_state;
	struct drm_device *drm_dev = crtc->dev;
	struct dpu_crtc_mixer *m;

	bool was_enabled;
	bool enable = false;
	int i, ret = 0;

	if (source < 0) {
		DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	ret = drm_modeset_lock(&crtc->mutex, NULL);

	if (ret)
		return ret;

	enable = (source != DPU_CRTC_CRC_SOURCE_NONE);
	crtc_state = to_dpu_crtc_state(crtc->state);

	spin_lock_irq(&drm_dev->event_lock);
	current_source = crtc_state->crc_source;
	spin_unlock_irq(&drm_dev->event_lock);

	was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE);

	if (!was_enabled && enable) {
		ret = drm_crtc_vblank_get(crtc);

		if (ret)
			goto cleanup;

	} else if (was_enabled && !enable) {
		drm_crtc_vblank_put(crtc);
	}

	spin_lock_irq(&drm_dev->event_lock);
	crtc_state->crc_source = source;
	spin_unlock_irq(&drm_dev->event_lock);

	crtc_state->crc_frame_skip_count = 0;

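	/*
	 * Arm MISR collection on each mixer. The per-frame signatures are
	 * read back from the vblank path via dpu_crtc_get_crc().
	 */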
	for (i = 0; i < crtc_state->num_mixers; ++i) {
		m = &crtc_state->mixers[i];

		if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
			continue;

		/* Calculate MISR over 1 frame */
		m->hw_lm->ops.setup_misr(m->hw_lm, true, 1);
	}

cleanup:
	drm_modeset_unlock(&crtc->mutex);

	return ret;
}

static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder = get_encoder_from_crtc(crtc);

	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
		return 0;
	}

	return dpu_encoder_get_vsync_count(encoder);
}

static int dpu_crtc_get_crc(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	struct dpu_crtc_mixer *m;
	u32 crcs[CRTC_DUAL_MIXERS];

	int i = 0;
	int rc = 0;

	crtc_state = to_dpu_crtc_state(crtc->state);

	BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));

	/* Skip first 2 frames in case of "uncooked" CRCs */
	if (crtc_state->crc_frame_skip_count < 2) {
		crtc_state->crc_frame_skip_count++;
		return 0;
	}

	for (i = 0; i < crtc_state->num_mixers; ++i) {
		m = &crtc_state->mixers[i];

		if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
			continue;

		rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);

		if (rc) {
			if (rc != -ENODATA)
				DRM_DEBUG_DRIVER("MISR read failed\n");
			return rc;
		}
	}

	return drm_crtc_add_crc_entry(crtc, true,
			drm_crtc_accurate_vblank_count(crtc), crcs);
}

static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
					   bool in_vblank_irq,
					   int *vpos, int *hpos,
					   ktime_t *stime, ktime_t *etime,
					   const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */

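	/*
	 * E.g. for a hypothetical 1080p timing with vsw = 4, vbp = 36 and
	 * vtotal = 1125: vactive_start = 41 and vactive_end = 1121, so a
	 * raw line count of 41 maps to vpos 0 (first active line) while a
	 * count of 1122 (inside VFP) maps to 1122 - 1125 - 41 = -44, i.e.
	 * 44 lines before the next active region.
	 */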
	vactive_start = vsw + vbp + 1;
	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = dpu_encoder_get_linecount(encoder);

	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	uint32_t fg_alpha, bg_alpha;

	fg_alpha = pstate->base.alpha >> 8;
	bg_alpha = 0xff - fg_alpha;

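	/*
	 * Three cases follow: opaque blending uses constant FG/BG alpha,
	 * premultiplied blending keeps FG pixels as-is (already scaled by
	 * alpha) and weights BG by the inverse FG alpha, and coverage
	 * blending scales FG by its per-pixel alpha and BG by the inverse.
	 */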
	/* default to opaque blending */
	if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
	    !format->alpha_enable) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_BG_CONST;
	} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_BG_MOD_ALPHA |
				    DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	} else {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_FG_MOD_ALPHA |
				    DPU_BLEND_FG_INV_MOD_ALPHA |
				    DPU_BLEND_BG_MOD_ALPHA |
				    DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				fg_alpha, bg_alpha, blend_op);

	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
		  &format->base.pixel_format, format->alpha_enable, blend_op);
}

static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer,
	struct dpu_hw_stage_cfg *stage_cfg)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;
	DECLARE_BITMAP(fetch_active, SSPP_MAX);

	memset(fetch_active, 0, sizeof(fetch_active));
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
		set_bit(dpu_plane_pipe(plane), fetch_active);

		DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	if (ctl->ops.set_active_pipes)
		ctl->ops.set_active_pipes(ctl, fetch_active);

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	struct dpu_hw_stage_cfg stage_cfg;
	int i;

	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP that are cached in dpu_crtc->event.
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode.
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;

	dpu_crtc_get_crc(crtc);

	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API with the encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. The encoder may invoke these
 * events from different contexts - IRQ, user thread, commit_thread, etc.
 * Each event should be carefully reviewed and should be processed in the
 * proper task context to avoid scheduling delay or to properly manage the
 * IRQ context's bottom half processing.
 */
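/*
 * Note the allocation-free design: fevent structs are preallocated in
 * dpu_crtc_init() and recycled through dpu_crtc->frame_event_list, so this
 * callback stays safe in IRQ context; if the pool is exhausted, the event
 * is dropped with a rate-limited error.
 */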
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
	dpu_core_perf_crtc_update(crtc, 0, false);
	_dpu_crtc_complete_flip(crtc);
}

static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

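	/*
	 * Tile the mode horizontally across the mixers, e.g. a 3840-wide
	 * mode on two mixers yields the bounds [0, 1920) and [1920, 3840).
	 */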
	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];

		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}
}

static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}

static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed)
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
			mixer[i].hw_dspp->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);
	}
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * The PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before the FLUSH and START triggers
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before the SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	WARN_ON(dpu_crtc->event);
	spin_lock_irqsave(&dev->event_lock, flags);
	dpu_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DRM_DEBUG_ATOMIC("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (!dpu_encoder_is_valid_for_commit(encoder)) {
			DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n");
			goto end;
		}
	}

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
	} else {
		DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);
	}

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);

end:
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	if (cstate)
		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);

	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_atomic_print_state(struct drm_printer *p,
					const struct drm_crtc_state *state)
{
	const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0);
		drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0);
		if (cstate->mixers[i].hw_dspp)
			drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0);
	}
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				     atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *encoder;
	bool request_bandwidth = false;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
{
	struct drm_crtc *crtc = cstate->crtc;
	struct drm_encoder *encoder;

	drm_for_each_encoder_mask(encoder, crtc->dev, cstate->encoder_mask) {
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD)
			return true;
	}

	return false;
}

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
	struct plane_state *pstates;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };
	bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);

	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	if (!crtc_state->enable || !crtc_state->active) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, crtc_state->enable,
				crtc_state->active);
		memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
		goto end;
	}

	mode = &crtc_state->adjusted_mode;
	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (crtc_state->active_changed)
		crtc_state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	if (cstate->num_mixers) {
		mixer_width = mode->hdisplay / cstate->num_mixers;

		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
	}

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate);
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = dpu_pstate;
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		dpu_pstate->needs_dirtyfb = needs_dirtyfb;

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

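	/*
	 * Assign hardware blend stages from the normalized zpos, allowing
	 * at most two planes per stage on each side of the source split.
	 */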
	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/* validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so it's valid to compare
	 * without taking into account left/right mixer placement
	 */
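	/*
	 * E.g. two planes sharing a stage on a 3840-wide CRTC pass only if
	 * they tile it exactly (left dest [0, 1920), right dest
	 * [1920, 3840)) with identical y offset and height, and the left
	 * plane has the lower plane id.
	 */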
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}

	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
				dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dentry *debugfs_root;

	debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);

	debugfs_create_file("status", 0400,
			debugfs_root,
			dpu_crtc, &_dpu_debugfs_status_fops);
	debugfs_create_file("state", 0600,
			debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.atomic_print_state = dpu_crtc_atomic_print_state,
	.late_register = dpu_crtc_late_register,
	.verify_crc_source = dpu_crtc_verify_crc_source,
	.set_crc_source = dpu_crtc_set_crc_source,
	.enable_vblank = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	.get_vblank_counter = dpu_crtc_get_vblank_counter,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
	.get_scanout_position = dpu_crtc_get_scanout_position,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}