cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mdp5_crtc.c (38524B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/sort.h>

#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mdp5_kms.h"
#include "msm_gem.h"

#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

struct mdp5_crtc {
	struct drm_crtc base;
	int id;
	bool enabled;

	spinlock_t lm_lock;     /* protect REG_MDP5_LM_* registers */

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits that have been flushed at the last commit,
	 * used to decide if a vsync has happened since the last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;

	struct completion pp_completion;

	bool lm_cursor_enabled;

	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
		spinlock_t lock;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint64_t iova;
		uint32_t width, height;
		int x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	reinit_completion(&mdp5_crtc->pp_completion);
}

static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	bool start = !mdp5_cstate->defer_start;

	mdp5_cstate->defer_start = false;

	DBG("%s: flush=%08x", crtc->name, flush_mask);

	return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
}

/*
 * Flush updates, to make sure hw is updated to the new scanout fb,
 * so that we can safely queue unref to the current fb (i.e. on the
 * next vblank we know hw is done with the previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_hw_mixer *mixer, *r_mixer;
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* this should not happen: */
	if (WARN_ON(!mdp5_cstate->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (!plane->state->visible)
			continue;
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	mixer = mdp5_cstate->pipeline.mixer;
	flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);

	r_mixer = mdp5_cstate->pipeline.r_mixer;
	if (r_mixer)
		flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	return crtc_flush(crtc, flush_mask);
}

/* if file != NULL, this is the preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		mdp5_crtc->event = NULL;
		DBG("%s: send event: %p", crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
		/* XXX: What to do here? */
		/* mdp5_crtc->ctl = NULL; */
	}
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
	struct msm_kms *kms = &mdp5_kms->base.base;

	msm_gem_unpin_iova(val, kms->aspace);
	drm_gem_object_put(val);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}

static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
	switch (stage) {
	case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
	case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
	case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
	case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
	case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
	case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
	case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
	default:
		return 0;
	}
}

/*
 * left/right pipe offsets for the stage array used in blend_setup()
 */
#define PIPE_LEFT	0
#define PIPE_RIGHT	1

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
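/*
 * For reference, a sketch of the blend equations the modes below aim for,
 * assuming the standard DRM blend-mode definitions (fg_alpha here is the
 * plane alpha, base.alpha >> 8 in the code):
 *
 *   FG_CONST/BG_CONST:       out = fg * fg_alpha + bg * bg_alpha
 *   DRM_MODE_BLEND_PREMULTI: out = fg * fg_alpha +
 *                                  bg * (1 - fg.pixel_alpha * fg_alpha)
 *   DRM_MODE_BLEND_COVERAGE: out = fg * fg.pixel_alpha * fg_alpha +
 *                                  bg * (1 - fg.pixel_alpha * fg_alpha)
 */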
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
	const struct mdp_format *format;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	uint32_t lm = mixer->lm;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
	unsigned long flags;
	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	int i, plane_cnt = 0;
	bool bg_alpha_enabled = false;
	u32 mixer_op_mode = 0;
	u32 val;
#define blender(stage)	((stage) - STAGE0)

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	/* XXX: Can this happen now? */
	if (!ctl)
		goto out;

	/* Collect all plane information */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp5_pipe right_pipe;

		if (!plane->state->visible)
			continue;

		pstate = to_mdp5_plane_state(plane->state);
		pstates[pstate->stage] = pstate;
		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
		/*
		 * if we have a right mixer, stage the same pipe as we
		 * have on the left mixer
		 */
		if (r_mixer)
			r_stage[pstate->stage][PIPE_LEFT] =
						mdp5_plane_pipe(plane);
		/*
		 * if we have a right pipe (i.e. the plane comprises two
		 * hwpipes), then stage the right pipe on the right side
		 * of both the layer mixers
		 */
		right_pipe = mdp5_plane_right_pipe(plane);
		if (right_pipe) {
			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
		}

		plane_cnt++;
	}

	if (!pstates[STAGE_BASE]) {
		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
		DBG("Border Color is enabled");
	} else if (plane_cnt) {
		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

		if (format->alpha_enable)
			bg_alpha_enabled = true;
	}

	/* Program the blend registers for each staged plane */
	for (i = STAGE0; i <= STAGE_MAX; i++) {
		if (!pstates[i])
			continue;

		format = to_mdp_format(
			msm_framebuffer_format(pstates[i]->base.fb));
		plane = pstates[i]->base.plane;
		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
		fg_alpha = pstates[i]->base.alpha >> 8;
		bg_alpha = 0xFF - fg_alpha;

		if (!format->alpha_enable && bg_alpha_enabled)
			mixer_op_mode = 0;
		else
			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

		if (format->alpha_enable &&
		    pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		} else if (format->alpha_enable &&
			   pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
			if (fg_alpha != 0xff) {
				bg_alpha = fg_alpha;
				blend_op |=
				       MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
				       MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
			} else {
				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
			}
		}

		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
				blender(i)), blend_op);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(i)), fg_alpha);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(i)), bg_alpha);
		if (r_mixer) {
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
					blender(i)), blend_op);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
					blender(i)), fg_alpha);
			mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
					blender(i)), bg_alpha);
		}
	}

	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
		   val | mixer_op_mode);
	if (r_mixer) {
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
			   val | mixer_op_mode);
	}

	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
		       ctl_blend_flags);
out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
	struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
	uint32_t lm = mixer->lm;
	u32 mixer_width, val;
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));

	mixer_width = mode->hdisplay;
	if (r_mixer)
		mixer_width /= 2;

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
			MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

	/* Assign mixer to LEFT side in source split mode */
	val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
	val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

	if (r_mixer) {
		u32 r_lm = r_mixer->lm;

		mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
			   MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
			   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

		/* Assign mixer to RIGHT side in source split mode */
		val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
		val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
	}

	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc,
					   bool in_vblank_irq,
					   int *vpos, int *hpos,
					   ktime_t *stime, ktime_t *etime,
					   const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */
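	/*
	 * Worked example (hypothetical 1080p timings: vdisplay = 1080,
	 * vsync_start = 1084, vsync_end = 1089, vtotal = 1125): vsw = 5
	 * and vbp = 36, so vactive_start = 42, vactive_end = 1122 and
	 * vfp_end = 1125. A line counter of 42 then maps to vpos 0 (the
	 * first active line), while 1123 (front porch) maps to vpos -44,
	 * i.e. 44 lines before the next active start.
	 */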

	vactive_start = vsw + vbp + 1;

	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = mdp5_encoder_get_linecount(encoder);

	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder)
		return 0;

	return mdp5_encoder_get_framecount(encoder);
}

static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	DBG("%s", crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	pm_runtime_put_sync(dev);

	if (crtc->state->event && !crtc->state->active) {
		WARN_ON(mdp5_crtc->event);
		spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
	}

	mdp5_crtc->enabled = false;
}

static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
	u32 count;

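	/*
	 * A max vblank count of 0 tells the DRM core that no free-running
	 * hardware frame counter is available (assuming the usual
	 * drm_vblank semantics), which is the case for DSI command mode;
	 * video-mode interfaces expose a full 32-bit frame counter.
	 */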
	count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
	drm_crtc_set_max_vblank_count(crtc, count);

	drm_crtc_vblank_on(crtc);
}

static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;

	DBG("%s", crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	pm_runtime_get_sync(dev);

	if (mdp5_crtc->lm_cursor_enabled) {
		/*
		 * Restore LM cursor state, as it might have been lost
		 * with suspend:
		 */
		if (mdp5_crtc->cursor.iova) {
			unsigned long flags;

			spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
			mdp5_crtc_restore_cursor(crtc);
			spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, true);
		} else {
			mdp5_ctl_set_cursor(mdp5_cstate->ctl,
					    &mdp5_cstate->pipeline, 0, false);
		}
	}

	/* Restore vblank irq handling after power is enabled */
	mdp5_crtc_vblank_on(crtc);

	mdp5_crtc_mode_set_nofb(crtc);

	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_cstate->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}

static int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
				    struct drm_crtc_state *new_crtc_state,
				    bool need_right_mixer)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_interface *intf;
	bool new_mixer = false;

	new_mixer = !pipeline->mixer;

	if ((need_right_mixer && !pipeline->r_mixer) ||
	    (!need_right_mixer && pipeline->r_mixer))
		new_mixer = true;

	if (new_mixer) {
		struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
		struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
		u32 caps;
		int ret;

		caps = MDP_LM_CAP_DISPLAY;
		if (need_right_mixer)
			caps |= MDP_LM_CAP_PAIR;

		ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
					&pipeline->mixer, need_right_mixer ?
					&pipeline->r_mixer : NULL);
		if (ret)
			return ret;

		ret = mdp5_mixer_release(new_crtc_state->state, old_mixer);
		if (ret)
			return ret;

		if (old_r_mixer) {
			ret = mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
			if (ret)
				return ret;

			if (!need_right_mixer)
				pipeline->r_mixer = NULL;
		}
	}

	/*
	 * these should already have been set up in the encoder's atomic
	 * check (called by drm_atomic_helper_check_modeset)
	 */
	intf = pipeline->intf;

	mdp5_cstate->err_irqmask = intf2err(intf->num);
	mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
		mdp5_cstate->cmd_mode = true;
	} else {
		mdp5_cstate->pp_done_irqmask = 0;
		mdp5_cstate->cmd_mode = false;
	}

	return 0;
}

struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->base.normalized_zpos - pb->state->base.normalized_zpos;
}

/* is there a helper for this? */
static bool is_fullscreen(struct drm_crtc_state *cstate,
		struct drm_plane_state *pstate)
{
	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
		((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}
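/*
 * For example (hypothetical values): on a 1920x1080 mode, a plane at
 * crtc_x/y = (0, 0) with crtc_w/h = 1920x1080 is fullscreen, while a
 * 1280x720 overlay at (100, 100) is not, so get_start_stage() below
 * keeps the base stage free for solid/border color.
 */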

static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
					struct drm_crtc_state *new_crtc_state,
					struct drm_plane_state *bpstate)
{
	struct mdp5_crtc_state *mdp5_cstate =
			to_mdp5_crtc_state(new_crtc_state);

	/*
	 * if we're in source split mode, it's mandatory to have
	 * border out on the base stage
	 */
	if (mdp5_cstate->pipeline.r_mixer)
		return STAGE0;

	/* if the bottom-most layer is not fullscreen, we need to keep
	 * the base stage free for solid (border) color:
	 */
	if (!is_fullscreen(new_crtc_state, bpstate))
		return STAGE0;

	return STAGE_BASE;
}
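/*
 * For example (hypothetical, assuming the usual enum ordering where
 * STAGE0 follows STAGE_BASE): with three visible planes and a fullscreen
 * bottom layer, mdp5_crtc_atomic_check() assigns the zpos-sorted planes
 * to STAGE_BASE, STAGE0 and STAGE1; with a non-fullscreen bottom layer
 * (or a right mixer), start is STAGE0 and the base stage is left for
 * border color.
 */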

static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
	struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE_MAX + 1];
	const struct mdp5_cfg_hw *hw_cfg;
	const struct drm_plane_state *pstate;
	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
	bool cursor_plane = false;
	bool need_right_mixer = false;
	int cnt = 0, i;
	int ret;
	enum mdp_mixer_stage_id start;

	DBG("%s: check", crtc->name);

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct mdp5_plane_state *mdp5_pstate =
				to_mdp5_plane_state(pstate);

		if (!pstate->visible)
			continue;

		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		mdp5_pstate->needs_dirtyfb =
			intf->mode == MDP5_INTF_DSI_MODE_COMMAND;

		/*
		 * if any plane on this crtc uses 2 hwpipes, then we need
		 * the crtc to have a right hwmixer.
		 */
		if (pstates[cnt].state->r_hwpipe)
			need_right_mixer = true;
		cnt++;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			cursor_plane = true;
	}

	/* bail out early if there aren't any planes */
	if (!cnt)
		return 0;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/*
	 * we need a right hwmixer if the mode's width is greater than a single
	 * LM's max width
	 */
	if (mode->hdisplay > hw_cfg->lm.max_width)
		need_right_mixer = true;

	ret = mdp5_crtc_setup_pipeline(crtc, crtc_state, need_right_mixer);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
		return ret;
	}

	/* assign a stage based on sorted zpos property */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	/* trigger a warning if cursor isn't the highest zorder */
	WARN_ON(cursor_plane &&
		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

	start = get_start_stage(crtc, crtc_state, &pstates[0].state->base);

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
		DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
			cnt, start);
		return -EINVAL;
	}

	for (i = 0; i < cnt; i++) {
		if (cursor_plane && (i == (cnt - 1)))
			pstates[i].state->stage = hw_cfg->lm.nb_stages;
		else
			pstates[i].state->stage = start + i;
		DBG("%s: assign pipe %s on stage=%d", crtc->name,
				pstates[i].plane->name,
				pstates[i].state->stage);
	}

	return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	DBG("%s: begin", crtc->name);
}

static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	/* XXX: Can this happen now ? */
	if (unlikely(!mdp5_cstate->ctl))
		return;

	blend_setup(crtc);

	/* PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before the FLUSH and START
	 * triggers, to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before the SW
	 * trigger in command mode.
	 */
	if (mdp5_cstate->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	/* XXX are we leaking out state here? */
	mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
	mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
	mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

	request_pending(crtc, PENDING_FLIP);
}

static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * The cursor Region Of Interest (ROI) is the part of the cursor
	 * buffer that is read out and rendered. The ROI region is determined
	 * by the visibility of the cursor point. In the default cursor image
	 * the cursor point is at the top left of the cursor image.
	 *
	 * Without rotation:
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly.
	 * (xres - x) will be the new cursor width when x > (xres - cursor.width)
	 * (yres - y) will be the new cursor height when y > (yres - cursor.height)
	 *
	 * With rotation:
	 * We get negative x and/or y coordinates.
	 * (cursor.width - abs(x)) will be the new cursor width when x < 0
	 * (cursor.height - abs(y)) will be the new cursor height when y < 0
	 */
	if (mdp5_crtc->cursor.x >= 0)
		*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	else
		*roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
	if (mdp5_crtc->cursor.y >= 0)
		*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
	else
		*roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
}
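/*
 * Worked example (hypothetical values): with a 64x64 cursor on a
 * 1920x1080 mode at x = 1900, y = 40, the cursor hangs past the right
 * edge, so roi_w = min(64, 1920 - 1900) = 20 while roi_h = min(64,
 * 1080 - 40) = 64. With rotation giving x = -10, roi_w becomes
 * 64 - abs(-10) = 54 instead.
 */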

static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ARGB8888);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t blendcfg, stride;
	uint32_t x, y, src_x, src_y, width, height;
	uint32_t roi_w, roi_h;
	int lm;

	assert_spin_locked(&mdp5_crtc->cursor.lock);

	lm = mdp5_cstate->pipeline.mixer->lm;

	x = mdp5_crtc->cursor.x;
	y = mdp5_crtc->cursor.y;
	width = mdp5_crtc->cursor.width;
	height = mdp5_crtc->cursor.height;

	stride = width * info->cpp[0];

	get_roi(crtc, &roi_w, &roi_h);

	/* If the cursor buffer overlaps due to rotation on the
	 * upper or left screen border, the pixel offset inside
	 * the cursor buffer of the ROI is the positive overlap
	 * distance.
	 */
	if (mdp5_crtc->cursor.x < 0) {
		src_x = abs(mdp5_crtc->cursor.x);
		x = 0;
	} else {
		src_x = 0;
	}
	if (mdp5_crtc->cursor.y < 0) {
		src_y = abs(mdp5_crtc->cursor.y);
		y = 0;
	} else {
		src_y = 0;
	}
	DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
		crtc->name, x, y, roi_w, roi_h, src_x, src_y);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
			MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
			MDP5_LM_CURSOR_XY_SRC_X(src_x));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
			mdp5_crtc->cursor.iova);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}

static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct platform_device *pdev = mdp5_kms->pdev;
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	struct mdp5_ctl *ctl;
	int ret;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	bool cursor_enable = true;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_set is deprecated with cursor planes\n");
		return -EINVAL;
	}

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	ctl = mdp5_cstate->ctl;
	if (!ctl)
		return -EINVAL;

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		mdp5_crtc->cursor.iova = 0;
		pm_runtime_get_sync(&pdev->dev);
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
			&mdp5_crtc->cursor.iova);
	if (ret) {
		drm_gem_object_put(cursor_bo);
		return -EINVAL;
	}

	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	mdp5_crtc_restore_cursor(crtc);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	pm_runtime_put_sync(&pdev->dev);
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}
	return ret;
}

static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	struct drm_device *dev = crtc->dev;
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	if (!mdp5_crtc->lm_cursor_enabled) {
		dev_warn(dev->dev,
			 "cursor_move is deprecated with cursor planes\n");
		return -EINVAL;
	}

	/* don't support LM cursors when we have source split enabled */
	if (mdp5_cstate->pipeline.r_mixer)
		return -EINVAL;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	/* accept negative x/y coordinates up to maximum cursor overlap */
	mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
	mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);

	get_roi(crtc, &roi_w, &roi_h);

	pm_runtime_get_sync(&mdp5_kms->pdev->dev);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_crtc_restore_cursor(crtc);
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	pm_runtime_put_sync(&mdp5_kms->pdev->dev);

	return 0;
}

static void
mdp5_crtc_atomic_print_state(struct drm_printer *p,
			     const struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
	struct mdp5_kms *mdp5_kms = get_kms(state->crtc);

	if (WARN_ON(!pipeline))
		return;

	if (mdp5_cstate->ctl)
		drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));

	drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
			pipeline->mixer->name : "(null)");

	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
			   pipeline->r_mixer->name : "(null)");

	drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
}

static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc->state))
		return NULL;

	mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
			      sizeof(*mdp5_cstate), GFP_KERNEL);
	if (!mdp5_cstate)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);

	return &mdp5_cstate->base;
}

static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(mdp5_cstate);
}

static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate =
		kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);

	if (crtc->state)
		mdp5_crtc_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
}

static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
	.get_vblank_counter = mdp5_crtc_get_vblank_counter,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = mdp5_crtc_reset,
	.atomic_duplicate_state = mdp5_crtc_duplicate_state,
	.atomic_destroy_state = mdp5_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
	.atomic_print_state = mdp5_crtc_atomic_print_state,
	.get_vblank_counter = mdp5_crtc_get_vblank_counter,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
	.atomic_enable = mdp5_crtc_atomic_enable,
	.atomic_disable = mdp5_crtc_atomic_disable,
	.get_scanout_position = mdp5_crtc_get_scanout_position,
};

static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}

static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
								pp_done);

	complete_all(&mdp5_crtc->pp_completion);
}

static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
						msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
				     mdp5_cstate->pipeline.mixer->lm);
}

static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	/* should this be done elsewhere? */
	mdp_irq_update(&mdp5_kms->base);

	mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}

struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return mdp5_cstate->ctl;
}

struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
		ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
}

struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate;

	if (WARN_ON(!crtc))
		return ERR_PTR(-EINVAL);

	mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	return &mdp5_cstate->pipeline;
}

void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

	if (mdp5_cstate->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
				struct drm_plane *plane,
				struct drm_plane *cursor_plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
	mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
	mdp5_crtc->lm_cursor_enabled = !cursor_plane;

	drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
				  cursor_plane ?
				  &mdp5_crtc_no_lm_cursor_funcs :
				  &mdp5_crtc_funcs, NULL);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);

	return crtc;
}