cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mdp4_crtc.c (17983B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mdp4_kms.h"
#include "msm_gem.h"

struct mdp4_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	int ovlp;
	enum mdp4_dma dma;
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	struct {
		spinlock_t lock;
		bool stale;
		uint32_t width, height;
		uint32_t x, y;

		/* next cursor to scan-out: */
		uint32_t next_iova;
		struct drm_gem_object *next_bo;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
	} cursor;

	/* if there is a pending flip, this will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits that were flushed at the last commit, used to decide
	 * if a vsync has happened since the last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)

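/* look up the mdp4_kms instance backing this crtc's device: */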
static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp4_kms(to_mdp_kms(priv->kms));
}

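/* note a pending cursor update and/or flip and register for the vblank
 * irq, so the update can be completed from the irq handler:
 */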
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	atomic_or(pending, &mdp4_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
}

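/* gather the flush bits for every plane on this crtc plus its overlay
 * engine, remember them in flushed_mask, and kick the hw flush:
 */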
static void crtc_flush(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	uint32_t flush = 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		flush |= pipe2flush(pipe_id);
	}

	flush |= ovlp2flush(mdp4_crtc->ovlp);

	DBG("%s: flush=%08x", mdp4_crtc->name, flush);

	mdp4_crtc->flushed_mask = flush;

	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp4_crtc->event;
	if (event) {
		mdp4_crtc->event = NULL;
		DBG("%s: send event: %p", mdp4_crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

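/* flip-work callback: unpin and release a cursor bo once it is no
 * longer being scanned out:
 */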
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, unref_cursor_work);
	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
	struct msm_kms *kms = &mdp4_kms->base.base;

	msm_gem_unpin_iova(val, kms->aspace);
	drm_gem_object_put(val);
}

static void mdp4_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);

	kfree(mdp4_crtc);
}

/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
		[VG1]  = 1,
		[VG2]  = 2,
		[RGB1] = 0,
		[RGB2] = 0,
		[RGB3] = 0,
		[VG3]  = 3,
		[VG4]  = 4,
};

/* setup mixer config, for which we need to consider all crtc's and
 * the planes attached to them
 *
 * TODO may possibly need some extra locking here
 */
static void setup_mixer(struct mdp4_kms *mdp4_kms)
{
	struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
	struct drm_crtc *crtc;
	uint32_t mixer_cfg = 0;
	static const enum mdp_mixer_stage_id stages[] = {
			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
	};

	list_for_each_entry(crtc, &config->crtc_list, head) {
		struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
		struct drm_plane *plane;

		drm_atomic_crtc_for_each_plane(plane, crtc) {
			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
			int idx = idxs[pipe_id];
			mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
					pipe_id, stages[idx]);
		}
	}

	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}

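/* configure blending for each mixer stage: stages fed by a plane whose
 * format carries an alpha channel blend with per-pixel foreground
 * alpha, everything else uses constant (opaque) alpha:
 */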
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	int i, ovlp = mdp4_crtc->ovlp;
	bool alpha[4] = { false, false, false, false };

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		int idx = idxs[pipe_id];
		if (idx > 0) {
			const struct mdp_format *format =
					to_mdp_format(msm_framebuffer_format(plane->state->fb));
			alpha[idx-1] = format->alpha_enable;
		}
	}

	for (i = 0; i < 4; i++) {
		uint32_t op;

		if (alpha[i]) {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
		} else {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
		}

		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
	}

	setup_mixer(mdp4_kms);
}

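/* program DMA and overlay (OVLP) geometry for the new adjusted mode: */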
static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	int ovlp = mdp4_crtc->ovlp;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT,
			mdp4_crtc->name, DRM_MODE_ARG(mode));

	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));

	/* take data from pipe: */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
			MDP4_DMA_DST_SIZE_WIDTH(0) |
			MDP4_DMA_DST_SIZE_HEIGHT(0));

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);

	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}
}

static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(!mdp4_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
	mdp4_disable(mdp4_kms);

	mdp4_crtc->enabled = false;
}

static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(mdp4_crtc->enabled))
		return;

	mdp4_enable(mdp4_kms);

	/* Restore vblank irq handling after power is enabled */
	drm_crtc_vblank_on(crtc);

	mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);

	crtc_flush(crtc);

	mdp4_crtc->enabled = true;
}

static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: check", mdp4_crtc->name);
	// TODO anything else to check?
	return 0;
}

static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: begin", mdp4_crtc->name);
}

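/* stash the pending vblank event, reprogram blending and flush; the
 * event is sent from the vblank irq via complete_flip():
 */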
static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);

	WARN_ON(mdp4_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp4_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	blend_setup(crtc);
	crtc_flush(crtc);
	request_pending(crtc, PENDING_FLIP);
}

#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

/* called from IRQ to update cursor related registers (if needed).  The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct msm_kms *kms = &mdp4_kms->base.base;
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint64_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take an obj ref + iova ref when we start scanning out: */
			drm_gem_object_get(next_bo);
			msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
					mdp4_kms->blank_cursor_iova);
		}

		/* and drop the iova ref + obj ref when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));

	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}

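/* legacy cursor ioctl: pin the new bo and stage it under the cursor
 * lock; the registers are actually updated at vblank in update_cursor():
 */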
static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct msm_kms *kms = &mdp4_kms->base.base;
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint64_t iova;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo   = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width     = width;
	mdp4_crtc->cursor.height    = height;
	mdp4_crtc->cursor.stale     = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
	}

	request_pending(crtc, PENDING_CURSOR);

	return 0;

fail:
	drm_gem_object_put(cursor_bo);
	return ret;
}

static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	mdp4_crtc->cursor.x = x;
	mdp4_crtc->cursor.y = y;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	crtc_flush(crtc);
	request_pending(crtc, PENDING_CURSOR);

	return 0;
}

static const struct drm_crtc_funcs mdp4_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp4_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.cursor_set = mdp4_crtc_cursor_set,
	.cursor_move = mdp4_crtc_cursor_move,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
	.mode_set_nofb = mdp4_crtc_mode_set_nofb,
	.atomic_check = mdp4_crtc_atomic_check,
	.atomic_begin = mdp4_crtc_atomic_begin,
	.atomic_flush = mdp4_crtc_atomic_flush,
	.atomic_enable = mdp4_crtc_atomic_enable,
	.atomic_disable = mdp4_crtc_atomic_disable,
};

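/* vblank irq handler: completes a pending flip and/or applies a staged
 * cursor update, then commits the deferred cursor-unref work:
 */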
static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);

	pending = atomic_xchg(&mdp4_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR) {
		update_cursor(crtc);
		drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
	}
}

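/* error irq handler (e.g. underflow): log the status and re-flush: */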
static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
	crtc_flush(crtc);
}

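/* block (up to 50ms) until the hw has cleared the bits recorded in
 * flushed_mask, i.e. until the last flush has been latched by vsync:
 */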
static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	int ret;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		!(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) &
			mdp4_crtc->flushed_mask),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id);

	mdp4_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	return mdp4_crtc->vblank.irqmask;
}

/* set dma config, i.e. the format the encoder wants. */
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
}

/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t intf_sel;

	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);

	switch (mdp4_crtc->dma) {
	case DMA_P:
		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
		break;
	case DMA_S:
		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
		break;
	case DMA_E:
		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
		break;
	}

	if (intf == INTF_DSI_VIDEO) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
	} else if (intf == INTF_DSI_CMD) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
	}

	mdp4_crtc->mixer = mixer;

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);

	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}

void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	/* wait_for_flush_done is the only case for now.
	 * Later we will have command mode CRTCs to wait on
	 * other events.
	 */
	mdp4_crtc_wait_for_flush_done(crtc);
}

static const char *dma_names[] = {
		"DMA_P", "DMA_S", "DMA_E",
};

/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id, int ovlp_id,
		enum mdp4_dma dma_id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp4_crtc *mdp4_crtc;

	mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
	if (!mdp4_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp4_crtc->base;

	mdp4_crtc->id = id;

	mdp4_crtc->ovlp = ovlp_id;
	mdp4_crtc->dma = dma_id;

	mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
	mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;

	mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
	mdp4_crtc->err.irq = mdp4_crtc_err_irq;

	snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
			dma_names[dma_id], ovlp_id);

	spin_lock_init(&mdp4_crtc->cursor.lock);

	drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
				  NULL);
	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);

	return crtc;
}