cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mdp5_plane.c (31045B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_print.h>

#include "mdp5_kms.h"

struct mdp5_plane {
	struct drm_plane base;

	uint32_t nformats;
	uint32_t formats[32];
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)

static int mdp5_plane_mode_set(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		struct drm_rect *src, struct drm_rect *dest);

static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static bool plane_enabled(struct drm_plane_state *state)
{
	return state->visible;
}

static void mdp5_plane_destroy(struct drm_plane *plane)
{
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);

	drm_plane_cleanup(plane);

	kfree(mdp5_plane);
}

/* helper to install properties which are common to planes and crtcs */
static void mdp5_plane_install_properties(struct drm_plane *plane,
		struct drm_mode_object *obj)
{
	unsigned int zpos;

	drm_plane_create_rotation_property(plane,
					   DRM_MODE_ROTATE_0,
					   DRM_MODE_ROTATE_0 |
					   DRM_MODE_ROTATE_180 |
					   DRM_MODE_REFLECT_X |
					   DRM_MODE_REFLECT_Y);
	drm_plane_create_alpha_property(plane);
	drm_plane_create_blend_mode_property(plane,
			BIT(DRM_MODE_BLEND_PIXEL_NONE) |
			BIT(DRM_MODE_BLEND_PREMULTI) |
			BIT(DRM_MODE_BLEND_COVERAGE));

	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
		zpos = STAGE_BASE;
	else
		zpos = STAGE0 + drm_plane_index(plane);
	drm_plane_create_zpos_property(plane, zpos, 1, 255);
}

static void
mdp5_plane_atomic_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
	struct mdp5_kms *mdp5_kms = get_kms(state->plane);

	drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
			pstate->hwpipe->name : "(null)");
	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright-hwpipe=%s\n",
			   pstate->r_hwpipe ? pstate->r_hwpipe->name :
					      "(null)");
	drm_printf(p, "\tblend_mode=%u\n", pstate->base.pixel_blend_mode);
	drm_printf(p, "\tzpos=%u\n", pstate->base.zpos);
	drm_printf(p, "\tnormalized_zpos=%u\n", pstate->base.normalized_zpos);
	drm_printf(p, "\talpha=%u\n", pstate->base.alpha);
	drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
}

static void mdp5_plane_reset(struct drm_plane *plane)
{
	struct mdp5_plane_state *mdp5_state;

	if (plane->state)
		__drm_atomic_helper_plane_destroy_state(plane->state);

	kfree(to_mdp5_plane_state(plane->state));
	plane->state = NULL;
	mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
	if (!mdp5_state)
		return;
	__drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
}

static struct drm_plane_state *
mdp5_plane_duplicate_state(struct drm_plane *plane)
{
	struct mdp5_plane_state *mdp5_state;

	if (WARN_ON(!plane->state))
		return NULL;

	mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
			sizeof(*mdp5_state), GFP_KERNEL);
	if (!mdp5_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);

	return &mdp5_state->base;
}

static void mdp5_plane_destroy_state(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);

	if (state->fb)
		drm_framebuffer_put(state->fb);

	kfree(pstate);
}

static const struct drm_plane_funcs mdp5_plane_funcs = {
		.update_plane = drm_atomic_helper_update_plane,
		.disable_plane = drm_atomic_helper_disable_plane,
		.destroy = mdp5_plane_destroy,
		.reset = mdp5_plane_reset,
		.atomic_duplicate_state = mdp5_plane_duplicate_state,
		.atomic_destroy_state = mdp5_plane_destroy_state,
		.atomic_print_state = mdp5_plane_atomic_print_state,
};

static int mdp5_plane_prepare_fb(struct drm_plane *plane,
				 struct drm_plane_state *new_state)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	struct msm_kms *kms = priv->kms;
	bool needs_dirtyfb = to_mdp5_plane_state(new_state)->needs_dirtyfb;

	if (!new_state->fb)
		return 0;

	drm_gem_plane_helper_prepare_fb(plane, new_state);

	return msm_framebuffer_prepare(new_state->fb, kms->aspace, needs_dirtyfb);
}

static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
				  struct drm_plane_state *old_state)
{
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_framebuffer *fb = old_state->fb;
	bool needed_dirtyfb = to_mdp5_plane_state(old_state)->needs_dirtyfb;

	if (!fb)
		return;

	DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
	msm_framebuffer_cleanup(fb, kms->aspace, needed_dirtyfb);
}
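
/*
 * prepare_fb runs before the commit touches hardware: it attaches the GEM
 * object's fences via drm_gem_plane_helper_prepare_fb() and pins the
 * framebuffer's backing storage into the KMS address space with
 * msm_framebuffer_prepare(); cleanup_fb drops that pin once the old
 * framebuffer is no longer scanned out. The needs_dirtyfb flag is simply
 * forwarded from the mdp5 plane state to the msm framebuffer helpers.
 */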

static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
					      struct drm_plane_state *state)
{
	struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
	struct drm_plane *plane = state->plane;
	struct drm_plane_state *old_state = plane->state;
	struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
	bool new_hwpipe = false;
	bool need_right_hwpipe = false;
	uint32_t max_width, max_height;
	bool out_of_bounds = false;
	uint32_t caps = 0;
	int min_scale, max_scale;
	int ret;

	DBG("%s: check (%d -> %d)", plane->name,
			plane_enabled(old_state), plane_enabled(state));

	max_width = config->hw->lm.max_width << 16;
	max_height = config->hw->lm.max_height << 16;

	/* Make sure source dimensions are within bounds. */
	if (state->src_h > max_height)
		out_of_bounds = true;

	if (state->src_w > max_width) {
		/* If source split is supported, we can go up to 2x
		 * the max LM width, but we'd need to stage another
		 * hwpipe to the right LM. So, the drm_plane would
		 * consist of 2 hwpipes.
		 */
		if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
		    (state->src_w <= 2 * max_width))
			need_right_hwpipe = true;
		else
			out_of_bounds = true;
	}

	if (out_of_bounds) {
		struct drm_rect src = drm_plane_state_src(state);
		DBG("Invalid source size "DRM_RECT_FP_FMT,
				DRM_RECT_FP_ARG(&src));
		return -ERANGE;
	}

	min_scale = FRAC_16_16(1, 8);
	max_scale = FRAC_16_16(8, 1);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  min_scale, max_scale,
						  true, true);
	if (ret)
		return ret;

	if (plane_enabled(state)) {
		unsigned int rotation;
		const struct mdp_format *format;
		struct mdp5_kms *mdp5_kms = get_kms(plane);
		uint32_t blkcfg = 0;

		format = to_mdp_format(msm_framebuffer_format(state->fb));
		if (MDP_FORMAT_IS_YUV(format))
			caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;

		if (((state->src_w >> 16) != state->crtc_w) ||
				((state->src_h >> 16) != state->crtc_h))
			caps |= MDP_PIPE_CAP_SCALE;

		rotation = drm_rotation_simplify(state->rotation,
						 DRM_MODE_ROTATE_0 |
						 DRM_MODE_REFLECT_X |
						 DRM_MODE_REFLECT_Y);

		if (rotation & DRM_MODE_REFLECT_X)
			caps |= MDP_PIPE_CAP_HFLIP;

		if (rotation & DRM_MODE_REFLECT_Y)
			caps |= MDP_PIPE_CAP_VFLIP;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			caps |= MDP_PIPE_CAP_CURSOR;

		/* (re)allocate hw pipe if we don't have one or caps-mismatch: */
		if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
			new_hwpipe = true;

		/*
		 * (re)allocate hw pipe if we're either requesting 2 hw pipes
		 * or we're switching from 2 hw pipes to 1 hw pipe because the
		 * new src_w can be supported by 1 hw pipe itself.
		 */
		if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
		    (!need_right_hwpipe && mdp5_state->r_hwpipe))
			new_hwpipe = true;

		if (mdp5_kms->smp) {
			const struct mdp_format *format =
				to_mdp_format(msm_framebuffer_format(state->fb));

			blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format,
					state->src_w >> 16, false);

			if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg))
				new_hwpipe = true;
		}

		/* (re)assign hwpipe if needed, otherwise keep old one: */
		if (new_hwpipe) {
			/* TODO maybe we want to re-assign hwpipe sometimes
			 * in cases where we no longer need some caps, to make
			 * it available for other planes?
			 */
			struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
			struct mdp5_hw_pipe *old_right_hwpipe =
							  mdp5_state->r_hwpipe;
			struct mdp5_hw_pipe *new_hwpipe = NULL;
			struct mdp5_hw_pipe *new_right_hwpipe = NULL;

			ret = mdp5_pipe_assign(state->state, plane, caps,
					       blkcfg, &new_hwpipe,
					       need_right_hwpipe ?
					       &new_right_hwpipe : NULL);
			if (ret) {
				DBG("%s: failed to assign hwpipe(s)!",
				    plane->name);
				return ret;
			}

			mdp5_state->hwpipe = new_hwpipe;
			if (need_right_hwpipe)
				mdp5_state->r_hwpipe = new_right_hwpipe;
			else
				/*
				 * set it to NULL so that the driver knows we
				 * don't have a right hwpipe when committing a
				 * new state
				 */
				mdp5_state->r_hwpipe = NULL;


			ret = mdp5_pipe_release(state->state, old_hwpipe);
			if (ret)
				return ret;

			ret = mdp5_pipe_release(state->state, old_right_hwpipe);
			if (ret)
				return ret;

		}
	} else {
		ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe);
		if (ret)
			return ret;

		ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
		if (ret)
			return ret;

		mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
	}

	return 0;
}
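
/*
 * Summary of the hwpipe handling above: a fresh assignment is requested
 * whenever (a) the plane has no hw pipe yet, (b) the required capabilities
 * (scaling, CSC, flip, cursor) are not a subset of the current pipe's caps,
 * (c) the need for a second "right" pipe appears or disappears, or (d) the
 * SMP block requirement changed. Otherwise the previously assigned pipe(s)
 * are kept across the commit.
 */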

static int mdp5_plane_atomic_check(struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
										 plane);
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;

	crtc = new_plane_state->crtc ? new_plane_state->crtc : old_plane_state->crtc;
	if (!crtc)
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state,
							crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	return mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
}

static void mdp5_plane_atomic_update(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);

	DBG("%s: update", plane->name);

	if (plane_enabled(new_state)) {
		int ret;

		ret = mdp5_plane_mode_set(plane,
				new_state->crtc, new_state->fb,
				&new_state->src, &new_state->dst);
		/* atomic_check should have ensured that this doesn't fail */
		WARN_ON(ret < 0);
	}
}

static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(new_plane_state);
	struct drm_crtc_state *crtc_state;
	int min_scale, max_scale;
	int ret;

	crtc_state = drm_atomic_get_existing_crtc_state(state,
							new_plane_state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	if (!crtc_state->active)
		return -EINVAL;

	/* don't use fast path if we don't have a hwpipe allocated yet */
	if (!mdp5_state->hwpipe)
		return -EINVAL;

	/* only allow changing of position(crtc x/y or src x/y) in fast path */
	if (plane->state->crtc != new_plane_state->crtc ||
	    plane->state->src_w != new_plane_state->src_w ||
	    plane->state->src_h != new_plane_state->src_h ||
	    plane->state->crtc_w != new_plane_state->crtc_w ||
	    plane->state->crtc_h != new_plane_state->crtc_h ||
	    !plane->state->fb ||
	    plane->state->fb != new_plane_state->fb)
		return -EINVAL;

	min_scale = FRAC_16_16(1, 8);
	max_scale = FRAC_16_16(8, 1);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  min_scale, max_scale,
						  true, true);
	if (ret)
		return ret;

	/*
	 * If the visibility of the plane changes (i.e. if the cursor is
	 * clipped out completely), we can't take the async path because
	 * we need to stage/unstage the plane from the Layer Mixer(s). We
	 * also assign/unassign the hwpipe(s) tied to the plane. We avoid
	 * taking the fast path for both these reasons.
	 */
	if (new_plane_state->visible != plane->state->visible)
		return -EINVAL;

	return 0;
}
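
/*
 * The async (cursor) fast path is therefore only taken when nothing but the
 * position changes: same CRTC, same framebuffer, identical src/crtc
 * dimensions, an already-allocated hw pipe, an active CRTC and unchanged
 * visibility. Everything else goes through the regular atomic commit.
 */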

static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_framebuffer *old_fb = plane->state->fb;

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;

	if (plane_enabled(new_state)) {
		struct mdp5_ctl *ctl;
		struct mdp5_pipeline *pipeline =
					mdp5_crtc_get_pipeline(new_state->crtc);
		int ret;

		ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
				&new_state->src, &new_state->dst);
		WARN_ON(ret < 0);

		ctl = mdp5_crtc_get_ctl(new_state->crtc);

		mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true);
	}

	*to_mdp5_plane_state(plane->state) =
		*to_mdp5_plane_state(new_state);

	new_state->fb = old_fb;
}
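
/*
 * The async update copies the whole mdp5 plane state (including the fb
 * pointer) into plane->state and then puts the previous fb back into
 * new_state->fb; this appears intended so that tearing down the transient
 * async state releases the reference on the old framebuffer rather than on
 * the one that is now being scanned out.
 */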

static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
		.prepare_fb = mdp5_plane_prepare_fb,
		.cleanup_fb = mdp5_plane_cleanup_fb,
		.atomic_check = mdp5_plane_atomic_check,
		.atomic_update = mdp5_plane_atomic_update,
		.atomic_async_check = mdp5_plane_atomic_async_check,
		.atomic_async_update = mdp5_plane_atomic_async_update,
};

static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
			       enum mdp5_pipe pipe,
			       struct drm_framebuffer *fb)
{
	struct msm_kms *kms = &mdp5_kms->base.base;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
			MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
			MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
			MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
			MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
			msm_framebuffer_iova(fb, kms->aspace, 0));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
			msm_framebuffer_iova(fb, kms->aspace, 1));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
			msm_framebuffer_iova(fb, kms->aspace, 2));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
			msm_framebuffer_iova(fb, kms->aspace, 3));
}

/* Note: mdp5_plane->pipe_lock must be locked */
static void csc_disable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe)
{
	uint32_t value = mdp5_read(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe)) &
			 ~MDP5_PIPE_OP_MODE_CSC_1_EN;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), value);
}

/* Note: mdp5_plane->pipe_lock must be locked */
static void csc_enable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
		struct csc_cfg *csc)
{
	uint32_t  i, mode = 0; /* RGB, no CSC */
	uint32_t *matrix;

	if (unlikely(!csc))
		return;

	if ((csc->type == CSC_YUV2RGB) || (CSC_YUV2YUV == csc->type))
		mode |= MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(DATA_FORMAT_YUV);
	if ((csc->type == CSC_RGB2YUV) || (CSC_YUV2YUV == csc->type))
		mode |= MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(DATA_FORMAT_YUV);
	mode |= MDP5_PIPE_OP_MODE_CSC_1_EN;
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), mode);

	matrix = csc->matrix;
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(matrix[0]) |
			MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(matrix[1]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(matrix[2]) |
			MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(matrix[3]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(matrix[4]) |
			MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(matrix[5]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(matrix[6]) |
			MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(matrix[7]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(matrix[8]));

	for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) {
		uint32_t *pre_clamp = csc->pre_clamp;
		uint32_t *post_clamp = csc->post_clamp;

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_CLAMP(pipe, i),
			MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(pre_clamp[2*i+1]) |
			MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(pre_clamp[2*i]));

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_CLAMP(pipe, i),
			MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(post_clamp[2*i+1]) |
			MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(post_clamp[2*i]));

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_BIAS(pipe, i),
			MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(csc->pre_bias[i]));

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_BIAS(pipe, i),
			MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(csc->post_bias[i]));
	}
}
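
/*
 * The CSC_1 block takes a 3x3 coefficient matrix plus per-component
 * pre/post clamp and bias values from the given csc_cfg. In this driver it
 * is only enabled for YUV source formats, using the default CSC_YUV2RGB
 * configuration (see mdp5_hwpipe_mode_set() below); for RGB formats the
 * CSC_1_EN bit is cleared again via csc_disable().
 */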

#define PHASE_STEP_SHIFT	21
#define DOWN_SCALE_RATIO_MAX	32	/* 2^(26-21) */

static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase)
{
	uint32_t unit;

	if (src == 0 || dst == 0)
		return -EINVAL;

	/*
	 * PHASE_STEP_X/Y is coded on 26 bits (25:0),
	 * where 2^21 represents the unity "1" in fixed-point hardware design.
	 * This leaves 5 bits for the integer part (downscale case):
	 *	-> maximum downscale ratio = 0b1_1111 = 31
	 */
	if (src > (dst * DOWN_SCALE_RATIO_MAX))
		return -EOVERFLOW;

	unit = 1 << PHASE_STEP_SHIFT;
	*out_phase = mult_frac(unit, src, dst);

	return 0;
}
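
/*
 * A worked example of the Q5.21 phase step above: unit = 1 << 21 = 0x200000
 * encodes a 1:1 ratio. A 2:1 downscale (e.g. src = 3840, dst = 1920) yields
 * mult_frac(0x200000, 3840, 1920) = 0x400000, while a 1:2 upscale
 * (src = 1920, dst = 3840) yields 0x100000. Ratios steeper than 32:1 exceed
 * DOWN_SCALE_RATIO_MAX and fail with -EOVERFLOW.
 */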

static int calc_scalex_steps(struct drm_plane *plane,
		uint32_t pixel_format, uint32_t src, uint32_t dest,
		uint32_t phasex_steps[COMP_MAX])
{
	const struct drm_format_info *info = drm_format_info(pixel_format);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	struct device *dev = mdp5_kms->dev->dev;
	uint32_t phasex_step;
	int ret;

	ret = calc_phase_step(src, dest, &phasex_step);
	if (ret) {
		DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
		return ret;
	}

	phasex_steps[COMP_0]   = phasex_step;
	phasex_steps[COMP_3]   = phasex_step;
	phasex_steps[COMP_1_2] = phasex_step / info->hsub;

	return 0;
}

static int calc_scaley_steps(struct drm_plane *plane,
		uint32_t pixel_format, uint32_t src, uint32_t dest,
		uint32_t phasey_steps[COMP_MAX])
{
	const struct drm_format_info *info = drm_format_info(pixel_format);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	struct device *dev = mdp5_kms->dev->dev;
	uint32_t phasey_step;
	int ret;

	ret = calc_phase_step(src, dest, &phasey_step);
	if (ret) {
		DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
		return ret;
	}

	phasey_steps[COMP_0]   = phasey_step;
	phasey_steps[COMP_3]   = phasey_step;
	phasey_steps[COMP_1_2] = phasey_step / info->vsub;

	return 0;
}

static uint32_t get_scale_config(const struct mdp_format *format,
		uint32_t src, uint32_t dst, bool horz)
{
	const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
	bool scaling = format->is_yuv ? true : (src != dst);
	uint32_t sub;
	uint32_t ya_filter, uv_filter;
	bool yuv = format->is_yuv;

	if (!scaling)
		return 0;

	if (yuv) {
		sub = horz ? info->hsub : info->vsub;
		uv_filter = ((src / sub) <= dst) ?
				   SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
	}
	ya_filter = (src <= dst) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;

	if (horz)
		return  MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
			MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(ya_filter) |
			MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(ya_filter) |
			COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter));
	else
		return  MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
			MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(ya_filter) |
			MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(ya_filter) |
			COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter));
}
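
/*
 * Bilinear filtering (SCALE_FILTER_BIL) is chosen for a component that is
 * upscaled or passed through 1:1, PCMN for one that is downscaled. As an
 * illustration, scaling an NV12 source (2x2 chroma subsampling) from 1920
 * to 960 pixels wide picks PCMN for the Y/alpha components (1920 > 960) but
 * bilinear for chroma, since 1920 / hsub = 960 <= 960.
 */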

static void calc_pixel_ext(const struct mdp_format *format,
		uint32_t src, uint32_t dst, uint32_t phase_step[2],
		int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX],
		bool horz)
{
	bool scaling = format->is_yuv ? true : (src != dst);
	int i;

	/*
	 * Note:
	 * We assume here that:
	 *     1. PCMN filter is used for downscale
	 *     2. bilinear filter is used for upscale
	 *     3. we are in a single pipe configuration
	 */

	for (i = 0; i < COMP_MAX; i++) {
		pix_ext_edge1[i] = 0;
		pix_ext_edge2[i] = scaling ? 1 : 0;
	}
}
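
/*
 * Under those assumptions the software pixel extension is trivial: whenever
 * any scaling is involved (always the case for YUV), every component asks
 * for 0 extra pixels on the left/top edge and 1 repeated pixel on the
 * right/bottom edge; for an unscaled RGB plane all extension values stay 0.
 */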

static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
	const struct mdp_format *format,
	uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX],
	uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX])
{
	const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
	uint32_t lr, tb, req;
	int i;

	for (i = 0; i < COMP_MAX; i++) {
		uint32_t roi_w = src_w;
		uint32_t roi_h = src_h;

		if (format->is_yuv && i == COMP_1_2) {
			roi_w /= info->hsub;
			roi_h /= info->vsub;
		}

		lr  = (pe_left[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(pe_left[i]) :
			MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(pe_left[i]);

		lr |= (pe_right[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(pe_right[i]) :
			MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(pe_right[i]);

		tb  = (pe_top[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(pe_top[i]) :
			MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(pe_top[i]);

		tb |= (pe_bottom[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(pe_bottom[i]) :
			MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(pe_bottom[i]);

		req  = MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(roi_w +
				pe_left[i] + pe_right[i]);

		req |= MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(roi_h +
				pe_top[i] + pe_bottom[i]);

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_LR(pipe, i), lr);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_TB(pipe, i), tb);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(pipe, i), req);

		DBG("comp-%d (L/R): rpt=%d/%d, ovf=%d/%d, req=%d", i,
			FIELD(lr,  MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT),
			FIELD(lr,  MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT),
			FIELD(lr,  MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF),
			FIELD(lr,  MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF),
			FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT));

		DBG("comp-%d (T/B): rpt=%d/%d, ovf=%d/%d, req=%d", i,
			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT),
			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT),
			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF),
			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF),
			FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM));
	}
}

struct pixel_ext {
	int left[COMP_MAX];
	int right[COMP_MAX];
	int top[COMP_MAX];
	int bottom[COMP_MAX];
};

struct phase_step {
	u32 x[COMP_MAX];
	u32 y[COMP_MAX];
};

static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
				 struct mdp5_hw_pipe *hwpipe,
				 struct drm_framebuffer *fb,
				 struct phase_step *step,
				 struct pixel_ext *pe,
				 u32 scale_config, u32 hdecm, u32 vdecm,
				 bool hflip, bool vflip,
				 int crtc_x, int crtc_y,
				 unsigned int crtc_w, unsigned int crtc_h,
				 u32 src_img_w, u32 src_img_h,
				 u32 src_x, u32 src_y,
				 u32 src_w, u32 src_h)
{
	enum mdp5_pipe pipe = hwpipe->pipe;
	bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
	const struct mdp_format *format =
			to_mdp_format(msm_framebuffer_format(fb));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
			MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) |
			MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
			MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
			MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
			MDP5_PIPE_SRC_XY_X(src_x) |
			MDP5_PIPE_SRC_XY_Y(src_y));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
			MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
			MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
			MDP5_PIPE_OUT_XY_X(crtc_x) |
			MDP5_PIPE_OUT_XY_Y(crtc_y));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
			MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
			MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
			MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
			MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
			COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
			MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
			MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
			COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
			MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
			MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
			MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
			MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
			MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
			MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
			(hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
			(vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
			COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
			MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));

	/* not using secure mode: */
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);

	if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
		mdp5_write_pixel_ext(mdp5_kms, pipe, format,
				src_w, pe->left, pe->right,
				src_h, pe->top, pe->bottom);

	if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
				step->x[COMP_0]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
				step->y[COMP_0]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
				step->x[COMP_1_2]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
				step->y[COMP_1_2]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
				MDP5_PIPE_DECIMATION_VERT(vdecm) |
				MDP5_PIPE_DECIMATION_HORZ(hdecm));
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
			   scale_config);
	}

	if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
		if (MDP_FORMAT_IS_YUV(format))
			csc_enable(mdp5_kms, pipe,
					mdp_get_default_csc_cfg(CSC_YUV2RGB));
		else
			csc_disable(mdp5_kms, pipe);
	}

	set_scanout_locked(mdp5_kms, pipe, fb);
}

static int mdp5_plane_mode_set(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		struct drm_rect *src, struct drm_rect *dest)
{
	struct drm_plane_state *pstate = plane->state;
	struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	enum mdp5_pipe pipe = hwpipe->pipe;
	struct mdp5_hw_pipe *right_hwpipe;
	const struct mdp_format *format;
	uint32_t nplanes, config = 0;
	struct phase_step step = { { 0 } };
	struct pixel_ext pe = { { 0 } };
	uint32_t hdecm = 0, vdecm = 0;
	uint32_t pix_format;
	unsigned int rotation;
	bool vflip, hflip;
	int crtc_x, crtc_y;
	unsigned int crtc_w, crtc_h;
	uint32_t src_x, src_y;
	uint32_t src_w, src_h;
	uint32_t src_img_w, src_img_h;
	int ret;

	nplanes = fb->format->num_planes;

	/* bad formats should already be rejected: */
	if (WARN_ON(nplanes > pipe2nclients(pipe)))
		return -EINVAL;

	format = to_mdp_format(msm_framebuffer_format(fb));
	pix_format = format->base.pixel_format;

	src_x = src->x1;
	src_y = src->y1;
	src_w = drm_rect_width(src);
	src_h = drm_rect_height(src);

	crtc_x = dest->x1;
	crtc_y = dest->y1;
	crtc_w = drm_rect_width(dest);
	crtc_h = drm_rect_height(dest);

	/* src values are in Q16 fixed point, convert to integer: */
	src_x = src_x >> 16;
	src_y = src_y >> 16;
	src_w = src_w >> 16;
	src_h = src_h >> 16;

	src_img_w = min(fb->width, src_w);
	src_img_h = min(fb->height, src_h);

	DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
			fb->base.id, src_x, src_y, src_w, src_h,
			crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);

	right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe;
	if (right_hwpipe) {
		/*
		 * If the plane comprises 2 hw pipes, assume that the width
		 * is split equally across them. The only parameters that vary
		 * between the 2 pipes are src_x and crtc_x.
		 */
		crtc_w /= 2;
		src_w /= 2;
		src_img_w /= 2;
	}
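
	/*
	 * An illustrative split with made-up numbers: a 4096-pixel-wide
	 * source on hardware whose layer mixer tops out at 2560 pixels ends
	 * up as two hw pipes of 2048 pixels each; the right pipe is
	 * programmed below with src_x + src_w and crtc_x + crtc_w so that it
	 * fetches and displays the right half.
	 */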

	ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
	if (ret)
		return ret;

	ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y);
	if (ret)
		return ret;

	if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
		calc_pixel_ext(format, src_w, crtc_w, step.x,
			       pe.left, pe.right, true);
		calc_pixel_ext(format, src_h, crtc_h, step.y,
			       pe.top, pe.bottom, false);
	}

	/* TODO calc hdecm, vdecm */

	/* SCALE is used to both scale and up-sample chroma components */
	config |= get_scale_config(format, src_w, crtc_w, true);
	config |= get_scale_config(format, src_h, crtc_h, false);
	DBG("scale config = %x", config);

	rotation = drm_rotation_simplify(pstate->rotation,
					 DRM_MODE_ROTATE_0 |
					 DRM_MODE_REFLECT_X |
					 DRM_MODE_REFLECT_Y);
	hflip = !!(rotation & DRM_MODE_REFLECT_X);
	vflip = !!(rotation & DRM_MODE_REFLECT_Y);

	mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
			     config, hdecm, vdecm, hflip, vflip,
			     crtc_x, crtc_y, crtc_w, crtc_h,
			     src_img_w, src_img_h,
			     src_x, src_y, src_w, src_h);
	if (right_hwpipe)
		mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
				     config, hdecm, vdecm, hflip, vflip,
				     crtc_x + crtc_w, crtc_y, crtc_w, crtc_h,
				     src_img_w, src_img_h,
				     src_x + src_w, src_y, src_w, src_h);

	return ret;
}

/*
 * Use this func and the one below only after the atomic state has been
 * successfully swapped
 */
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);

	if (WARN_ON(!pstate->hwpipe))
		return SSPP_NONE;

	return pstate->hwpipe->pipe;
}

enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);

	if (!pstate->r_hwpipe)
		return SSPP_NONE;

	return pstate->r_hwpipe->pipe;
}

uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
	u32 mask;

	if (WARN_ON(!pstate->hwpipe))
		return 0;

	mask = pstate->hwpipe->flush_mask;

	if (pstate->r_hwpipe)
		mask |= pstate->r_hwpipe->flush_mask;

	return mask;
}
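
/*
 * When a plane spans two hw pipes, the returned mask is the union of both
 * pipes' flush bits, so a single CTL flush (as in the async update path
 * above) updates the left and right halves together.
 */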

/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
				  enum drm_plane_type type)
{
	struct drm_plane *plane = NULL;
	struct mdp5_plane *mdp5_plane;
	int ret;

	mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
	if (!mdp5_plane) {
		ret = -ENOMEM;
		goto fail;
	}

	plane = &mdp5_plane->base;

	mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
		ARRAY_SIZE(mdp5_plane->formats), false);

	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
			mdp5_plane->formats, mdp5_plane->nformats,
			NULL, type, NULL);
	if (ret)
		goto fail;

	drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);

	mdp5_plane_install_properties(plane, &plane->base);

	drm_plane_enable_fb_damage_clips(plane);

	return plane;

fail:
	if (plane)
		mdp5_plane_destroy(plane);

	return ERR_PTR(ret);
}