cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

plane.c (18206B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
      4 */
      5
      6#include <linux/iommu.h>
      7#include <linux/interconnect.h>
      8
      9#include <drm/drm_atomic.h>
     10#include <drm/drm_atomic_helper.h>
     11#include <drm/drm_fourcc.h>
     12#include <drm/drm_gem_atomic_helper.h>
     13#include <drm/drm_plane_helper.h>
     14
     15#include "dc.h"
     16#include "plane.h"
     17
     18static void tegra_plane_destroy(struct drm_plane *plane)
     19{
     20	struct tegra_plane *p = to_tegra_plane(plane);
     21
     22	drm_plane_cleanup(plane);
     23	kfree(p);
     24}
     25
     26static void tegra_plane_reset(struct drm_plane *plane)
     27{
     28	struct tegra_plane *p = to_tegra_plane(plane);
     29	struct tegra_plane_state *state;
     30	unsigned int i;
     31
     32	if (plane->state)
     33		__drm_atomic_helper_plane_destroy_state(plane->state);
     34
     35	kfree(plane->state);
     36	plane->state = NULL;
     37
     38	state = kzalloc(sizeof(*state), GFP_KERNEL);
     39	if (state) {
     40		plane->state = &state->base;
     41		plane->state->plane = plane;
     42		plane->state->zpos = p->index;
     43		plane->state->normalized_zpos = p->index;
     44
     45		for (i = 0; i < 3; i++)
     46			state->iova[i] = DMA_MAPPING_ERROR;
     47	}
     48}
     49
     50static struct drm_plane_state *
     51tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
     52{
     53	struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
     54	struct tegra_plane_state *copy;
     55	unsigned int i;
     56
     57	copy = kmalloc(sizeof(*copy), GFP_KERNEL);
     58	if (!copy)
     59		return NULL;
     60
     61	__drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
     62	copy->tiling = state->tiling;
     63	copy->format = state->format;
     64	copy->swap = state->swap;
     65	copy->reflect_x = state->reflect_x;
     66	copy->reflect_y = state->reflect_y;
     67	copy->opaque = state->opaque;
     68	copy->total_peak_memory_bandwidth = state->total_peak_memory_bandwidth;
     69	copy->peak_memory_bandwidth = state->peak_memory_bandwidth;
     70	copy->avg_memory_bandwidth = state->avg_memory_bandwidth;
     71
     72	for (i = 0; i < 2; i++)
     73		copy->blending[i] = state->blending[i];
     74
     75	for (i = 0; i < 3; i++) {
     76		copy->iova[i] = DMA_MAPPING_ERROR;
     77		copy->map[i] = NULL;
     78	}
     79
     80	return &copy->base;
     81}
     82
/* Release a state previously created by tegra_plane_atomic_duplicate_state(). */
static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
					     struct drm_plane_state *state)
{
	__drm_atomic_helper_plane_destroy_state(state);
	kfree(state);
}
     89
     90static bool tegra_plane_supports_sector_layout(struct drm_plane *plane)
     91{
     92	struct drm_crtc *crtc;
     93
     94	drm_for_each_crtc(crtc, plane->dev) {
     95		if (plane->possible_crtcs & drm_crtc_mask(crtc)) {
     96			struct tegra_dc *dc = to_tegra_dc(crtc);
     97
     98			if (!dc->soc->supports_sector_layout)
     99				return false;
    100		}
    101	}
    102
    103	return true;
    104}
    105
    106static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
    107					     uint32_t format,
    108					     uint64_t modifier)
    109{
    110	const struct drm_format_info *info = drm_format_info(format);
    111
    112	if (modifier == DRM_FORMAT_MOD_LINEAR)
    113		return true;
    114
    115	/* check for the sector layout bit */
    116	if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
    117		if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
    118			if (!tegra_plane_supports_sector_layout(plane))
    119				return false;
    120		}
    121	}
    122
    123	if (info->num_planes == 1)
    124		return true;
    125
    126	return false;
    127}
    128
/*
 * Plane vtable shared by Tegra display planes: DRM atomic helpers for the
 * legacy entry points, plus the Tegra-specific state management implemented
 * above (which carries tiling, format, blending and bandwidth state).
 */
const struct drm_plane_funcs tegra_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = tegra_plane_destroy,
	.reset = tegra_plane_reset,
	.atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
	.atomic_destroy_state = tegra_plane_atomic_destroy_state,
	.format_mod_supported = tegra_plane_format_mod_supported,
};
    138
/*
 * Pin the buffers backing every plane of @state's framebuffer for DMA by the
 * display controller, recording the resulting addresses and mappings in
 * state->iova[] and state->map[].
 *
 * Returns 0 on success or a negative error code; on failure, mappings that
 * were already established are unwound again.
 */
static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
	unsigned int i;
	int err;

	for (i = 0; i < state->base.fb->format->num_planes; i++) {
		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
		struct host1x_bo_mapping *map;

		map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE, &dc->client.cache);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto unpin;
		}

		if (!dc->client.group) {
			/*
			 * The display controller needs contiguous memory, so
			 * fail if the buffer is discontiguous and we fail to
			 * map its SG table to a single contiguous chunk of
			 * I/O virtual memory.
			 */
			if (map->chunks > 1) {
				err = -EINVAL;
				goto unpin;
			}

			state->iova[i] = map->phys;
		} else {
			/* IOMMU group attached: use the BO's own IOVA */
			state->iova[i] = bo->iova;
		}

		state->map[i] = map;
	}

	return 0;

unpin:
	dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);

	/* roll back only the mappings that succeeded (plane i itself did not) */
	while (i--) {
		host1x_bo_unpin(state->map[i]);
		state->iova[i] = DMA_MAPPING_ERROR;
		state->map[i] = NULL;
	}

	return err;
}
    187
    188static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
    189{
    190	unsigned int i;
    191
    192	for (i = 0; i < state->base.fb->format->num_planes; i++) {
    193		host1x_bo_unpin(state->map[i]);
    194		state->iova[i] = DMA_MAPPING_ERROR;
    195		state->map[i] = NULL;
    196	}
    197}
    198
    199int tegra_plane_prepare_fb(struct drm_plane *plane,
    200			   struct drm_plane_state *state)
    201{
    202	struct tegra_dc *dc = to_tegra_dc(state->crtc);
    203	int err;
    204
    205	if (!state->fb)
    206		return 0;
    207
    208	err = drm_gem_plane_helper_prepare_fb(plane, state);
    209	if (err < 0)
    210		return err;
    211
    212	return tegra_dc_pin(dc, to_tegra_plane_state(state));
    213}
    214
    215void tegra_plane_cleanup_fb(struct drm_plane *plane,
    216			    struct drm_plane_state *state)
    217{
    218	struct tegra_dc *dc = to_tegra_dc(state->crtc);
    219
    220	if (dc)
    221		tegra_dc_unpin(dc, to_tegra_plane_state(state));
    222}
    223
/*
 * Estimate the average and peak memory bandwidth consumed by this plane in
 * the new atomic state and store the results (in ICC units) in the Tegra
 * plane state for later interconnect requests.
 *
 * Returns 0 on success (including the invisible-plane no-op case) or
 * -EINVAL if the new CRTC state cannot be retrieved.
 */
static int tegra_plane_calculate_memory_bandwidth(struct drm_plane_state *state)
{
	struct tegra_plane_state *tegra_state = to_tegra_plane_state(state);
	unsigned int i, bpp, dst_w, dst_h, src_w, src_h, mul;
	const struct tegra_dc_soc_info *soc;
	const struct drm_format_info *fmt;
	struct drm_crtc_state *crtc_state;
	u64 avg_bandwidth, peak_bandwidth;

	/* an invisible plane fetches no data */
	if (!state->visible)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
	if (!crtc_state)
		return -EINVAL;

	/* src rectangle is in 16.16 fixed point */
	src_w = drm_rect_width(&state->src) >> 16;
	src_h = drm_rect_height(&state->src) >> 16;
	dst_w = drm_rect_width(&state->dst);
	dst_h = drm_rect_height(&state->dst);

	fmt = state->fb->format;
	soc = to_tegra_dc(state->crtc)->soc;

	/*
	 * Note that real memory bandwidth vary depending on format and
	 * memory layout, we are not taking that into account because small
	 * estimation error isn't important since bandwidth is rounded up
	 * anyway.
	 */
	for (i = 0, bpp = 0; i < fmt->num_planes; i++) {
		unsigned int bpp_plane = fmt->cpp[i] * 8;

		/*
		 * Sub-sampling is relevant for chroma planes only and vertical
		 * readouts are not cached, hence only horizontal sub-sampling
		 * matters.
		 */
		if (i > 0)
			bpp_plane /= fmt->hsub;

		bpp += bpp_plane;
	}

	/* average bandwidth in kbytes/sec */
	avg_bandwidth  = min(src_w, dst_w) * min(src_h, dst_h);
	avg_bandwidth *= drm_mode_vrefresh(&crtc_state->adjusted_mode);
	avg_bandwidth  = DIV_ROUND_UP(avg_bandwidth * bpp, 8) + 999;
	do_div(avg_bandwidth, 1000);

	/* mode.clock in kHz, peak bandwidth in kbytes/sec */
	peak_bandwidth = DIV_ROUND_UP(crtc_state->adjusted_mode.clock * bpp, 8);

	/*
	 * Tegra30/114 Memory Controller can't interleave DC memory requests
	 * for the tiled windows because DC uses 16-bytes atom, while DDR3
	 * uses 32-bytes atom.  Hence there is x2 memory overfetch for tiled
	 * framebuffer and DDR3 on these SoCs.
	 */
	if (soc->plane_tiled_memory_bandwidth_x2 &&
	    tegra_state->tiling.mode == TEGRA_BO_TILING_MODE_TILED)
		mul = 2;
	else
		mul = 1;

	/* ICC bandwidth in kbytes/sec */
	tegra_state->peak_memory_bandwidth = kBps_to_icc(peak_bandwidth) * mul;
	tegra_state->avg_memory_bandwidth  = kBps_to_icc(avg_bandwidth)  * mul;

	return 0;
}
    295
    296int tegra_plane_state_add(struct tegra_plane *plane,
    297			  struct drm_plane_state *state)
    298{
    299	struct drm_crtc_state *crtc_state;
    300	struct tegra_dc_state *tegra;
    301	int err;
    302
    303	/* Propagate errors from allocation or locking failures. */
    304	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
    305	if (IS_ERR(crtc_state))
    306		return PTR_ERR(crtc_state);
    307
    308	/* Check plane state for visibility and calculate clipping bounds */
    309	err = drm_atomic_helper_check_plane_state(state, crtc_state,
    310						  0, INT_MAX, true, true);
    311	if (err < 0)
    312		return err;
    313
    314	err = tegra_plane_calculate_memory_bandwidth(state);
    315	if (err < 0)
    316		return err;
    317
    318	tegra = to_dc_state(crtc_state);
    319
    320	tegra->planes |= WIN_A_ACT_REQ << plane->index;
    321
    322	return 0;
    323}
    324
/*
 * Translate a DRM fourcc into the Tegra window color depth and, for formats
 * that the hardware fetches with a different byte order, a byte-swap
 * setting.
 *
 * Returns 0 on success, or -EINVAL if the fourcc is not supported or a
 * format requiring a byte swap is requested while @swap is NULL.
 */
int tegra_plane_format(u32 fourcc, u32 *format, u32 *swap)
{
	/* assume no swapping of fetched data */
	if (swap)
		*swap = BYTE_SWAP_NOSWAP;

	switch (fourcc) {
	case DRM_FORMAT_ARGB4444:
		*format = WIN_COLOR_DEPTH_B4G4R4A4;
		break;

	case DRM_FORMAT_ARGB1555:
		*format = WIN_COLOR_DEPTH_B5G5R5A1;
		break;

	case DRM_FORMAT_RGB565:
		*format = WIN_COLOR_DEPTH_B5G6R5;
		break;

	case DRM_FORMAT_RGBA5551:
		*format = WIN_COLOR_DEPTH_A1B5G5R5;
		break;

	case DRM_FORMAT_ARGB8888:
		*format = WIN_COLOR_DEPTH_B8G8R8A8;
		break;

	case DRM_FORMAT_ABGR8888:
		*format = WIN_COLOR_DEPTH_R8G8B8A8;
		break;

	case DRM_FORMAT_ABGR4444:
		*format = WIN_COLOR_DEPTH_R4G4B4A4;
		break;

	case DRM_FORMAT_ABGR1555:
		*format = WIN_COLOR_DEPTH_R5G5B5A;
		break;

	case DRM_FORMAT_BGRA5551:
		*format = WIN_COLOR_DEPTH_AR5G5B5;
		break;

	case DRM_FORMAT_XRGB1555:
		*format = WIN_COLOR_DEPTH_B5G5R5X1;
		break;

	case DRM_FORMAT_RGBX5551:
		*format = WIN_COLOR_DEPTH_X1B5G5R5;
		break;

	case DRM_FORMAT_XBGR1555:
		*format = WIN_COLOR_DEPTH_R5G5B5X1;
		break;

	case DRM_FORMAT_BGRX5551:
		*format = WIN_COLOR_DEPTH_X1R5G5B5;
		break;

	case DRM_FORMAT_BGR565:
		*format = WIN_COLOR_DEPTH_R5G6B5;
		break;

	case DRM_FORMAT_BGRA8888:
		*format = WIN_COLOR_DEPTH_A8R8G8B8;
		break;

	case DRM_FORMAT_RGBA8888:
		*format = WIN_COLOR_DEPTH_A8B8G8R8;
		break;

	case DRM_FORMAT_XRGB8888:
		*format = WIN_COLOR_DEPTH_B8G8R8X8;
		break;

	case DRM_FORMAT_XBGR8888:
		*format = WIN_COLOR_DEPTH_R8G8B8X8;
		break;

	case DRM_FORMAT_UYVY:
		*format = WIN_COLOR_DEPTH_YCbCr422;
		break;

	/* the packed YUV variants below differ from UYVY only by byte order */
	case DRM_FORMAT_YUYV:
		if (!swap)
			return -EINVAL;

		*format = WIN_COLOR_DEPTH_YCbCr422;
		*swap = BYTE_SWAP_SWAP2;
		break;

	case DRM_FORMAT_YVYU:
		if (!swap)
			return -EINVAL;

		*format = WIN_COLOR_DEPTH_YCbCr422;
		*swap = BYTE_SWAP_SWAP4;
		break;

	case DRM_FORMAT_VYUY:
		if (!swap)
			return -EINVAL;

		*format = WIN_COLOR_DEPTH_YCbCr422;
		*swap = BYTE_SWAP_SWAP4HW;
		break;

	case DRM_FORMAT_YUV420:
		*format = WIN_COLOR_DEPTH_YCbCr420P;
		break;

	case DRM_FORMAT_YUV422:
		*format = WIN_COLOR_DEPTH_YCbCr422P;
		break;

	case DRM_FORMAT_YUV444:
		*format = WIN_COLOR_DEPTH_YCbCr444P;
		break;

	case DRM_FORMAT_NV12:
		*format = WIN_COLOR_DEPTH_YCbCr420SP;
		break;

	case DRM_FORMAT_NV21:
		*format = WIN_COLOR_DEPTH_YCrCb420SP;
		break;

	case DRM_FORMAT_NV16:
		*format = WIN_COLOR_DEPTH_YCbCr422SP;
		break;

	case DRM_FORMAT_NV61:
		*format = WIN_COLOR_DEPTH_YCrCb422SP;
		break;

	case DRM_FORMAT_NV24:
		*format = WIN_COLOR_DEPTH_YCbCr444SP;
		break;

	case DRM_FORMAT_NV42:
		*format = WIN_COLOR_DEPTH_YCrCb444SP;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
    474
    475bool tegra_plane_format_is_indexed(unsigned int format)
    476{
    477	switch (format) {
    478	case WIN_COLOR_DEPTH_P1:
    479	case WIN_COLOR_DEPTH_P2:
    480	case WIN_COLOR_DEPTH_P4:
    481	case WIN_COLOR_DEPTH_P8:
    482		return true;
    483	}
    484
    485	return false;
    486}
    487
    488bool tegra_plane_format_is_yuv(unsigned int format, unsigned int *planes, unsigned int *bpc)
    489{
    490	switch (format) {
    491	case WIN_COLOR_DEPTH_YCbCr422:
    492	case WIN_COLOR_DEPTH_YUV422:
    493		if (planes)
    494			*planes = 1;
    495
    496		if (bpc)
    497			*bpc = 8;
    498
    499		return true;
    500
    501	case WIN_COLOR_DEPTH_YCbCr420P:
    502	case WIN_COLOR_DEPTH_YUV420P:
    503	case WIN_COLOR_DEPTH_YCbCr422P:
    504	case WIN_COLOR_DEPTH_YUV422P:
    505	case WIN_COLOR_DEPTH_YCbCr422R:
    506	case WIN_COLOR_DEPTH_YUV422R:
    507	case WIN_COLOR_DEPTH_YCbCr422RA:
    508	case WIN_COLOR_DEPTH_YUV422RA:
    509	case WIN_COLOR_DEPTH_YCbCr444P:
    510		if (planes)
    511			*planes = 3;
    512
    513		if (bpc)
    514			*bpc = 8;
    515
    516		return true;
    517
    518	case WIN_COLOR_DEPTH_YCrCb420SP:
    519	case WIN_COLOR_DEPTH_YCbCr420SP:
    520	case WIN_COLOR_DEPTH_YCrCb422SP:
    521	case WIN_COLOR_DEPTH_YCbCr422SP:
    522	case WIN_COLOR_DEPTH_YCrCb444SP:
    523	case WIN_COLOR_DEPTH_YCbCr444SP:
    524		if (planes)
    525			*planes = 2;
    526
    527		if (bpc)
    528			*bpc = 8;
    529
    530		return true;
    531	}
    532
    533	if (planes)
    534		*planes = 1;
    535
    536	return false;
    537}
    538
    539static bool __drm_format_has_alpha(u32 format)
    540{
    541	switch (format) {
    542	case DRM_FORMAT_ARGB1555:
    543	case DRM_FORMAT_RGBA5551:
    544	case DRM_FORMAT_ABGR8888:
    545	case DRM_FORMAT_ARGB8888:
    546		return true;
    547	}
    548
    549	return false;
    550}
    551
    552static int tegra_plane_format_get_alpha(unsigned int opaque,
    553					unsigned int *alpha)
    554{
    555	if (tegra_plane_format_is_yuv(opaque, NULL, NULL)) {
    556		*alpha = opaque;
    557		return 0;
    558	}
    559
    560	switch (opaque) {
    561	case WIN_COLOR_DEPTH_B5G5R5X1:
    562		*alpha = WIN_COLOR_DEPTH_B5G5R5A1;
    563		return 0;
    564
    565	case WIN_COLOR_DEPTH_X1B5G5R5:
    566		*alpha = WIN_COLOR_DEPTH_A1B5G5R5;
    567		return 0;
    568
    569	case WIN_COLOR_DEPTH_R8G8B8X8:
    570		*alpha = WIN_COLOR_DEPTH_R8G8B8A8;
    571		return 0;
    572
    573	case WIN_COLOR_DEPTH_B8G8R8X8:
    574		*alpha = WIN_COLOR_DEPTH_B8G8R8A8;
    575		return 0;
    576
    577	case WIN_COLOR_DEPTH_B5G6R5:
    578		*alpha = opaque;
    579		return 0;
    580	}
    581
    582	return -EINVAL;
    583}
    584
    585/*
    586 * This is applicable to Tegra20 and Tegra30 only where the opaque formats can
    587 * be emulated using the alpha formats and alpha blending disabled.
    588 */
    589static int tegra_plane_setup_opacity(struct tegra_plane *tegra,
    590				     struct tegra_plane_state *state)
    591{
    592	unsigned int format;
    593	int err;
    594
    595	switch (state->format) {
    596	case WIN_COLOR_DEPTH_B5G5R5A1:
    597	case WIN_COLOR_DEPTH_A1B5G5R5:
    598	case WIN_COLOR_DEPTH_R8G8B8A8:
    599	case WIN_COLOR_DEPTH_B8G8R8A8:
    600		state->opaque = false;
    601		break;
    602
    603	default:
    604		err = tegra_plane_format_get_alpha(state->format, &format);
    605		if (err < 0)
    606			return err;
    607
    608		state->format = format;
    609		state->opaque = true;
    610		break;
    611	}
    612
    613	return 0;
    614}
    615
    616static int tegra_plane_check_transparency(struct tegra_plane *tegra,
    617					  struct tegra_plane_state *state)
    618{
    619	struct drm_plane_state *old, *plane_state;
    620	struct drm_plane *plane;
    621
    622	old = drm_atomic_get_old_plane_state(state->base.state, &tegra->base);
    623
    624	/* check if zpos / transparency changed */
    625	if (old->normalized_zpos == state->base.normalized_zpos &&
    626	    to_tegra_plane_state(old)->opaque == state->opaque)
    627		return 0;
    628
    629	/* include all sibling planes into this commit */
    630	drm_for_each_plane(plane, tegra->base.dev) {
    631		struct tegra_plane *p = to_tegra_plane(plane);
    632
    633		/* skip this plane and planes on different CRTCs */
    634		if (p == tegra || p->dc != tegra->dc)
    635			continue;
    636
    637		plane_state = drm_atomic_get_plane_state(state->base.state,
    638							 plane);
    639		if (IS_ERR(plane_state))
    640			return PTR_ERR(plane_state);
    641	}
    642
    643	return 1;
    644}
    645
    646static unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
    647						  struct tegra_plane *other)
    648{
    649	unsigned int index = 0, i;
    650
    651	WARN_ON(plane == other);
    652
    653	for (i = 0; i < 3; i++) {
    654		if (i == plane->index)
    655			continue;
    656
    657		if (i == other->index)
    658			break;
    659
    660		index++;
    661	}
    662
    663	return index;
    664}
    665
/*
 * Refresh @state's blending[] descriptors against every other plane in the
 * new atomic state that shares @tegra's CRTC: record whether the overlapping
 * plane has an alpha format and whether it sits above this plane in zpos.
 */
static void tegra_plane_update_transparency(struct tegra_plane *tegra,
					    struct tegra_plane_state *state)
{
	struct drm_plane_state *new;
	struct drm_plane *plane;
	unsigned int i;

	for_each_new_plane_in_state(state->base.state, plane, new, i) {
		struct tegra_plane *p = to_tegra_plane(plane);
		unsigned index;

		/* skip this plane and planes on different CRTCs */
		if (p == tegra || p->dc != tegra->dc)
			continue;

		index = tegra_plane_get_overlap_index(tegra, p);

		if (new->fb && __drm_format_has_alpha(new->fb->format->format))
			state->blending[index].alpha = true;
		else
			state->blending[index].alpha = false;

		if (new->normalized_zpos > state->base.normalized_zpos)
			state->blending[index].top = true;
		else
			state->blending[index].top = false;

		/*
		 * Missing framebuffer means that plane is disabled, in this
		 * case mark B / C window as top to be able to differentiate
		 * windows indices order in regards to zPos for the middle
		 * window X / Y registers programming.
		 */
		if (!new->fb)
			state->blending[index].top = (index == 1);
	}
}
    703
    704static int tegra_plane_setup_transparency(struct tegra_plane *tegra,
    705					  struct tegra_plane_state *state)
    706{
    707	struct tegra_plane_state *tegra_state;
    708	struct drm_plane_state *new;
    709	struct drm_plane *plane;
    710	int err;
    711
    712	/*
    713	 * If planes zpos / transparency changed, sibling planes blending
    714	 * state may require adjustment and in this case they will be included
    715	 * into this atom commit, otherwise blending state is unchanged.
    716	 */
    717	err = tegra_plane_check_transparency(tegra, state);
    718	if (err <= 0)
    719		return err;
    720
    721	/*
    722	 * All planes are now in the atomic state, walk them up and update
    723	 * transparency state for each plane.
    724	 */
    725	drm_for_each_plane(plane, tegra->base.dev) {
    726		struct tegra_plane *p = to_tegra_plane(plane);
    727
    728		/* skip planes on different CRTCs */
    729		if (p->dc != tegra->dc)
    730			continue;
    731
    732		new = drm_atomic_get_new_plane_state(state->base.state, plane);
    733		tegra_state = to_tegra_plane_state(new);
    734
    735		/*
    736		 * There is no need to update blending state for the disabled
    737		 * plane.
    738		 */
    739		if (new->fb)
    740			tegra_plane_update_transparency(p, tegra_state);
    741	}
    742
    743	return 0;
    744}
    745
    746int tegra_plane_setup_legacy_state(struct tegra_plane *tegra,
    747				   struct tegra_plane_state *state)
    748{
    749	int err;
    750
    751	err = tegra_plane_setup_opacity(tegra, state);
    752	if (err < 0)
    753		return err;
    754
    755	err = tegra_plane_setup_transparency(tegra, state);
    756	if (err < 0)
    757		return err;
    758
    759	return 0;
    760}
    761
/*
 * Interconnect path names indexed by plane->index; a NULL entry makes
 * tegra_plane_interconnect_init() fail with -EINVAL for that plane.
 */
static const char * const tegra_plane_icc_names[TEGRA_DC_LEGACY_PLANES_NUM] = {
	"wina", "winb", "winc", NULL, NULL, NULL, "cursor",
};
    765
    766int tegra_plane_interconnect_init(struct tegra_plane *plane)
    767{
    768	const char *icc_name = tegra_plane_icc_names[plane->index];
    769	struct device *dev = plane->dc->dev;
    770	struct tegra_dc *dc = plane->dc;
    771	int err;
    772
    773	if (WARN_ON(plane->index >= TEGRA_DC_LEGACY_PLANES_NUM) ||
    774	    WARN_ON(!tegra_plane_icc_names[plane->index]))
    775		return -EINVAL;
    776
    777	plane->icc_mem = devm_of_icc_get(dev, icc_name);
    778	err = PTR_ERR_OR_ZERO(plane->icc_mem);
    779	if (err) {
    780		dev_err_probe(dev, err, "failed to get %s interconnect\n",
    781			      icc_name);
    782		return err;
    783	}
    784
    785	/* plane B on T20/30 has a dedicated memory client for a 6-tap vertical filter */
    786	if (plane->index == 1 && dc->soc->has_win_b_vfilter_mem_client) {
    787		plane->icc_mem_vfilter = devm_of_icc_get(dev, "winb-vfilter");
    788		err = PTR_ERR_OR_ZERO(plane->icc_mem_vfilter);
    789		if (err) {
    790			dev_err_probe(dev, err, "failed to get %s interconnect\n",
    791				      "winb-vfilter");
    792			return err;
    793		}
    794	}
    795
    796	return 0;
    797}