cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dpu_rm.c (16952B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_dsc.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

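/*
 * reserved_by_other - return true if the resource at @idx is reserved by an
 * encoder other than @enc_id (a zero entry means the resource is free)
 */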
static inline bool reserved_by_other(uint32_t *res_map, int idx,
				     uint32_t enc_id)
{
	return res_map[idx] && res_map[idx] != enc_id;
}

/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology:  selected topology for the display
 */
struct dpu_rm_requirements {
	struct msm_display_topology topology;
};

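/*
 * dpu_rm_destroy - free all HW block objects (DSPPs, pingpongs, merge-3Ds,
 * mixers, CTLs, interfaces, DSCs, writebacks) created by dpu_rm_init
 */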
int dpu_rm_destroy(struct dpu_rm *rm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
		struct dpu_hw_dspp *hw;

		if (rm->dspp_blks[i]) {
			hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
			dpu_hw_dspp_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
		struct dpu_hw_pingpong *hw;

		if (rm->pingpong_blks[i]) {
			hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
			dpu_hw_pingpong_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) {
		struct dpu_hw_merge_3d *hw;

		if (rm->merge_3d_blks[i]) {
			hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]);
			dpu_hw_merge_3d_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
		struct dpu_hw_mixer *hw;

		if (rm->mixer_blks[i]) {
			hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
			dpu_hw_lm_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
		struct dpu_hw_ctl *hw;

		if (rm->ctl_blks[i]) {
			hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
			dpu_hw_ctl_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++)
		dpu_hw_intf_destroy(rm->hw_intf[i]);

	for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) {
		struct dpu_hw_dsc *hw;

		if (rm->dsc_blks[i]) {
			hw = to_dpu_hw_dsc(rm->dsc_blks[i]);
			dpu_hw_dsc_destroy(hw);
		}
	}

	for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++)
		dpu_hw_wb_destroy(rm->hw_wb[i]);

	return 0;
}

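/*
 * dpu_rm_init - interrogate the HW catalog and create a tracking object for
 * each block it describes; on any failure, all blocks created so far are
 * released via dpu_rm_destroy
 */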
int dpu_rm_init(struct dpu_rm *rm,
		struct dpu_mdss_cfg *cat,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid input params\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		if (lm->pingpong == PINGPONG_MAX) {
			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
			continue;
		}

		if (lm->id < LM_0 || lm->id >= LM_MAX) {
			DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
			continue;
		}
		hw = dpu_hw_lm_init(lm->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;
	}

	for (i = 0; i < cat->merge_3d_count; i++) {
		struct dpu_hw_merge_3d *hw;
		const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];

		if (merge_3d->id < MERGE_3D_0 || merge_3d->id >= MERGE_3D_MAX) {
			DPU_ERROR("skip merge_3d %d with invalid id\n", merge_3d->id);
			continue;
		}
		hw = dpu_hw_merge_3d_init(merge_3d->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed merge_3d object creation: err %d\n",
				rc);
			goto fail;
		}
		rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
			DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
			continue;
		}
		hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
				rc);
			goto fail;
		}
		if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
			hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		if (intf->type == INTF_NONE) {
			DPU_DEBUG("skip intf %d with type none\n", i);
			continue;
		}
		if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
			DPU_ERROR("skip intf %d with invalid id\n", intf->id);
			continue;
		}
		hw = dpu_hw_intf_init(intf->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_intf[intf->id - INTF_0] = hw;
	}

	for (i = 0; i < cat->wb_count; i++) {
		struct dpu_hw_wb *hw;
		const struct dpu_wb_cfg *wb = &cat->wb[i];

		if (wb->id < WB_0 || wb->id >= WB_MAX) {
			DPU_ERROR("skip wb %d with invalid id\n", wb->id);
			continue;
		}

		hw = dpu_hw_wb_init(wb->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed wb object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_wb[wb->id - WB_0] = hw;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
			DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
			continue;
		}
		hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	for (i = 0; i < cat->dspp_count; i++) {
		struct dpu_hw_dspp *hw;
		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];

		if (dspp->id < DSPP_0 || dspp->id >= DSPP_MAX) {
			DPU_ERROR("skip dspp %d with invalid id\n", dspp->id);
			continue;
		}
		hw = dpu_hw_dspp_init(dspp->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
	}

	for (i = 0; i < cat->dsc_count; i++) {
		struct dpu_hw_dsc *hw;
		const struct dpu_dsc_cfg *dsc = &cat->dsc[i];

		hw = dpu_hw_dsc_init(dsc->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dsc object creation: err %d\n", rc);
			goto fail;
		}
		rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
	}

	return 0;

fail:
	dpu_rm_destroy(rm);

	return rc ? rc : -EFAULT;
}

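/*
 * _dpu_rm_needs_split_display - a topology that drives more than one
 * interface requires CTLs with split-display support
 */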
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 * @peer_idx: index of other mixer in rm->mixer_blks[]
 * Return: true if rm->mixer_blks[peer_idx] is a peer of
 *          rm->mixer_blks[primary_idx]
 */
static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
		int peer_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;
	const struct dpu_lm_cfg *peer_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
	peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;

	if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
		DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
				prim_lm_cfg->id);
		return false;
	}
	return true;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
 *	proposed use case requirements, incl. hardwired dependent blocks like
 *	pingpong
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting for allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
 *      if the lm, and all other hardwired blocks connected to it (pp), are
 *      available and appropriate
 * @pp_idx: output parameter, index of pingpong block attached to the layer
 *      mixer in rm->pingpong_blks[].
 * @dspp_idx: output parameter, index of dspp block attached to the layer
 *      mixer in rm->dspp_blks[].
 * @reqs: input parameter, rm requirements for HW blocks needed in the
 *      datapath.
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
		struct dpu_rm_requirements *reqs)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
		return false;
	}

	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
				lm_cfg->pingpong);
		return false;
	}
	*pp_idx = idx;

	if (!reqs->topology.num_dspp)
		return true;

	idx = lm_cfg->dspp - DSPP_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
		DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
		return false;
	}

	if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
				lm_cfg->dspp);
		return false;
	}
	*dspp_idx = idx;

	return true;
}

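/*
 * _dpu_rm_reserve_lms - find a free primary mixer whose hardwired pingpong
 * (and dspp, if requested) is also free, then collect enough free peer
 * mixers to satisfy topology.num_lm; reservations are committed to
 * @global_state only when the full set is found
 */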
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t enc_id,
			       struct dpu_rm_requirements *reqs)
{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int dspp_idx[MAX_BLOCKS] = {0};
	int i, j, lm_count = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
			lm_count < reqs->topology.num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

		lm_count = 0;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				enc_id, i, &pp_idx[lm_count],
				&dspp_idx[lm_count], reqs)) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
				lm_count < reqs->topology.num_lm; j++) {
			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_peer(rm, i, j)) {
				DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
						LM_0 + i);
				continue;
			}

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, enc_id, j,
					&pp_idx[lm_count], &dspp_idx[lm_count],
					reqs)) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
		global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
		global_state->dspp_to_enc_id[dspp_idx[i]] =
			reqs->topology.num_dspp ? enc_id : 0;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}

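/*
 * _dpu_rm_reserve_ctls - reserve one CTL per interface in the topology,
 * matching each CTL's split-display capability against the topology's needs
 */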
static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	/* each hw_intf needs its own hw_ctl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features);

		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("ctl %d match\n", j + CTL_0);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
		trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, enc_id);
	}

	return 0;
}

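/*
 * _dpu_rm_reserve_dsc - claim the first top->num_dsc DSC blocks for the
 * encoder; fails with -EIO if any of those slots is already allocated
 */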
static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       struct drm_encoder *enc,
			       const struct msm_display_topology *top)
{
	int num_dsc = top->num_dsc;
	int i;

	/* check if the required DSC blocks are already allocated */
	for (i = 0; i < num_dsc; i++) {
		if (global_state->dsc_to_enc_id[i]) {
			DPU_ERROR("DSC %d is already allocated\n", i);
			return -EIO;
		}
	}

	for (i = 0; i < num_dsc; i++)
		global_state->dsc_to_enc_id[i] = enc->base.id;

	return 0;
}

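/*
 * _dpu_rm_make_reservation - reserve the mixers, CTLs and DSC blocks
 * required by the encoder's topology, failing on the first resource type
 * that cannot be satisfied
 */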
static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
				&reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	return _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
}

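/*
 * _dpu_rm_populate_requirements - capture the requested display topology
 * as the reservation requirements for this encoder
 */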
static int _dpu_rm_populate_requirements(
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_enc,
		      reqs->topology.num_intf);

	return 0;
}

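/*
 * _dpu_rm_clear_mapping - clear every entry in a resource-to-encoder map
 * that is currently held by @enc_id
 */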
static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
				  uint32_t enc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == enc_id)
			res_mapping[i] = 0;
	}
}

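/*
 * dpu_rm_release - drop all pingpong, mixer, CTL and DSC reservations held
 * by the encoder in @global_state
 */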
void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_encoder *enc)
{
	_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
		ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
		ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
		ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
}

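/*
 * dpu_rm_reserve - reserve HW resources for an encoder during atomic check;
 * a commit that needs no modeset (e.g. a plain page-flip) is a no-op
 */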
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology)
{
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to get global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
		      enc->base.id, crtc_state->crtc->base.id);

	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		return ret;
	}

	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}

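/*
 * dpu_rm_get_assigned_resources - fill @blks with the HW blocks of @type
 * reserved for @enc_id and return how many were found, capped at @blks_size
 */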
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
	struct dpu_global_state *global_state, uint32_t enc_id,
	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_enc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_enc_id = global_state->pingpong_to_enc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_enc_id = global_state->mixer_to_enc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_enc_id = global_state->ctl_to_enc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_DSPP:
		hw_blks = rm->dspp_blks;
		hw_to_enc_id = global_state->dspp_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dspp_blks);
		break;
	case DPU_HW_BLK_DSC:
		hw_blks = rm->dsc_blks;
		hw_to_enc_id = global_state->dsc_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dsc_blks);
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_enc_id[i] != enc_id)
			continue;

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to enc %d\n",
				  blks_size, enc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}