cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dpu_kms.c (34620B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>

#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_gem.h"
#include "disp/msm_disp_snapshot.h"

#include "dpu_core_irq.h"
#include "dpu_crtc.h"
#include "dpu_encoder.h"
#include "dpu_formats.h"
#include "dpu_hw_vbif.h"
#include "dpu_kms.h"
#include "dpu_plane.h"
#include "dpu_vbif.h"
#include "dpu_writeback.h"

#define CREATE_TRACE_POINTS
#include "dpu_trace.h"

/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
 */
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"

static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);

#ifdef CONFIG_DEBUG_FS
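/*
 * _dpu_danger_signal_status - dump danger/safe signal status to debugfs
 * @s: seq_file backing the debugfs entry
 * @danger_status: true to report danger status, false for safe status
 *
 * Queries the MDP TOP block (when the relevant op is implemented) under a
 * runtime PM reference and prints the MDP and per-SSPP status words.
 */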
static int _dpu_danger_signal_status(struct seq_file *s,
		bool danger_status)
{
	struct dpu_kms *kms = (struct dpu_kms *)s->private;
	struct dpu_danger_safe_status status;
	int i;

	if (!kms->hw_mdp) {
		DPU_ERROR("invalid arg(s)\n");
		return 0;
	}

	memset(&status, 0, sizeof(struct dpu_danger_safe_status));

	pm_runtime_get_sync(&kms->pdev->dev);
	if (danger_status) {
		seq_puts(s, "\nDanger signal status:\n");
		if (kms->hw_mdp->ops.get_danger_status)
			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
					&status);
	} else {
		seq_puts(s, "\nSafe signal status:\n");
		if (kms->hw_mdp->ops.get_safe_status)
			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
					&status);
	}
	pm_runtime_put_sync(&kms->pdev->dev);

	seq_printf(s, "MDP     :  0x%x\n", status.mdp);

	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
		seq_printf(s, "SSPP%d   :  0x%x\n", i - SSPP_VIG0,
				status.sspp[i]);
	seq_puts(s, "\n");

	return 0;
}

static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, true);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);

static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, false);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);

static ssize_t _dpu_plane_danger_read(struct file *file,
			char __user *buff, size_t count, loff_t *ppos)
{
	struct dpu_kms *kms = file->private_data;
	int len;
	char buf[40];

	len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);

	return simple_read_from_buffer(buff, count, ppos, buf, len);
}

static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
{
	struct drm_plane *plane;

	drm_for_each_plane(plane, kms->dev) {
		if (plane->fb && plane->state) {
			dpu_plane_danger_signal_ctrl(plane, enable);
			DPU_DEBUG("plane:%d img:%dx%d ",
				plane->base.id, plane->fb->width,
				plane->fb->height);
			DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
				plane->state->src_x >> 16,
				plane->state->src_y >> 16,
				plane->state->src_w >> 16,
				plane->state->src_h >> 16,
				plane->state->crtc_x, plane->state->crtc_y,
				plane->state->crtc_w, plane->state->crtc_h);
		} else {
			DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
		}
	}
}

static ssize_t _dpu_plane_danger_write(struct file *file,
		    const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct dpu_kms *kms = file->private_data;
	unsigned int disable_panic;
	int ret;

	ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
	if (ret)
		return ret;

	if (disable_panic) {
		/* Disable panic signal for all active pipes */
		DPU_DEBUG("Disabling danger:\n");
		_dpu_plane_set_danger_state(kms, false);
		kms->has_danger_ctrl = false;
	} else {
		/* Enable panic signal for all active pipes */
		DPU_DEBUG("Enabling danger:\n");
		kms->has_danger_ctrl = true;
		_dpu_plane_set_danger_state(kms, true);
	}

	return count;
}

static const struct file_operations dpu_plane_danger_enable = {
	.open = simple_open,
	.read = _dpu_plane_danger_read,
	.write = _dpu_plane_danger_write,
};

static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	struct dentry *entry = debugfs_create_dir("danger", parent);

	debugfs_create_file("danger_status", 0600, entry,
			dpu_kms, &dpu_debugfs_danger_stats_fops);
	debugfs_create_file("safe_status", 0600, entry,
			dpu_kms, &dpu_debugfs_safe_stats_fops);
	debugfs_create_file("disable_danger", 0600, entry,
			dpu_kms, &dpu_plane_danger_enable);
}

/*
 * Companion structure for dpu_debugfs_create_regset32.
 */
struct dpu_debugfs_regset32 {
	uint32_t offset;
	uint32_t blk_len;
	struct dpu_kms *dpu_kms;
};

static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
	struct dpu_debugfs_regset32 *regset = s->private;
	struct dpu_kms *dpu_kms = regset->dpu_kms;
	void __iomem *base;
	uint32_t i, addr;

	if (!dpu_kms->mmio)
		return 0;

	base = dpu_kms->mmio + regset->offset;

	/* insert padding spaces, if needed */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, "         ");
	}

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;
}

static int dpu_debugfs_open_regset32(struct inode *inode,
		struct file *file)
{
	return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
}

static const struct file_operations dpu_fops_regset32 = {
	.open =		dpu_debugfs_open_regset32,
	.read =		seq_read,
	.llseek =	seq_lseek,
	.release =	single_release,
};

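/**
 * dpu_debugfs_create_regset32 - create a debugfs file dumping a register range
 * @name:    name of the debugfs file
 * @mode:    file permissions
 * @parent:  parent debugfs dentry
 * @offset:  offset of the range relative to dpu_kms->mmio
 * @length:  length of the range, in bytes
 * @dpu_kms: handle providing MMIO access and runtime PM
 *
 * The offset is rounded down to a multiple of 4, and reads go through
 * readl_relaxed() with the device held active via runtime PM.
 */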
void dpu_debugfs_create_regset32(const char *name, umode_t mode,
		void *parent,
		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
{
	struct dpu_debugfs_regset32 *regset;

	if (WARN_ON(!name || !dpu_kms || !length))
		return;

	regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;

	/* make sure offset is a multiple of 4 */
	regset->offset = round_down(offset, 4);
	regset->blk_len = length;
	regset->dpu_kms = dpu_kms;

	debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32);
}

static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	void *p = dpu_hw_util_get_log_mask_ptr();
	struct dentry *entry;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	int i;

	if (!p)
		return -EINVAL;

	/* Only create a set of debugfs for the primary node, ignore render nodes */
	if (minor->type != DRM_MINOR_PRIMARY)
		return 0;

	dev = dpu_kms->dev;
	priv = dev->dev_private;

	entry = debugfs_create_dir("debug", minor->debugfs_root);

	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);

	dpu_debugfs_danger_init(dpu_kms, entry);
	dpu_debugfs_vbif_init(dpu_kms, entry);
	dpu_debugfs_core_irq_init(dpu_kms, entry);
	dpu_debugfs_sspp_init(dpu_kms, entry);

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
		if (priv->dp[i])
			msm_dp_debugfs_init(priv->dp[i], minor);
	}

	return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif

/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct dpu_global_state *
dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
{
	return to_dpu_global_state(dpu_kms->global_state.state);
}

/*
 * This acquires the modeset lock set aside for global state and returns
 * a duplicated private object state.
 */
struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(s,
						&dpu_kms->global_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_dpu_global_state(priv_state);
}
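/*
 * Illustrative caller pattern for the helper above (a sketch, not code from
 * this tree) -- e.g. an atomic check path updating shared reservations:
 *
 *	global_state = dpu_kms_get_global_state(state);
 *	if (IS_ERR(global_state))
 *		return PTR_ERR(global_state);
 *	// modify the duplicated state; it is swapped in at commit time
 */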

static struct drm_private_state *
dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
{
	struct dpu_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	struct dpu_global_state *dpu_state = to_dpu_global_state(state);

	kfree(dpu_state);
}

static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
	.atomic_duplicate_state = dpu_kms_global_duplicate_state,
	.atomic_destroy_state = dpu_kms_global_destroy_state,
};

static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
{
	struct dpu_global_state *state;

	drm_modeset_lock_init(&dpu_kms->global_state_lock);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
				    &state->base,
				    &dpu_kms_global_state_funcs);
	return 0;
}

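/*
 * dpu_kms_parse_data_bus_icc_path - look up the interconnect paths to memory
 *
 * "mdp0-mem" is required; "mdp1-mem" is optional and only recorded when
 * present. Both live on the parent MDSS device node, not the DPU node.
 */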
static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
	struct icc_path *path0;
	struct icc_path *path1;
	struct drm_device *dev = dpu_kms->dev;
	struct device *dpu_dev = dev->dev;
	struct device *mdss_dev = dpu_dev->parent;

	/* Interconnects are a part of MDSS device tree binding, not the
	 * MDP/DPU device. */
	path0 = of_icc_get(mdss_dev, "mdp0-mem");
	path1 = of_icc_get(mdss_dev, "mdp1-mem");

	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	dpu_kms->path[0] = path0;
	dpu_kms->num_paths = 1;

	if (!IS_ERR_OR_NULL(path1)) {
		dpu_kms->path[1] = path1;
		dpu_kms->num_paths++;
	}
	return 0;
}

static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return dpu_crtc_vblank(crtc, true);
}

static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	dpu_crtc_vblank(crtc, false);
}

static void dpu_kms_enable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
}

static void dpu_kms_disable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

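/*
 * dpu_kms_vsync_time - report the next expected vsync time for a CRTC
 *
 * Asks each encoder attached to the CRTC in turn and falls back to the
 * current time when no attached encoder can report one.
 */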
static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		ktime_t vsync_time;

		if (dpu_encoder_vsync_time(encoder, &vsync_time) == 0)
			return vsync_time;
	}

	return ktime_get();
}

static void dpu_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_encoder *encoder;
	int i;

	if (!kms)
		return;

	/* Call prepare_commit for all affected encoders */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		drm_for_each_encoder_mask(encoder, crtc->dev,
					  crtc_state->encoder_mask) {
			dpu_encoder_prepare_commit(encoder);
		}
	}
}

static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;

		trace_dpu_kms_commit(DRMID(crtc));
		dpu_crtc_commit_kickoff(crtc);
	}
}

static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	DPU_ATRACE_BEGIN("kms_complete_commit");

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_crtc_complete_commit(crtc);

	DPU_ATRACE_END("kms_complete_commit");
}

static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state) {
		DPU_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;

	if (!crtc->state->enable) {
		DPU_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			DPU_ERROR("wait for commit done returned %d\n", ret);
			break;
		}
	}
}

static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_kms_wait_for_commit_done(kms, crtc);
}

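/*
 * _dpu_kms_initialize_dsi - create one DPU encoder per (possibly bonded)
 * DSI host and register it with the DSI driver via msm_dsi_modeset_init().
 * The supported host configurations are listed in the comment below.
 */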
static int _dpu_kms_initialize_dsi(struct drm_device *dev,
				    struct msm_drm_private *priv,
				    struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int i, rc = 0;

	if (!(priv->dsi[0] || priv->dsi[1]))
		return rc;

	/*
	 * We support the following configurations:
	 * - Single DSI host (dsi0 or dsi1)
	 * - Two independent DSI hosts
	 * - Bonded DSI0 and DSI1 hosts
	 *
	 * TODO: Support swapping DSI0 and DSI1 in the bonded setup.
	 */
	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
		int other = (i + 1) % 2;

		if (!priv->dsi[i])
			continue;

		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
		    !msm_dsi_is_master_dsi(priv->dsi[i]))
			continue;

		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
		if (IS_ERR(encoder)) {
			DPU_ERROR("encoder init failed for dsi display\n");
			return PTR_ERR(encoder);
		}

		memset(&info, 0, sizeof(info));
		info.intf_type = encoder->encoder_type;

		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
				i, rc);
			break;
		}

		info.h_tile_instance[info.num_of_h_tiles++] = i;
		info.capabilities = msm_dsi_is_cmd_mode(priv->dsi[i]) ?
			MSM_DISPLAY_CAP_CMD_MODE :
			MSM_DISPLAY_CAP_VID_MODE;

		info.dsc = msm_dsi_get_dsc_config(priv->dsi[i]);

		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
			rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
			if (rc) {
				DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
					other, rc);
				break;
			}

			info.h_tile_instance[info.num_of_h_tiles++] = other;
		}

		rc = dpu_encoder_setup(dev, encoder, &info);
		if (rc)
			DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
				  encoder->base.id, rc);
	}

	return rc;
}

static int _dpu_kms_initialize_displayport(struct drm_device *dev,
					    struct msm_drm_private *priv,
					    struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;
	int i;

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
		if (!priv->dp[i])
			continue;

		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
		if (IS_ERR(encoder)) {
			DPU_ERROR("encoder init failed for DP display\n");
			return PTR_ERR(encoder);
		}

		memset(&info, 0, sizeof(info));
		rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
			drm_encoder_cleanup(encoder);
			return rc;
		}

		info.num_of_h_tiles = 1;
		info.h_tile_instance[0] = i;
		info.capabilities = MSM_DISPLAY_CAP_VID_MODE;
		info.intf_type = encoder->encoder_type;
		rc = dpu_encoder_setup(dev, encoder, &info);
		if (rc) {
			DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
				  encoder->base.id, rc);
			return rc;
		}
	}

	return 0;
}

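/*
 * _dpu_kms_initialize_writeback - create the virtual encoder and the
 * writeback connector for the WB_2 block, advertising @wb_formats.
 */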
static int _dpu_kms_initialize_writeback(struct drm_device *dev,
		struct msm_drm_private *priv, struct dpu_kms *dpu_kms,
		const u32 *wb_formats, int n_formats)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;

	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL);
	if (IS_ERR(encoder)) {
		DPU_ERROR("encoder init failed for writeback display\n");
		return PTR_ERR(encoder);
	}

	memset(&info, 0, sizeof(info));

	rc = dpu_writeback_init(dev, encoder, wb_formats,
			n_formats);
	if (rc) {
		DPU_ERROR("dpu_writeback_init failed, rc = %d\n", rc);
		drm_encoder_cleanup(encoder);
		return rc;
	}

	info.num_of_h_tiles = 1;
	/* use only WB idx 2 instance for DPU */
	info.h_tile_instance[0] = WB_2;
	info.intf_type = encoder->encoder_type;

	rc = dpu_encoder_setup(dev, encoder, &info);
	if (rc) {
		DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
				  encoder->base.id, rc);
		return rc;
	}

	return 0;
}

/**
 * _dpu_kms_setup_displays - create encoders, bridges and connectors
 *                           for underlying displays
 * @dev:        Pointer to drm device structure
 * @priv:       Pointer to private drm device data
 * @dpu_kms:    Pointer to dpu kms structure
 * Returns:     Zero on success
 */
static int _dpu_kms_setup_displays(struct drm_device *dev,
				    struct msm_drm_private *priv,
				    struct dpu_kms *dpu_kms)
{
	int rc = 0;
	int i;

	rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
		return rc;
	}

	rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
		return rc;
	}

	/* Since WB isn't a separate driver, check the catalog before initializing */
	if (dpu_kms->catalog->wb_count) {
		for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
			if (dpu_kms->catalog->wb[i].id == WB_2) {
				rc = _dpu_kms_initialize_writeback(dev, priv, dpu_kms,
						dpu_kms->catalog->wb[i].format_list,
						dpu_kms->catalog->wb[i].num_formats);
				if (rc) {
					DPU_ERROR("initialize_WB failed, rc = %d\n", rc);
					return rc;
				}
			}
		}
	}

	return rc;
}

#define MAX_PLANES 20
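/*
 * _dpu_kms_drm_obj_init - instantiate the DRM mode objects: encoders and
 * connectors (via the display drivers), one plane per SSPP, one CRTC per
 * primary plane, and finally make every CRTC reachable from every encoder.
 */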
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	unsigned int num_encoders;

	struct msm_drm_private *priv;
	struct dpu_mdss_cfg *catalog;

	int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
	int max_crtc_count;

	dev = dpu_kms->dev;
	priv = dev->dev_private;
	catalog = dpu_kms->catalog;

	/*
	 * Create encoder and query display drivers to create
	 * bridges and connectors
	 */
	ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
	if (ret)
		return ret;

	num_encoders = 0;
	drm_for_each_encoder(encoder, dev)
		num_encoders++;

	max_crtc_count = min(catalog->mixer_count, num_encoders);

	/* Create the planes, keeping track of one primary/cursor per crtc */
	for (i = 0; i < catalog->sspp_count; i++) {
		enum drm_plane_type type;

		if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
			&& cursor_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_CURSOR;
		else if (primary_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_PRIMARY;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
			  type, catalog->sspp[i].features,
			  catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));

		plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
				       (1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			DPU_ERROR("dpu_plane_init failed\n");
			ret = PTR_ERR(plane);
			return ret;
		}

		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor_planes[cursor_planes_idx++] = plane;
		else if (type == DRM_PLANE_TYPE_PRIMARY)
			primary_planes[primary_planes_idx++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			return ret;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/* All CRTCs are compatible with all encoders */
	drm_for_each_encoder(encoder, dev)
		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
}

static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
	int i;

	if (dpu_kms->hw_intr)
		dpu_hw_intr_destroy(dpu_kms->hw_intr);
	dpu_kms->hw_intr = NULL;

	/* safe to call these more than once during shutdown */
	_dpu_kms_mmu_destroy(dpu_kms);

	if (dpu_kms->catalog) {
		for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) {
				dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
				dpu_kms->hw_vbif[vbif_idx] = NULL;
			}
		}
	}

	if (dpu_kms->rm_init)
		dpu_rm_destroy(&dpu_kms->rm);
	dpu_kms->rm_init = false;

	if (dpu_kms->catalog)
		dpu_hw_catalog_deinit(dpu_kms->catalog);
	dpu_kms->catalog = NULL;

	if (dpu_kms->vbif[VBIF_NRT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
	dpu_kms->vbif[VBIF_NRT] = NULL;

	if (dpu_kms->vbif[VBIF_RT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
	dpu_kms->vbif[VBIF_RT] = NULL;

	if (dpu_kms->hw_mdp)
		dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
	dpu_kms->hw_mdp = NULL;

	if (dpu_kms->mmio)
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
	dpu_kms->mmio = NULL;
}

static void dpu_kms_destroy(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_kms = to_dpu_kms(kms);

	_dpu_kms_hw_destroy(dpu_kms);

	msm_kms_destroy(&dpu_kms->base);

	if (dpu_kms->rpm_enabled)
		pm_runtime_disable(&dpu_kms->pdev->dev);
}

static int dpu_irq_postinstall(struct msm_kms *kms)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	int i;

	if (!dpu_kms || !dpu_kms->dev)
		return -EINVAL;

	priv = dpu_kms->dev->dev_private;
	if (!priv)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++)
		msm_dp_irq_postinstall(priv->dp[i]);

	return 0;
}

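/*
 * dpu_kms_mdp_snapshot - capture the register state of all catalog blocks
 * (CTL, DSPP, INTF, PINGPONG, SSPP, LM, WB and TOP) into @disp_state for
 * post-mortem display snapshots, with the device held active.
 */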
static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
{
	int i;
	struct dpu_kms *dpu_kms;
	struct dpu_mdss_cfg *cat;
	struct dpu_hw_mdp *top;

	dpu_kms = to_dpu_kms(kms);

	cat = dpu_kms->catalog;
	top = dpu_kms->hw_mdp;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* dump CTL sub-blocks HW regs info */
	for (i = 0; i < cat->ctl_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
				dpu_kms->mmio + cat->ctl[i].base, "ctl_%d", i);

	/* dump DSPP sub-blocks HW regs info */
	for (i = 0; i < cat->dspp_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len,
				dpu_kms->mmio + cat->dspp[i].base, "dspp_%d", i);

	/* dump INTF sub-blocks HW regs info */
	for (i = 0; i < cat->intf_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
				dpu_kms->mmio + cat->intf[i].base, "intf_%d", i);

	/* dump PP sub-blocks HW regs info */
	for (i = 0; i < cat->pingpong_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len,
				dpu_kms->mmio + cat->pingpong[i].base, "pingpong_%d", i);

	/* dump SSPP sub-blocks HW regs info */
	for (i = 0; i < cat->sspp_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len,
				dpu_kms->mmio + cat->sspp[i].base, "sspp_%d", i);

	/* dump LM sub-blocks HW regs info */
	for (i = 0; i < cat->mixer_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
				dpu_kms->mmio + cat->mixer[i].base, "lm_%d", i);

	/* dump WB sub-blocks HW regs info */
	for (i = 0; i < cat->wb_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
				dpu_kms->mmio + cat->wb[i].base, "wb_%d", i);

	msm_disp_snapshot_add_block(disp_state, top->hw.length,
			dpu_kms->mmio + top->hw.blk_off, "top");

	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

static const struct msm_kms_funcs kms_funcs = {
	.hw_init         = dpu_kms_hw_init,
	.irq_preinstall  = dpu_core_irq_preinstall,
	.irq_postinstall = dpu_irq_postinstall,
	.irq_uninstall   = dpu_core_irq_uninstall,
	.irq             = dpu_core_irq,
	.enable_commit   = dpu_kms_enable_commit,
	.disable_commit  = dpu_kms_disable_commit,
	.vsync_time      = dpu_kms_vsync_time,
	.prepare_commit  = dpu_kms_prepare_commit,
	.flush_commit    = dpu_kms_flush_commit,
	.wait_flush      = dpu_kms_wait_flush,
	.complete_commit = dpu_kms_complete_commit,
	.enable_vblank   = dpu_kms_enable_vblank,
	.disable_vblank  = dpu_kms_disable_vblank,
	.check_modified_format = dpu_format_check_modified_format,
	.get_format      = dpu_get_msm_format,
	.destroy         = dpu_kms_destroy,
	.snapshot        = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init    = dpu_kms_debugfs_init,
#endif
};

static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
	struct msm_mmu *mmu;

	if (!dpu_kms->base.aspace)
		return;

	mmu = dpu_kms->base.aspace->mmu;

	mmu->funcs->detach(mmu);
	msm_gem_address_space_put(dpu_kms->base.aspace);

	dpu_kms->base.aspace = NULL;
}

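/*
 * _dpu_kms_mmu_init - attach the DPU to its IOMMU, if one is present.
 *
 * Returns 0 both on success and when no IOMMU domain is available; in the
 * latter case no GEM address space is created. Note the IOMMU belongs to
 * the parent MDSS device node, not the DPU node.
 */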
static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
	struct iommu_domain *domain;
	struct msm_gem_address_space *aspace;
	struct msm_mmu *mmu;
	struct device *dpu_dev = dpu_kms->dev->dev;
	struct device *mdss_dev = dpu_dev->parent;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return 0;

	/* IOMMUs are a part of MDSS device tree binding, not the
	 * MDP/DPU device. */
	mmu = msm_iommu_new(mdss_dev, domain);
	if (IS_ERR(mmu)) {
		iommu_domain_free(domain);
		return PTR_ERR(mmu);
	}
	aspace = msm_gem_address_space_create(mmu, "dpu1",
		0x1000, 0x100000000 - 0x1000);

	if (IS_ERR(aspace)) {
		mmu->funcs->destroy(mmu);
		return PTR_ERR(aspace);
	}

	dpu_kms->base.aspace = aspace;
	return 0;
}

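/**
 * dpu_kms_get_clk_rate - query the current rate of a named DPU clock
 * @dpu_kms:    handle holding the bulk clock data
 * @clock_name: name of the clock to look up
 *
 * Returns the rate in Hz, or 0 when the clock is not found (the return
 * type is unsigned, so a negative errno would be misread as a huge rate).
 */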
u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
	struct clk *clk;

	clk = msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, clock_name);
	if (!clk)
		return 0;

	return clk_get_rate(clk);
}

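/*
 * dpu_kms_hw_init - map the MDP/VBIF register ranges, read the hardware
 * revision, then initialize the catalog, MMU, resource manager, MDP TOP,
 * VBIF and interrupt blocks before creating the DRM mode objects. All
 * hardware access happens under a runtime PM reference.
 */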
static int dpu_kms_hw_init(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;
	struct drm_device *dev;
	int i, rc = -EINVAL;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return rc;
	}

	dpu_kms = to_dpu_kms(kms);
	dev = dpu_kms->dev;

	rc = dpu_kms_global_obj_init(dpu_kms);
	if (rc)
		return rc;

	atomic_set(&dpu_kms->bandwidth_ref, 0);

	dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp");
	if (IS_ERR(dpu_kms->mmio)) {
		rc = PTR_ERR(dpu_kms->mmio);
		DPU_ERROR("mdp register memory map failed: %d\n", rc);
		dpu_kms->mmio = NULL;
		goto error;
	}
	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);

	dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif");
	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
		DPU_ERROR("vbif register memory map failed: %d\n", rc);
		dpu_kms->vbif[VBIF_RT] = NULL;
		goto error;
	}
	dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt");
	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
		dpu_kms->vbif[VBIF_NRT] = NULL;
		DPU_DEBUG("VBIF NRT is not defined\n");
	}

	dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma");
	if (IS_ERR(dpu_kms->reg_dma)) {
		dpu_kms->reg_dma = NULL;
		DPU_DEBUG("REG_DMA is not defined\n");
	}

	dpu_kms_parse_data_bus_icc_path(dpu_kms);

	rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
	if (rc < 0)
		goto error;

	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);

	pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);

	dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
	if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
		rc = PTR_ERR(dpu_kms->catalog);
		if (!dpu_kms->catalog)
			rc = -EINVAL;
		DPU_ERROR("catalog init failed: %d\n", rc);
		dpu_kms->catalog = NULL;
		goto power_error;
	}

	/*
	 * Now we need to read the HW catalog and initialize resources such as
	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
	 */
	rc = _dpu_kms_mmu_init(dpu_kms);
	if (rc) {
		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
	if (rc) {
		DPU_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}

	dpu_kms->rm_init = true;

	dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
					     dpu_kms->catalog);
	if (IS_ERR(dpu_kms->hw_mdp)) {
		rc = PTR_ERR(dpu_kms->hw_mdp);
		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
		dpu_kms->hw_mdp = NULL;
		goto power_error;
	}

	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
		u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

		dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
				dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
		if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
			rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
			if (!dpu_kms->hw_vbif[vbif_idx])
				rc = -EINVAL;
			DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
			dpu_kms->hw_vbif[vbif_idx] = NULL;
			goto power_error;
		}
	}

	rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
			msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, "core"));
	if (rc) {
		DPU_ERROR("failed to init perf %d\n", rc);
		goto perf_err;
	}

	dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
	if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
		rc = PTR_ERR(dpu_kms->hw_intr);
		DPU_ERROR("hw_intr init failed: %d\n", rc);
		dpu_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	/*
	 * max crtc width is equal to the max mixer width * 2 and max height
	 * is 4K
	 */
	dev->mode_config.max_width =
			dpu_kms->catalog->caps->max_mixer_width * 2;
	dev->mode_config.max_height = 4096;

	dev->max_vblank_count = 0xffffffff;
	/* Disable vblank irqs aggressively for power-saving */
	dev->vblank_disable_immediate = true;

	/*
	 * _dpu_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _dpu_kms_drm_obj_init(dpu_kms);
	if (rc) {
		DPU_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;

drm_obj_init_err:
	dpu_core_perf_destroy(&dpu_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
	_dpu_kms_hw_destroy(dpu_kms);

	return rc;
}

static int dpu_kms_init(struct drm_device *ddev)
{
	struct msm_drm_private *priv = ddev->dev_private;
	struct device *dev = ddev->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct dpu_kms *dpu_kms;
	int irq;
	struct dev_pm_opp *opp;
	int ret = 0;
	unsigned long max_freq = ULONG_MAX;

	dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
	if (!dpu_kms)
		return -ENOMEM;

	ret = devm_pm_opp_set_clkname(dev, "core");
	if (ret)
		return ret;
	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV) {
		dev_err(dev, "invalid OPP table in device tree\n");
		return ret;
	}

	ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
	if (ret < 0) {
		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
		return ret;
	}
	dpu_kms->num_clocks = ret;

	opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
	if (!IS_ERR(opp))
		dev_pm_opp_put(opp);

	dev_pm_opp_set_rate(dev, max_freq);

	ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
	if (ret) {
		DPU_ERROR("failed to init kms, ret=%d\n", ret);
		return ret;
	}
	dpu_kms->dev = ddev;
	dpu_kms->pdev = pdev;

	pm_runtime_enable(&pdev->dev);
	dpu_kms->rpm_enabled = true;

	priv->kms = &dpu_kms->base;

	irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
	if (!irq) {
		DPU_ERROR("failed to get irq\n");
		return -EINVAL;
	}
	dpu_kms->base.irq = irq;

	return 0;
}

static int dpu_dev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, dpu_kms_init);
}

static int dpu_dev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}

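/*
 * Runtime PM: suspend drops the OPP vote, gates the bulk clocks and zeroes
 * the interconnect bandwidth; resume re-enables the clocks, reprograms the
 * VBIF memory types and invokes the encoders' runtime-resume hooks.
 */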
static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
	int i;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(dpu_kms->num_clocks, dpu_kms->clocks);

	for (i = 0; i < dpu_kms->num_paths; i++)
		icc_set_bw(dpu_kms->path[i], 0, 0);

	return 0;
}

static int __maybe_unused dpu_runtime_resume(struct device *dev)
{
	int rc = -1;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_encoder *encoder;
	struct drm_device *ddev;

	ddev = dpu_kms->dev;

	rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
	if (rc) {
		DPU_ERROR("clock enable failed rc:%d\n", rc);
		return rc;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	drm_for_each_encoder(encoder, ddev)
		dpu_encoder_virt_runtime_resume(encoder);

	return rc;
}

static const struct dev_pm_ops dpu_pm_ops = {
	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};

static const struct of_device_id dpu_dt_match[] = {
	{ .compatible = "qcom,msm8998-dpu", },
	{ .compatible = "qcom,qcm2290-dpu", },
	{ .compatible = "qcom,sdm845-dpu", },
	{ .compatible = "qcom,sc7180-dpu", },
	{ .compatible = "qcom,sc7280-dpu", },
	{ .compatible = "qcom,sc8180x-dpu", },
	{ .compatible = "qcom,sm8150-dpu", },
	{ .compatible = "qcom,sm8250-dpu", },
	{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);

static struct platform_driver dpu_driver = {
	.probe = dpu_dev_probe,
	.remove = dpu_dev_remove,
	.shutdown = msm_drv_shutdown,
	.driver = {
		.name = "msm_dpu",
		.of_match_table = dpu_dt_match,
		.pm = &dpu_pm_ops,
	},
};

void __init msm_dpu_register(void)
{
	platform_driver_register(&dpu_driver);
}

void __exit msm_dpu_unregister(void)
{
	platform_driver_unregister(&dpu_driver);
}