cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

mdp5_kms.c (25559B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
      4 * Copyright (C) 2013 Red Hat
      5 * Author: Rob Clark <robdclark@gmail.com>
      6 */
      7
      8#include <linux/delay.h>
      9#include <linux/interconnect.h>
     10#include <linux/of_irq.h>
     11
     12#include <drm/drm_debugfs.h>
     13#include <drm/drm_drv.h>
     14#include <drm/drm_file.h>
     15#include <drm/drm_vblank.h>
     16
     17#include "msm_drv.h"
     18#include "msm_gem.h"
     19#include "msm_mmu.h"
     20#include "mdp5_kms.h"
     21
     22static int mdp5_hw_init(struct msm_kms *kms)
     23{
     24	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
     25	struct device *dev = &mdp5_kms->pdev->dev;
     26	unsigned long flags;
     27
     28	pm_runtime_get_sync(dev);
     29
     30	/* Magic unknown register writes:
     31	 *
     32	 *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
     33	 *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
     34	 *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
     35	 *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
     36	 *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
     37	 *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
     38	 *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
     39	 *    W MDP5:0x4b0 0xccccc0c0    (mdss_mdp.c:839)
     40	 *    W MDP5:0x4b8 0xccccc000    (mdss_mdp.c:839)
     41	 *
     42	 * Downstream fbdev driver gets these register offsets/values
     43	 * from DT.. not really sure what these registers are or if
     44	 * different values for different boards/SoC's, etc.  I guess
     45	 * they are the golden registers.
     46	 *
     47	 * Not setting these does not seem to cause any problem.  But
     48	 * we may be getting lucky with the bootloader initializing
     49	 * them for us.  OTOH, if we can always count on the bootloader
     50	 * setting the golden registers, then perhaps we don't need to
     51	 * care.
     52	 */
     53
     54	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
     55	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
     56	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
     57
     58	mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
     59
     60	pm_runtime_put_sync(dev);
     61
     62	return 0;
     63}
     64
     65/* Global/shared object state funcs */
     66
     67/*
     68 * This is a helper that returns the private state currently in operation.
     69 * Note that this would return the "old_state" if called in the atomic check
     70 * path, and the "new_state" after the atomic swap has been done.
     71 */
     72struct mdp5_global_state *
     73mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
     74{
     75	return to_mdp5_global_state(mdp5_kms->glob_state.state);
     76}
     77
     78/*
     79 * This acquires the modeset lock set aside for global state, creates
     80 * a new duplicated private object state.
     81 */
     82struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
     83{
     84	struct msm_drm_private *priv = s->dev->dev_private;
     85	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
     86	struct drm_private_state *priv_state;
     87	int ret;
     88
     89	ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
     90	if (ret)
     91		return ERR_PTR(ret);
     92
     93	priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
     94	if (IS_ERR(priv_state))
     95		return ERR_CAST(priv_state);
     96
     97	return to_mdp5_global_state(priv_state);
     98}
     99
/*
 * drm_private_state_funcs::atomic_duplicate_state hook.  Clones the
 * current global state byte-for-byte (including the embedded SMP
 * state) and re-links the copy to @obj via the private-obj helper.
 * Returns NULL on allocation failure.
 */
static struct drm_private_state *
mdp5_global_duplicate_state(struct drm_private_obj *obj)
{
	struct mdp5_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}
    113
    114static void mdp5_global_destroy_state(struct drm_private_obj *obj,
    115				      struct drm_private_state *state)
    116{
    117	struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);
    118
    119	kfree(mdp5_state);
    120}
    121
/* Hooks the atomic core uses to clone/free our global private state */
static const struct drm_private_state_funcs mdp5_global_state_funcs = {
	.atomic_duplicate_state = mdp5_global_duplicate_state,
	.atomic_destroy_state = mdp5_global_destroy_state,
};
    126
/*
 * Set up the global private object: its dedicated modeset lock and an
 * initial zeroed state.  The state's back-pointer lets later code find
 * the mdp5_kms from a duplicated state.  Returns 0 or -ENOMEM.
 */
static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
{
	struct mdp5_global_state *state;

	drm_modeset_lock_init(&mdp5_kms->glob_state_lock);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->mdp5_kms = mdp5_kms;

	drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state,
				    &state->base,
				    &mdp5_global_state_funcs);
	return 0;
}
    144
/* Keep the device powered across an atomic commit (paired with
 * mdp5_disable_commit()). */
static void mdp5_enable_commit(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	pm_runtime_get_sync(&mdp5_kms->pdev->dev);
}
    150
/* Drop the runtime-PM reference taken by mdp5_enable_commit(). */
static void mdp5_disable_commit(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	pm_runtime_put_sync(&mdp5_kms->pdev->dev);
}
    156
/*
 * Pre-commit hook: program the Shared Memory Pool allocations recorded
 * in the (already swapped-in) global state.  A NULL smp means this SoC
 * has per-pipe latency buffering instead of an SMP, so there is nothing
 * to do.
 */
static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct mdp5_global_state *global_state;

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	if (mdp5_kms->smp)
		mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
}
    167
/* Flush hook is currently a no-op for MDP5; flushing happens via the
 * CTL paths elsewhere. */
static void mdp5_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	/* TODO */
}
    172
/* Block until every CRTC in @crtc_mask has latched its pending commit. */
static void mdp5_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct drm_crtc *crtc;

	for_each_crtc_mask(mdp5_kms->dev, crtc, crtc_mask)
		mdp5_crtc_wait_for_commit_done(crtc);
}
    181
/*
 * Post-commit hook: release SMP blocks that the just-completed commit
 * freed up, now that the hardware is no longer scanning out of them.
 * No-op on SoCs without an SMP (smp == NULL).
 */
static void mdp5_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct mdp5_global_state *global_state;

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	if (mdp5_kms->smp)
		mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
}
    192
    193static int mdp5_set_split_display(struct msm_kms *kms,
    194		struct drm_encoder *encoder,
    195		struct drm_encoder *slave_encoder,
    196		bool is_cmd_mode)
    197{
    198	if (is_cmd_mode)
    199		return mdp5_cmd_encoder_set_split_display(encoder,
    200							slave_encoder);
    201	else
    202		return mdp5_vid_encoder_set_split_display(encoder,
    203							  slave_encoder);
    204}
    205
    206static void mdp5_destroy(struct platform_device *pdev);
    207
/*
 * msm_kms_funcs::destroy hook.  Tears down KMS-level objects (mixers,
 * pipes, the GEM address space) and then hands off to mdp5_destroy()
 * for the remaining device-level state.
 */
static void mdp5_kms_destroy(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct msm_gem_address_space *aspace = kms->aspace;
	int i;

	for (i = 0; i < mdp5_kms->num_hwmixers; i++)
		mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);

	for (i = 0; i < mdp5_kms->num_hwpipes; i++)
		mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);

	/* aspace is NULL when scanout uses phys contig buffers (no iommu) */
	if (aspace) {
		aspace->mmu->funcs->detach(aspace->mmu);
		msm_gem_address_space_put(aspace);
	}

	mdp_kms_destroy(&mdp5_kms->base);
	mdp5_destroy(mdp5_kms->pdev);
}
    228
    229#ifdef CONFIG_DEBUG_FS
/* debugfs "smp" file: dump SMP pool allocation state, if this SoC has one. */
static int smp_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct drm_printer p = drm_seq_file_printer(m);

	if (!mdp5_kms->smp) {
		drm_printf(&p, "no SMP pool\n");
		return 0;
	}

	mdp5_smp_dump(mdp5_kms->smp, &p);

	return 0;
}
    247
/* Files registered under the DRM minor's debugfs directory */
static struct drm_info_list mdp5_debugfs_list[] = {
		{"smp", smp_show },
};
    251
/* msm_kms_funcs::debugfs_init hook: register our debugfs files. */
static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	drm_debugfs_create_files(mdp5_debugfs_list,
				 ARRAY_SIZE(mdp5_debugfs_list),
				 minor->debugfs_root, minor);

	return 0;
}
    260#endif
    261
/* MDP5 implementation of the msm_kms/mdp_kms operation tables */
static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init         = mdp5_hw_init,
		.irq_preinstall  = mdp5_irq_preinstall,
		.irq_postinstall = mdp5_irq_postinstall,
		.irq_uninstall   = mdp5_irq_uninstall,
		.irq             = mdp5_irq,
		.enable_vblank   = mdp5_enable_vblank,
		.disable_vblank  = mdp5_disable_vblank,
		.flush_commit    = mdp5_flush_commit,
		.enable_commit   = mdp5_enable_commit,
		.disable_commit  = mdp5_disable_commit,
		.prepare_commit  = mdp5_prepare_commit,
		.wait_flush      = mdp5_wait_flush,
		.complete_commit = mdp5_complete_commit,
		.get_format      = mdp_get_format,
		.set_split_display = mdp5_set_split_display,
		.destroy         = mdp5_kms_destroy,
#ifdef CONFIG_DEBUG_FS
		.debugfs_init    = mdp5_kms_debugfs_init,
#endif
	},
	.set_irqmask         = mdp5_set_irqmask,
};
    286
/*
 * Runtime-suspend worker: gate all MDP5 clocks.  enable_count is
 * debug bookkeeping only (the WARN catches unbalanced enable/disable);
 * clk_disable_unprepare() on a NULL optional clock is a no-op.
 * Always returns 0.
 */
static int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count--;
	WARN_ON(mdp5_kms->enable_count < 0);

	clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
	clk_disable_unprepare(mdp5_kms->tbu_clk);
	clk_disable_unprepare(mdp5_kms->ahb_clk);
	clk_disable_unprepare(mdp5_kms->axi_clk);
	clk_disable_unprepare(mdp5_kms->core_clk);
	clk_disable_unprepare(mdp5_kms->lut_clk);

	return 0;
}
    303
/*
 * Runtime-resume worker: ungate all MDP5 clocks.  Optional clocks
 * (lut/tbu/tbu_rt) that were never acquired are NULL and are ignored
 * by clk_prepare_enable().  Always returns 0.
 */
static int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	mdp5_kms->enable_count++;

	clk_prepare_enable(mdp5_kms->ahb_clk);
	clk_prepare_enable(mdp5_kms->axi_clk);
	clk_prepare_enable(mdp5_kms->core_clk);
	clk_prepare_enable(mdp5_kms->lut_clk);
	clk_prepare_enable(mdp5_kms->tbu_clk);
	clk_prepare_enable(mdp5_kms->tbu_rt_clk);

	return 0;
}
    319
    320static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
    321					     struct mdp5_interface *intf,
    322					     struct mdp5_ctl *ctl)
    323{
    324	struct drm_device *dev = mdp5_kms->dev;
    325	struct drm_encoder *encoder;
    326
    327	encoder = mdp5_encoder_init(dev, intf, ctl);
    328	if (IS_ERR(encoder)) {
    329		DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
    330		return encoder;
    331	}
    332
    333	return encoder;
    334}
    335
    336static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
    337{
    338	const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
    339	const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
    340	int id = 0, i;
    341
    342	for (i = 0; i < intf_cnt; i++) {
    343		if (intfs[i] == INTF_DSI) {
    344			if (intf_num == i)
    345				return id;
    346
    347			id++;
    348		}
    349	}
    350
    351	return -EINVAL;
    352}
    353
/*
 * Create the encoder (and hook up the connector-side driver) for one
 * MDP5 interface.  eDP is skipped; HDMI/DSI interfaces are skipped
 * quietly when the corresponding sub-device driver did not probe.
 * Each constructed encoder gets a CTL requested for its interface.
 * Returns 0 on success (or skip), negative errno otherwise.
 */
static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
			     struct mdp5_interface *intf)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
	struct mdp5_ctl *ctl;
	struct drm_encoder *encoder;
	int ret = 0;

	switch (intf->type) {
	case INTF_eDP:
		/* eDP is not supported by this driver */
		DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num);
		break;
	case INTF_HDMI:
		if (!priv->hdmi)
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
		break;
	case INTF_DSI:
	{
		const struct mdp5_cfg_hw *hw_cfg =
					mdp5_cfg_get_hw_config(mdp5_kms->cfg);
		/* translate hw interface number into priv->dsi[] index */
		int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);

		if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
			DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
				intf->num);
			ret = -EINVAL;
			break;
		}

		if (!priv->dsi[dsi_id])
			break;

		ctl = mdp5_ctlm_request(ctlm, intf->num);
		if (!ctl) {
			ret = -EINVAL;
			break;
		}

		encoder = construct_encoder(mdp5_kms, intf, ctl);
		if (IS_ERR(encoder)) {
			ret = PTR_ERR(encoder);
			break;
		}

		ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
		if (!ret)
			/* DSI panels may run in command mode rather than video mode */
			mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));

		break;
	}
	default:
		DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
    428
/*
 * Build the modeset topology: encoders for every interface, one plane
 * per hardware pipe, and one CRTC per encoder (bounded by the number
 * of layer mixers).  The first N planes become the CRTCs' primary
 * planes; cursor-capable leftover pipes become cursor planes and the
 * rest overlays.  Returns 0 or a negative errno.
 */
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int num_crtcs;
	int i, ret, pi = 0, ci = 0;
	struct drm_plane *primary[MAX_BASES] = { NULL };
	struct drm_plane *cursor[MAX_BASES] = { NULL };
	struct drm_encoder *encoder;
	unsigned int num_encoders;

	/*
	 * Construct encoders and modeset initialize connector devices
	 * for each external display interface.
	 */
	for (i = 0; i < mdp5_kms->num_intfs; i++) {
		ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
		if (ret)
			goto fail;
	}

	num_encoders = 0;
	drm_for_each_encoder(encoder, dev)
		num_encoders++;

	/*
	 * We should ideally have less number of encoders (set up by parsing
	 * the MDP5 interfaces) than the number of layer mixers present in HW,
	 * but let's be safe here anyway
	 */
	num_crtcs = min(num_encoders, mdp5_kms->num_hwmixers);

	/*
	 * Construct planes equaling the number of hw pipes, and CRTCs for the
	 * N encoders set up by the driver. The first N planes become primary
	 * planes for the CRTCs, with the remainder as overlay planes:
	 */
	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
		struct drm_plane *plane;
		enum drm_plane_type type;

		if (i < num_crtcs)
			type = DRM_PLANE_TYPE_PRIMARY;
		else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR)
			type = DRM_PLANE_TYPE_CURSOR;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		plane = mdp5_plane_init(dev, type);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
			goto fail;
		}

		/* remember primaries/cursors for pairing with CRTCs below */
		if (type == DRM_PLANE_TYPE_PRIMARY)
			primary[pi++] = plane;
		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor[ci++] = plane;
	}

	for (i = 0; i < num_crtcs; i++) {
		struct drm_crtc *crtc;

		crtc  = mdp5_crtc_init(dev, primary[i], cursor[i], i);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/*
	 * Now that we know the number of crtcs we've created, set the possible
	 * crtcs for the encoders
	 */
	drm_for_each_encoder(encoder, dev)
		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;

fail:
	return ret;
}
    515
/*
 * Read the MDP5 HW_VERSION register and split it into major/minor
 * fields.  Needs the device powered, so a runtime-PM reference is
 * held around the read.
 */
static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
				 u32 *major, u32 *minor)
{
	struct device *dev = &mdp5_kms->pdev->dev;
	u32 version;

	pm_runtime_get_sync(dev);
	version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
	pm_runtime_put_sync(dev);

	*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
	*minor = FIELD(version, MDP5_HW_VERSION_MINOR);

	DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
}
    531
    532static int get_clk(struct platform_device *pdev, struct clk **clkp,
    533		const char *name, bool mandatory)
    534{
    535	struct device *dev = &pdev->dev;
    536	struct clk *clk = msm_clk_get(pdev, name);
    537	if (IS_ERR(clk) && mandatory) {
    538		DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
    539		return PTR_ERR(clk);
    540	}
    541	if (IS_ERR(clk))
    542		DBG("skipping %s", name);
    543	else
    544		*clkp = clk;
    545
    546	return 0;
    547}
    548
    549static int mdp5_init(struct platform_device *pdev, struct drm_device *dev);
    550
    551static int mdp5_kms_init(struct drm_device *dev)
    552{
    553	struct msm_drm_private *priv = dev->dev_private;
    554	struct platform_device *pdev;
    555	struct mdp5_kms *mdp5_kms;
    556	struct mdp5_cfg *config;
    557	struct msm_kms *kms;
    558	struct msm_gem_address_space *aspace;
    559	int irq, i, ret;
    560	struct device *iommu_dev;
    561
    562	ret = mdp5_init(to_platform_device(dev->dev), dev);
    563
    564	/* priv->kms would have been populated by the MDP5 driver */
    565	kms = priv->kms;
    566	if (!kms)
    567		return -ENOMEM;
    568
    569	mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
    570	pdev = mdp5_kms->pdev;
    571
    572	ret = mdp_kms_init(&mdp5_kms->base, &kms_funcs);
    573	if (ret) {
    574		DRM_DEV_ERROR(&pdev->dev, "failed to init kms\n");
    575		goto fail;
    576	}
    577
    578	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
    579	if (!irq) {
    580		ret = -EINVAL;
    581		DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n");
    582		goto fail;
    583	}
    584
    585	kms->irq = irq;
    586
    587	config = mdp5_cfg_get_config(mdp5_kms->cfg);
    588
    589	/* make sure things are off before attaching iommu (bootloader could
    590	 * have left things on, in which case we'll start getting faults if
    591	 * we don't disable):
    592	 */
    593	pm_runtime_get_sync(&pdev->dev);
    594	for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
    595		if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
    596		    !config->hw->intf.base[i])
    597			continue;
    598		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
    599
    600		mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
    601	}
    602	mdelay(16);
    603
    604	if (config->platform.iommu) {
    605		struct msm_mmu *mmu;
    606
    607		iommu_dev = &pdev->dev;
    608		if (!dev_iommu_fwspec_get(iommu_dev))
    609			iommu_dev = iommu_dev->parent;
    610
    611		mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
    612
    613		aspace = msm_gem_address_space_create(mmu, "mdp5",
    614			0x1000, 0x100000000 - 0x1000);
    615
    616		if (IS_ERR(aspace)) {
    617			if (!IS_ERR(mmu))
    618				mmu->funcs->destroy(mmu);
    619			ret = PTR_ERR(aspace);
    620			goto fail;
    621		}
    622
    623		kms->aspace = aspace;
    624	} else {
    625		DRM_DEV_INFO(&pdev->dev,
    626			 "no iommu, fallback to phys contig buffers for scanout\n");
    627		aspace = NULL;
    628	}
    629
    630	pm_runtime_put_sync(&pdev->dev);
    631
    632	ret = modeset_init(mdp5_kms);
    633	if (ret) {
    634		DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
    635		goto fail;
    636	}
    637
    638	dev->mode_config.min_width = 0;
    639	dev->mode_config.min_height = 0;
    640	dev->mode_config.max_width = 0xffff;
    641	dev->mode_config.max_height = 0xffff;
    642
    643	dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
    644	dev->vblank_disable_immediate = true;
    645
    646	return 0;
    647fail:
    648	if (kms)
    649		mdp5_kms_destroy(kms);
    650
    651	return ret;
    652}
    653
/*
 * Device-level teardown, reached both from the mdp5_init() failure
 * path and from mdp5_kms_destroy().  Members may be only partially
 * initialized (mdp5_kms itself is kzalloc'd), hence the NULL checks.
 * mdp5_kms itself is devm-managed and freed with the device.
 */
static void mdp5_destroy(struct platform_device *pdev)
{
	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
	int i;

	if (mdp5_kms->ctlm)
		mdp5_ctlm_destroy(mdp5_kms->ctlm);
	if (mdp5_kms->smp)
		mdp5_smp_destroy(mdp5_kms->smp);
	if (mdp5_kms->cfg)
		mdp5_cfg_destroy(mdp5_kms->cfg);

	for (i = 0; i < mdp5_kms->num_intfs; i++)
		kfree(mdp5_kms->intfs[i]);

	if (mdp5_kms->rpm_enabled)
		pm_runtime_disable(&pdev->dev);

	drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
	drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
}
    675
/*
 * Create @cnt hardware pipe objects of one class and append them to
 * mdp5_kms->hwpipes.  @pipes names the pipes, @offsets their register
 * bases, @caps the capability flags shared by the class.  Returns 0
 * or a negative errno (already-constructed pipes are cleaned up later
 * by mdp5_kms_destroy()).
 */
static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
		const enum mdp5_pipe *pipes, const uint32_t *offsets,
		uint32_t caps)
{
	struct drm_device *dev = mdp5_kms->dev;
	int i, ret;

	for (i = 0; i < cnt; i++) {
		struct mdp5_hw_pipe *hwpipe;

		hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
		if (IS_ERR(hwpipe)) {
			ret = PTR_ERR(hwpipe);
			DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
					pipe2name(pipes[i]), ret);
			return ret;
		}
		hwpipe->idx = mdp5_kms->num_hwpipes;
		mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
	}

	return 0;
}
    699
    700static int hwpipe_init(struct mdp5_kms *mdp5_kms)
    701{
    702	static const enum mdp5_pipe rgb_planes[] = {
    703			SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
    704	};
    705	static const enum mdp5_pipe vig_planes[] = {
    706			SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
    707	};
    708	static const enum mdp5_pipe dma_planes[] = {
    709			SSPP_DMA0, SSPP_DMA1,
    710	};
    711	static const enum mdp5_pipe cursor_planes[] = {
    712			SSPP_CURSOR0, SSPP_CURSOR1,
    713	};
    714	const struct mdp5_cfg_hw *hw_cfg;
    715	int ret;
    716
    717	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
    718
    719	/* Construct RGB pipes: */
    720	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
    721			hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
    722	if (ret)
    723		return ret;
    724
    725	/* Construct video (VIG) pipes: */
    726	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
    727			hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
    728	if (ret)
    729		return ret;
    730
    731	/* Construct DMA pipes: */
    732	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
    733			hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
    734	if (ret)
    735		return ret;
    736
    737	/* Construct cursor pipes: */
    738	ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count,
    739			cursor_planes, hw_cfg->pipe_cursor.base,
    740			hw_cfg->pipe_cursor.caps);
    741	if (ret)
    742		return ret;
    743
    744	return 0;
    745}
    746
/*
 * Construct one mdp5_hw_mixer per layer mixer instance declared in the
 * per-SoC config and append them to mdp5_kms->hwmixers.  Returns 0 or
 * a negative errno; partial construction is cleaned up later by
 * mdp5_kms_destroy().
 */
static int hwmixer_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	int i, ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	for (i = 0; i < hw_cfg->lm.count; i++) {
		struct mdp5_hw_mixer *mixer;

		mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
		if (IS_ERR(mixer)) {
			ret = PTR_ERR(mixer);
			DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
				i, ret);
			return ret;
		}

		mixer->idx = mdp5_kms->num_hwmixers;
		mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
	}

	return 0;
}
    772
/*
 * Allocate an mdp5_interface descriptor for every non-disabled entry
 * in the config's interface connect table.  The descriptors are
 * consumed later by modeset_init() and freed by mdp5_destroy().
 * Returns 0 or -ENOMEM.
 */
static int interface_init(struct mdp5_kms *mdp5_kms)
{
	struct drm_device *dev = mdp5_kms->dev;
	const struct mdp5_cfg_hw *hw_cfg;
	const enum mdp5_intf_type *intf_types;
	int i;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
	intf_types = hw_cfg->intf.connect;

	for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
		struct mdp5_interface *intf;

		if (intf_types[i] == INTF_DISABLED)
			continue;

		intf = kzalloc(sizeof(*intf), GFP_KERNEL);
		if (!intf) {
			DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
			return -ENOMEM;
		}

		intf->num = i;
		intf->type = intf_types[i];
		intf->mode = MDP5_INTF_MODE_NONE;
		intf->idx = mdp5_kms->num_intfs;
		mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
	}

	return 0;
}
    804
/*
 * Device-level init: map registers, acquire clocks, identify the HW
 * revision, load the matching per-SoC config, and build the CTL/SMP/
 * pipe/mixer/interface objects.  On success priv->kms points at the
 * (not-yet-hw-initialized) kms object; on failure everything built so
 * far is torn down via mdp5_destroy().
 */
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp5_kms *mdp5_kms;
	struct mdp5_cfg *config;
	u32 major, minor;
	int ret;

	mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
	if (!mdp5_kms) {
		ret = -ENOMEM;
		goto fail;
	}

	platform_set_drvdata(pdev, mdp5_kms);

	spin_lock_init(&mdp5_kms->resource_lock);

	mdp5_kms->dev = dev;
	mdp5_kms->pdev = pdev;

	ret = mdp5_global_obj_init(mdp5_kms);
	if (ret)
		goto fail;

	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys");
	if (IS_ERR(mdp5_kms->mmio)) {
		ret = PTR_ERR(mdp5_kms->mmio);
		goto fail;
	}

	/* mandatory clocks: */
	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
	if (ret)
		goto fail;

	/* optional clocks: */
	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
	get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
	get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);

	/* we need to set a default rate before enabling.  Set a safe
	 * rate first, then figure out hw revision, and then set a
	 * more optimal rate:
	 */
	clk_set_rate(mdp5_kms->core_clk, 200000000);

	pm_runtime_enable(&pdev->dev);
	/* remembered so mdp5_destroy() knows whether to disable it again */
	mdp5_kms->rpm_enabled = true;

	read_mdp_hw_revision(mdp5_kms, &major, &minor);

	/* pick the per-SoC config matching the detected revision */
	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR(mdp5_kms->cfg)) {
		ret = PTR_ERR(mdp5_kms->cfg);
		/* NULL it out so mdp5_destroy() doesn't free an ERR_PTR */
		mdp5_kms->cfg = NULL;
		goto fail;
	}

	config = mdp5_cfg_get_config(mdp5_kms->cfg);
	mdp5_kms->caps = config->hw->mdp.caps;

	/* TODO: compute core clock rate at runtime */
	clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);

	/*
	 * Some chipsets have a Shared Memory Pool (SMP), while others
	 * have dedicated latency buffering per source pipe instead;
	 * this section initializes the SMP:
	 */
	if (mdp5_kms->caps & MDP_CAP_SMP) {
		mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
		if (IS_ERR(mdp5_kms->smp)) {
			ret = PTR_ERR(mdp5_kms->smp);
			mdp5_kms->smp = NULL;
			goto fail;
		}
	}

	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
	if (IS_ERR(mdp5_kms->ctlm)) {
		ret = PTR_ERR(mdp5_kms->ctlm);
		mdp5_kms->ctlm = NULL;
		goto fail;
	}

	ret = hwpipe_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = hwmixer_init(mdp5_kms);
	if (ret)
		goto fail;

	ret = interface_init(mdp5_kms);
	if (ret)
		goto fail;

	/* set uninit-ed kms */
	priv->kms = &mdp5_kms->base.base;

	return 0;
fail:
	if (mdp5_kms)
		mdp5_destroy(pdev);
	return ret;
}
    921
    922static int mdp5_setup_interconnect(struct platform_device *pdev)
    923{
    924	/* Interconnects are a part of MDSS device tree binding, not the
    925	 * MDP5 device. */
    926	struct device *mdss_dev = pdev->dev.parent;
    927	struct icc_path *path0 = of_icc_get(mdss_dev, "mdp0-mem");
    928	struct icc_path *path1 = of_icc_get(mdss_dev, "mdp1-mem");
    929	struct icc_path *path_rot = of_icc_get(mdss_dev, "rotator-mem");
    930
    931	if (IS_ERR(path0))
    932		return PTR_ERR(path0);
    933
    934	if (!path0) {
    935		/* no interconnect support is not necessarily a fatal
    936		 * condition, the platform may simply not have an
    937		 * interconnect driver yet.  But warn about it in case
    938		 * bootloader didn't setup bus clocks high enough for
    939		 * scanout.
    940		 */
    941		dev_warn(&pdev->dev, "No interconnect support may cause display underflows!\n");
    942		return 0;
    943	}
    944
    945	icc_set_bw(path0, 0, MBps_to_icc(6400));
    946
    947	if (!IS_ERR_OR_NULL(path1))
    948		icc_set_bw(path1, 0, MBps_to_icc(6400));
    949	if (!IS_ERR_OR_NULL(path_rot))
    950		icc_set_bw(path_rot, 0, MBps_to_icc(6400));
    951
    952	return 0;
    953}
    954
/*
 * Platform driver probe: set up interconnect bandwidth first (scanout
 * can underflow without it), then hand off to the shared msm probe
 * path with our KMS init callback.
 */
static int mdp5_dev_probe(struct platform_device *pdev)
{
	int ret;

	DBG("");

	ret = mdp5_setup_interconnect(pdev);
	if (ret)
		return ret;

	return msm_drv_probe(&pdev->dev, mdp5_kms_init);
}
    967
/* Platform driver remove: detach from the msm component master. */
static int mdp5_dev_remove(struct platform_device *pdev)
{
	DBG("");
	component_master_del(&pdev->dev, &msm_drm_ops);
	return 0;
}
    974
/* Runtime-PM suspend callback: gate the MDP5 clocks. */
static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);

	DBG("");

	return mdp5_disable(mdp5_kms);
}
    984
/* Runtime-PM resume callback: ungate the MDP5 clocks. */
static __maybe_unused int mdp5_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);

	DBG("");

	return mdp5_enable(mdp5_kms);
}
    994
/* Runtime-PM hooks plus shared msm system-sleep prepare/complete */
static const struct dev_pm_ops mdp5_pm_ops = {
	SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};
   1000
/* Devices this driver binds to (both mainline and downstream compatibles) */
static const struct of_device_id mdp5_dt_match[] = {
	{ .compatible = "qcom,mdp5", },
	/* to support downstream DT files */
	{ .compatible = "qcom,mdss_mdp", },
	{}
};
MODULE_DEVICE_TABLE(of, mdp5_dt_match);
   1008
/* Platform driver registration record for the MDP5 KMS device */
static struct platform_driver mdp5_driver = {
	.probe = mdp5_dev_probe,
	.remove = mdp5_dev_remove,
	.shutdown = msm_drv_shutdown,
	.driver = {
		.name = "msm_mdp",
		.of_match_table = mdp5_dt_match,
		.pm = &mdp5_pm_ops,
	},
};
   1019
/* Called from the msm core's module init to register this driver. */
void __init msm_mdp_register(void)
{
	DBG("");
	platform_driver_register(&mdp5_driver);
}
   1025
/* Called from the msm core's module exit to unregister this driver. */
void __exit msm_mdp_unregister(void)
{
	DBG("");
	platform_driver_unregister(&mdp5_driver);
}