cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dpu_encoder.c (69730B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (C) 2013 Red Hat
      4 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
      5 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
      6 *
      7 * Author: Rob Clark <robdclark@gmail.com>
      8 */
      9
     10#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
     11#include <linux/debugfs.h>
     12#include <linux/kthread.h>
     13#include <linux/seq_file.h>
     14
     15#include <drm/drm_crtc.h>
     16#include <drm/drm_file.h>
     17#include <drm/drm_probe_helper.h>
     18
     19#include "msm_drv.h"
     20#include "dpu_kms.h"
     21#include "dpu_hwio.h"
     22#include "dpu_hw_catalog.h"
     23#include "dpu_hw_intf.h"
     24#include "dpu_hw_ctl.h"
     25#include "dpu_hw_dspp.h"
     26#include "dpu_hw_dsc.h"
     27#include "dpu_hw_merge3d.h"
     28#include "dpu_formats.h"
     29#include "dpu_encoder_phys.h"
     30#include "dpu_crtc.h"
     31#include "dpu_trace.h"
     32#include "dpu_core_irq.h"
     33#include "disp/msm_disp_snapshot.h"
     34
     35#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
     36		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
     37
     38#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
     39		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
     40
     41/*
      42 * Two to anticipate panels that can do cmd/vid dynamic switching;
      43 * the plan is to create all possible physical encoder types and
      44 * switch between them at runtime
     45 */
     46#define NUM_PHYS_ENCODER_TYPES 2
     47
     48#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
     49	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
     50
     51#define MAX_CHANNELS_PER_ENC 2
     52
     53#define IDLE_SHORT_TIMEOUT	1
     54
     55#define MAX_HDISPLAY_SPLIT 1080
     56
     57/* timeout in frames waiting for frame done */
     58#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
     59
     60/**
     61 * enum dpu_enc_rc_events - events for resource control state machine
     62 * @DPU_ENC_RC_EVENT_KICKOFF:
     63 *	This event happens at NORMAL priority.
     64 *	Event that signals the start of the transfer. When this event is
     65 *	received, enable MDP/DSI core clocks. Regardless of the previous
     66 *	state, the resource should be in ON state at the end of this event.
     67 * @DPU_ENC_RC_EVENT_FRAME_DONE:
     68 *	This event happens at INTERRUPT level.
     69 *	Event signals the end of the data transfer after the PP FRAME_DONE
     70 *	event. At the end of this event, a delayed work is scheduled to go to
     71 *	IDLE_PC state after IDLE_TIMEOUT time.
     72 * @DPU_ENC_RC_EVENT_PRE_STOP:
     73 *	This event happens at NORMAL priority.
      74 *	This event, when received during the ON state, leaves the RC STATE
     75 *	in the PRE_OFF state. It should be followed by the STOP event as
     76 *	part of encoder disable.
     77 *	If received during IDLE or OFF states, it will do nothing.
     78 * @DPU_ENC_RC_EVENT_STOP:
     79 *	This event happens at NORMAL priority.
     80 *	When this event is received, disable all the MDP/DSI core clocks, and
     81 *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
     82 *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
     83 *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
     84 *	Resource state should be in OFF at the end of the event.
     85 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
     86 *	This event happens at NORMAL priority from a work item.
     87 *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
     88 *	This would disable MDP/DSI core clocks and change the resource state
     89 *	to IDLE.
     90 */
     91enum dpu_enc_rc_events {
     92	DPU_ENC_RC_EVENT_KICKOFF = 1,
     93	DPU_ENC_RC_EVENT_FRAME_DONE,
     94	DPU_ENC_RC_EVENT_PRE_STOP,
     95	DPU_ENC_RC_EVENT_STOP,
     96	DPU_ENC_RC_EVENT_ENTER_IDLE
     97};
     98
     99/*
    100 * enum dpu_enc_rc_states - states that the resource control maintains
    101 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
    102 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
    103 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
    105 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
    106 */
    107enum dpu_enc_rc_states {
    108	DPU_ENC_RC_STATE_OFF,
    109	DPU_ENC_RC_STATE_PRE_OFF,
    110	DPU_ENC_RC_STATE_ON,
    111	DPU_ENC_RC_STATE_IDLE
    112};
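        /*
         * Illustrative summary of the typical flow implied by the event and
         * state descriptions above (not an exhaustive transition table):
         *
         *   OFF/IDLE     --KICKOFF-->    ON
         *   ON           --FRAME_DONE--> ON      (delayed idle work queued)
         *   ON           --ENTER_IDLE--> IDLE    (no updates for IDLE_TIMEOUT)
         *   ON           --PRE_STOP-->   PRE_OFF
         *   PRE_OFF/IDLE --STOP-->       OFF
         */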
    113
    114/**
    115 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
    116 *	encoders. Virtual encoder manages one "logical" display. Physical
    117 *	encoders manage one intf block, tied to a specific panel/sub-panel.
    118 *	Virtual encoder defers as much as possible to the physical encoders.
    119 *	Virtual encoder registers itself with the DRM Framework as the encoder.
    120 * @base:		drm_encoder base class for registration with DRM
    121 * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
    122 * @enabled:		True if the encoder is active, protected by enc_lock
    123 * @num_phys_encs:	Actual number of physical encoders contained.
    124 * @phys_encs:		Container of physical encoders managed.
     125 * @cur_master:		Pointer to the current master in this mode. Optimization:
     126 *			only valid after enable, cleared at disable.
    127 * @cur_slave:		As above but for the slave encoder.
     128 * @hw_pp:		Handles to the pingpong blocks used for the display. The
     129 *			number of pingpong blocks can differ from num_phys_encs.
    130 * @hw_dsc:		Handle to the DSC blocks used for the display.
    131 * @dsc_mask:		Bitmask of used DSC blocks.
    132 * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
    133 *			for partial update right-only cases, such as pingpong
    134 *			split where virtual pingpong does not generate IRQs
    135 * @crtc:		Pointer to the currently assigned crtc. Normally you
    136 *			would use crtc->state->encoder_mask to determine the
    137 *			link between encoder/crtc. However in this case we need
    138 *			to track crtc in the disable() hook which is called
    139 *			_after_ encoder_mask is cleared.
    140 * @connector:		If a mode is set, cached pointer to the active connector
    141 * @crtc_kickoff_cb:		Callback into CRTC that will flush & start
    142 *				all CTL paths
    143 * @crtc_kickoff_cb_data:	Opaque user data given to crtc_kickoff_cb
    144 * @debugfs_root:		Debug file system root file node
    145 * @enc_lock:			Lock around physical encoder
    146 *				create/destroy/enable/disable
     147 * @frame_busy_mask:		Bitmask tracking which phys_encs are still
     148 *				busy processing the current command.
    149 *				Bit0 = phys_encs[0] etc.
    150 * @crtc_frame_event_cb:	callback handler for frame event
    151 * @crtc_frame_event_cb_data:	callback handler private data
    152 * @frame_done_timeout_ms:	frame done timeout in ms
    153 * @frame_done_timer:		watchdog timer for frame done event
    154 * @vsync_event_timer:		vsync timer
    155 * @disp_info:			local copy of msm_display_info struct
     156 * @idle_pc_supported:		indicates whether idle power collapse is supported
    157 * @rc_lock:			resource control mutex lock to protect
    158 *				virt encoder over various state changes
    159 * @rc_state:			resource controller state
    160 * @delayed_off_work:		delayed worker to schedule disabling of
    161 *				clks and resources after IDLE_TIMEOUT time.
    162 * @vsync_event_work:		worker to handle vsync event for autorefresh
    163 * @topology:                   topology of the display
    164 * @idle_timeout:		idle timeout duration in milliseconds
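         * @wide_bus_en:		whether wide-bus mode is enabled for this encoder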
    165 * @dsc:			msm_display_dsc_config pointer, for DSC-enabled encoders
    166 */
    167struct dpu_encoder_virt {
    168	struct drm_encoder base;
    169	spinlock_t enc_spinlock;
    170
    171	bool enabled;
    172
    173	unsigned int num_phys_encs;
    174	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
    175	struct dpu_encoder_phys *cur_master;
    176	struct dpu_encoder_phys *cur_slave;
    177	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
    178	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
    179
    180	unsigned int dsc_mask;
    181
    182	bool intfs_swapped;
    183
    184	struct drm_crtc *crtc;
    185	struct drm_connector *connector;
    186
    187	struct dentry *debugfs_root;
    188	struct mutex enc_lock;
    189	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
    190	void (*crtc_frame_event_cb)(void *, u32 event);
    191	void *crtc_frame_event_cb_data;
    192
    193	atomic_t frame_done_timeout_ms;
    194	struct timer_list frame_done_timer;
    195	struct timer_list vsync_event_timer;
    196
    197	struct msm_display_info disp_info;
    198
    199	bool idle_pc_supported;
    200	struct mutex rc_lock;
    201	enum dpu_enc_rc_states rc_state;
    202	struct delayed_work delayed_off_work;
    203	struct kthread_work vsync_event_work;
    204	struct msm_display_topology topology;
    205
    206	u32 idle_timeout;
    207
    208	bool wide_bus_en;
    209
    210	/* DSC configuration */
    211	struct msm_display_dsc_config *dsc;
    212};
    213
    214#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
    215
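        /*
         * 4x4 ordered-dither threshold matrix (DITHER_MATRIX_SZ entries);
         * programmed into the pingpong block for 6 bpc panels by
         * _dpu_encoder_setup_dither() below.
         */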
    216static u32 dither_matrix[DITHER_MATRIX_SZ] = {
    217	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
    218};
    219
    220
    221bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
    222{
    223	const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
    224
    225	return dpu_enc->wide_bus_en;
    226}
    227
    228static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
    229{
    230	struct dpu_hw_dither_cfg dither_cfg = { 0 };
    231
    232	if (!hw_pp->ops.setup_dither)
    233		return;
    234
    235	switch (bpc) {
    236	case 6:
    237		dither_cfg.c0_bitdepth = 6;
    238		dither_cfg.c1_bitdepth = 6;
    239		dither_cfg.c2_bitdepth = 6;
    240		dither_cfg.c3_bitdepth = 6;
    241		dither_cfg.temporal_en = 0;
    242		break;
    243	default:
    244		hw_pp->ops.setup_dither(hw_pp, NULL);
    245		return;
    246	}
    247
    248	memcpy(&dither_cfg.matrix, dither_matrix,
    249			sizeof(u32) * DITHER_MATRIX_SZ);
    250
    251	hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
    252}
    253
    254static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
    255{
    256	switch (intf_mode) {
    257	case INTF_MODE_VIDEO:
    258		return "INTF_MODE_VIDEO";
    259	case INTF_MODE_CMD:
    260		return "INTF_MODE_CMD";
    261	case INTF_MODE_WB_BLOCK:
    262		return "INTF_MODE_WB_BLOCK";
    263	case INTF_MODE_WB_LINE:
    264		return "INTF_MODE_WB_LINE";
    265	default:
    266		return "INTF_MODE_UNKNOWN";
    267	}
    268}
    269
    270void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
    271		enum dpu_intr_idx intr_idx)
    272{
    273	DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
    274			DRMID(phys_enc->parent),
    275			dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
    276			phys_enc->intf_idx - INTF_0, phys_enc->wb_idx - WB_0,
    277			phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
    278
    279	if (phys_enc->parent_ops->handle_frame_done)
    280		phys_enc->parent_ops->handle_frame_done(
    281				phys_enc->parent, phys_enc,
    282				DPU_ENCODER_FRAME_EVENT_ERROR);
    283}
    284
    285static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
    286		u32 irq_idx, struct dpu_encoder_wait_info *info);
    287
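        /*
         * Wait for the interrupt identified by @irq, falling back to reading
         * the IRQ status directly if the wait times out.  Returns 0 on
         * success (or when no wait is necessary), -EWOULDBLOCK if the encoder
         * is disabled, -ETIMEDOUT if the interrupt never fired, and -EINVAL
         * on invalid parameters.
         */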
    288int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
    289		int irq,
    290		void (*func)(void *arg, int irq_idx),
    291		struct dpu_encoder_wait_info *wait_info)
    292{
    293	u32 irq_status;
    294	int ret;
    295
    296	if (!wait_info) {
    297		DPU_ERROR("invalid params\n");
    298		return -EINVAL;
    299	}
    300	/* note: do master / slave checking outside */
    301
    302	/* return EWOULDBLOCK since we know the wait isn't necessary */
    303	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
    304		DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
    305			  DRMID(phys_enc->parent), func,
    306			  irq);
    307		return -EWOULDBLOCK;
    308	}
    309
    310	if (irq < 0) {
    311		DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
    312			      DRMID(phys_enc->parent), func);
    313		return 0;
    314	}
    315
    316	DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
    317		      DRMID(phys_enc->parent), func,
    318		      irq, phys_enc->hw_pp->idx - PINGPONG_0,
    319		      atomic_read(wait_info->atomic_cnt));
    320
    321	ret = dpu_encoder_helper_wait_event_timeout(
    322			DRMID(phys_enc->parent),
    323			irq,
    324			wait_info);
    325
    326	if (ret <= 0) {
    327		irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
    328		if (irq_status) {
    329			unsigned long flags;
    330
    331			DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
    332				      DRMID(phys_enc->parent), func,
    333				      irq,
    334				      phys_enc->hw_pp->idx - PINGPONG_0,
    335				      atomic_read(wait_info->atomic_cnt));
    336			local_irq_save(flags);
    337			func(phys_enc, irq);
    338			local_irq_restore(flags);
    339			ret = 0;
    340		} else {
    341			ret = -ETIMEDOUT;
    342			DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
    343				      DRMID(phys_enc->parent), func,
    344				      irq,
    345				      phys_enc->hw_pp->idx - PINGPONG_0,
    346				      atomic_read(wait_info->atomic_cnt));
    347		}
    348	} else {
    349		ret = 0;
    350		trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
    351			func, irq,
    352			phys_enc->hw_pp->idx - PINGPONG_0,
    353			atomic_read(wait_info->atomic_cnt));
    354	}
    355
    356	return ret;
    357}
    358
    359int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
    360{
    361	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
    362	struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
    363	return phys ? atomic_read(&phys->vsync_cnt) : 0;
    364}
    365
    366int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
    367{
    368	struct dpu_encoder_virt *dpu_enc;
    369	struct dpu_encoder_phys *phys;
    370	int linecount = 0;
    371
    372	dpu_enc = to_dpu_encoder_virt(drm_enc);
    373	phys = dpu_enc ? dpu_enc->cur_master : NULL;
    374
    375	if (phys && phys->ops.get_line_count)
    376		linecount = phys->ops.get_line_count(phys);
    377
    378	return linecount;
    379}
    380
    381static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
    382{
    383	struct dpu_encoder_virt *dpu_enc = NULL;
    384	int i = 0;
    385
    386	if (!drm_enc) {
    387		DPU_ERROR("invalid encoder\n");
    388		return;
    389	}
    390
    391	dpu_enc = to_dpu_encoder_virt(drm_enc);
    392	DPU_DEBUG_ENC(dpu_enc, "\n");
    393
    394	mutex_lock(&dpu_enc->enc_lock);
    395
    396	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
    397		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
    398
    399		if (phys->ops.destroy) {
    400			phys->ops.destroy(phys);
    401			--dpu_enc->num_phys_encs;
    402			dpu_enc->phys_encs[i] = NULL;
    403		}
    404	}
    405
    406	if (dpu_enc->num_phys_encs)
    407		DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
    408				dpu_enc->num_phys_encs);
    409	dpu_enc->num_phys_encs = 0;
    410	mutex_unlock(&dpu_enc->enc_lock);
    411
    412	drm_encoder_cleanup(drm_enc);
    413	mutex_destroy(&dpu_enc->enc_lock);
    414}
    415
    416void dpu_encoder_helper_split_config(
    417		struct dpu_encoder_phys *phys_enc,
    418		enum dpu_intf interface)
    419{
    420	struct dpu_encoder_virt *dpu_enc;
    421	struct split_pipe_cfg cfg = { 0 };
    422	struct dpu_hw_mdp *hw_mdptop;
    423	struct msm_display_info *disp_info;
    424
    425	if (!phys_enc->hw_mdptop || !phys_enc->parent) {
    426		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
    427		return;
    428	}
    429
    430	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
    431	hw_mdptop = phys_enc->hw_mdptop;
    432	disp_info = &dpu_enc->disp_info;
    433
    434	if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
    435		return;
    436
     437	/*
     438	 * Disable split modes since the encoder will be operating as the only
     439	 * encoder, either for the entire use case (for example, single DSI),
     440	 * or for this frame in the case of a left/right-only partial
     441	 * update.
     442	 */
    443	if (phys_enc->split_role == ENC_ROLE_SOLO) {
    444		if (hw_mdptop->ops.setup_split_pipe)
    445			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
    446		return;
    447	}
    448
    449	cfg.en = true;
    450	cfg.mode = phys_enc->intf_mode;
    451	cfg.intf = interface;
    452
    453	if (cfg.en && phys_enc->ops.needs_single_flush &&
    454			phys_enc->ops.needs_single_flush(phys_enc))
    455		cfg.split_flush_en = true;
    456
    457	if (phys_enc->split_role == ENC_ROLE_MASTER) {
    458		DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
    459
    460		if (hw_mdptop->ops.setup_split_pipe)
    461			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
    462	}
    463}
    464
    465bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
    466{
    467	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
    468	int i, intf_count = 0, num_dsc = 0;
    469
    470	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
    471		if (dpu_enc->phys_encs[i])
    472			intf_count++;
    473
    474	/* See dpu_encoder_get_topology, we only support 2:2:1 topology */
    475	if (dpu_enc->dsc)
    476		num_dsc = 2;
    477
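        	/*
        	 * DSC merge is needed when more DSC blocks than interfaces are
        	 * in use, i.e. the 2 enc : 2 dsc : 1 intf topology set up in
        	 * dpu_encoder_get_topology().
        	 */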
    478	return (num_dsc > 0) && (num_dsc > intf_count);
    479}
    480
    481static struct msm_display_topology dpu_encoder_get_topology(
    482			struct dpu_encoder_virt *dpu_enc,
    483			struct dpu_kms *dpu_kms,
    484			struct drm_display_mode *mode)
    485{
    486	struct msm_display_topology topology = {0};
    487	int i, intf_count = 0;
    488
    489	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
    490		if (dpu_enc->phys_encs[i])
    491			intf_count++;
    492
    493	/* Datapath topology selection
    494	 *
    495	 * Dual display
    496	 * 2 LM, 2 INTF ( Split display using 2 interfaces)
    497	 *
    498	 * Single display
    499	 * 1 LM, 1 INTF
    500	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
    501	 *
     502	 * Add DSPP (color) blocks only to the primary interface, and only if
     503	 * they are available in sufficient number
    504	 */
    505	if (intf_count == 2)
    506		topology.num_lm = 2;
    507	else if (!dpu_kms->catalog->caps->has_3d_merge)
    508		topology.num_lm = 1;
    509	else
    510		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
    511
    512	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
    513		if (dpu_kms->catalog->dspp &&
    514			(dpu_kms->catalog->dspp_count >= topology.num_lm))
    515			topology.num_dspp = topology.num_lm;
    516	}
    517
    518	topology.num_enc = 0;
    519	topology.num_intf = intf_count;
    520
    521	if (dpu_enc->dsc) {
     522		/* In case of Display Stream Compression (DSC), we would use
     523		 * 2 encoders, 2 layer mixers and 1 interface.
     524		 * This is power-optimal and can drive displays up to (and
     525		 * including) 4k resolution.
     526		 */
    527		topology.num_enc = 2;
    528		topology.num_dsc = 2;
    529		topology.num_intf = 1;
    530		topology.num_lm = 2;
    531	}
    532
    533	return topology;
    534}
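        /*
         * Illustrative examples of the topology selection above, assuming
         * hypothetical inputs (not taken from any specific target):
         *   - dual-interface split display:          2 LM, 2 INTF
         *   - single DSI, no 3D merge support:       1 LM, 1 INTF
         *   - single DSI, hdisplay > 1080, 3D merge: 2 LM, 1 INTF
         *   - DSC enabled:                           2 enc, 2 DSC, 2 LM, 1 INTF
         */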
    535
    536static int dpu_encoder_virt_atomic_check(
    537		struct drm_encoder *drm_enc,
    538		struct drm_crtc_state *crtc_state,
    539		struct drm_connector_state *conn_state)
    540{
    541	struct dpu_encoder_virt *dpu_enc;
    542	struct msm_drm_private *priv;
    543	struct dpu_kms *dpu_kms;
    544	struct drm_display_mode *adj_mode;
    545	struct msm_display_topology topology;
    546	struct dpu_global_state *global_state;
    547	int i = 0;
    548	int ret = 0;
    549
    550	if (!drm_enc || !crtc_state || !conn_state) {
    551		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
    552				drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
    553		return -EINVAL;
    554	}
    555
    556	dpu_enc = to_dpu_encoder_virt(drm_enc);
    557	DPU_DEBUG_ENC(dpu_enc, "\n");
    558
    559	priv = drm_enc->dev->dev_private;
    560	dpu_kms = to_dpu_kms(priv->kms);
    561	adj_mode = &crtc_state->adjusted_mode;
    562	global_state = dpu_kms_get_global_state(crtc_state->state);
    563	if (IS_ERR(global_state))
    564		return PTR_ERR(global_state);
    565
    566	trace_dpu_enc_atomic_check(DRMID(drm_enc));
    567
    568	/* perform atomic check on the first physical encoder (master) */
    569	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
    570		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
    571
    572		if (phys->ops.atomic_check)
    573			ret = phys->ops.atomic_check(phys, crtc_state,
    574					conn_state);
    575		if (ret) {
    576			DPU_ERROR_ENC(dpu_enc,
    577					"mode unsupported, phys idx %d\n", i);
    578			break;
    579		}
    580	}
    581
    582	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
    583
    584	/* Reserve dynamic resources now. */
    585	if (!ret) {
    586		/*
     587		 * Release and allocate resources on every modeset.
     588		 * Don't allocate when active is false.
    589		 */
    590		if (drm_atomic_crtc_needs_modeset(crtc_state)) {
    591			dpu_rm_release(global_state, drm_enc);
    592
    593			if (!crtc_state->active_changed || crtc_state->active)
    594				ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
    595						drm_enc, crtc_state, topology);
    596		}
    597	}
    598
    599	trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
    600
    601	return ret;
    602}
    603
    604static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
    605			struct msm_display_info *disp_info)
    606{
    607	struct dpu_vsync_source_cfg vsync_cfg = { 0 };
    608	struct msm_drm_private *priv;
    609	struct dpu_kms *dpu_kms;
    610	struct dpu_hw_mdp *hw_mdptop;
    611	struct drm_encoder *drm_enc;
    612	int i;
    613
    614	if (!dpu_enc || !disp_info) {
    615		DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
    616					dpu_enc != NULL, disp_info != NULL);
    617		return;
    618	} else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
    619		DPU_ERROR("invalid num phys enc %d/%d\n",
    620				dpu_enc->num_phys_encs,
    621				(int) ARRAY_SIZE(dpu_enc->hw_pp));
    622		return;
    623	}
    624
    625	drm_enc = &dpu_enc->base;
     626	/* these pointers are checked in virt_enable_helper */
    627	priv = drm_enc->dev->dev_private;
    628
    629	dpu_kms = to_dpu_kms(priv->kms);
    630	hw_mdptop = dpu_kms->hw_mdp;
    631	if (!hw_mdptop) {
    632		DPU_ERROR("invalid mdptop\n");
    633		return;
    634	}
    635
    636	if (hw_mdptop->ops.setup_vsync_source &&
    637			disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
    638		for (i = 0; i < dpu_enc->num_phys_encs; i++)
    639			vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
    640
    641		vsync_cfg.pp_count = dpu_enc->num_phys_encs;
    642		if (disp_info->is_te_using_watchdog_timer)
    643			vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
    644		else
    645			vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
    646
    647		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
    648	}
    649}
    650
    651static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
    652{
    653	struct dpu_encoder_virt *dpu_enc;
    654	int i;
    655
    656	if (!drm_enc) {
    657		DPU_ERROR("invalid encoder\n");
    658		return;
    659	}
    660
    661	dpu_enc = to_dpu_encoder_virt(drm_enc);
    662
    663	DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
    664	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
    665		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
    666
    667		if (phys->ops.irq_control)
    668			phys->ops.irq_control(phys, enable);
    669	}
    670
    671}
    672
    673static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
    674		bool enable)
    675{
    676	struct msm_drm_private *priv;
    677	struct dpu_kms *dpu_kms;
    678	struct dpu_encoder_virt *dpu_enc;
    679
    680	dpu_enc = to_dpu_encoder_virt(drm_enc);
    681	priv = drm_enc->dev->dev_private;
    682	dpu_kms = to_dpu_kms(priv->kms);
    683
    684	trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
    685
    686	if (!dpu_enc->cur_master) {
    687		DPU_ERROR("encoder master not set\n");
    688		return;
    689	}
    690
    691	if (enable) {
    692		/* enable DPU core clks */
    693		pm_runtime_get_sync(&dpu_kms->pdev->dev);
    694
    695		/* enable all the irq */
    696		_dpu_encoder_irq_control(drm_enc, true);
    697
    698	} else {
    699		/* disable all the irq */
    700		_dpu_encoder_irq_control(drm_enc, false);
    701
    702		/* disable DPU core clks */
    703		pm_runtime_put_sync(&dpu_kms->pdev->dev);
    704	}
    705
    706}
    707
    708static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
    709		u32 sw_event)
    710{
    711	struct dpu_encoder_virt *dpu_enc;
    712	struct msm_drm_private *priv;
    713	bool is_vid_mode = false;
    714
    715	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
    716		DPU_ERROR("invalid parameters\n");
    717		return -EINVAL;
    718	}
    719	dpu_enc = to_dpu_encoder_virt(drm_enc);
    720	priv = drm_enc->dev->dev_private;
    721	is_vid_mode = dpu_enc->disp_info.capabilities &
    722						MSM_DISPLAY_CAP_VID_MODE;
    723
    724	/*
     725	 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
     726	 * STOP events and return early for other events (i.e. wb display).
    727	 */
    728	if (!dpu_enc->idle_pc_supported &&
    729			(sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
    730			sw_event != DPU_ENC_RC_EVENT_STOP &&
    731			sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
    732		return 0;
    733
    734	trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
    735			 dpu_enc->rc_state, "begin");
    736
    737	switch (sw_event) {
    738	case DPU_ENC_RC_EVENT_KICKOFF:
    739		/* cancel delayed off work, if any */
    740		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
    741			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
    742					sw_event);
    743
    744		mutex_lock(&dpu_enc->rc_lock);
    745
    746		/* return if the resource control is already in ON state */
    747		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
     748			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in ON state\n",
    749				      DRMID(drm_enc), sw_event);
    750			mutex_unlock(&dpu_enc->rc_lock);
    751			return 0;
    752		} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
    753				dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
     754			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in state %d\n",
    755				      DRMID(drm_enc), sw_event,
    756				      dpu_enc->rc_state);
    757			mutex_unlock(&dpu_enc->rc_lock);
    758			return -EINVAL;
    759		}
    760
    761		if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
    762			_dpu_encoder_irq_control(drm_enc, true);
    763		else
    764			_dpu_encoder_resource_control_helper(drm_enc, true);
    765
    766		dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
    767
    768		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
    769				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
    770				 "kickoff");
    771
    772		mutex_unlock(&dpu_enc->rc_lock);
    773		break;
    774
    775	case DPU_ENC_RC_EVENT_FRAME_DONE:
    776		/*
     777		 * The mutex lock is not used as this event happens in interrupt
     778		 * context. Locking is also not required, as the other events
     779		 * like KICKOFF and STOP do a wait-for-idle before executing
     780		 * the resource_control.
    781		 */
    782		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
    783			DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
    784				      DRMID(drm_enc), sw_event,
    785				      dpu_enc->rc_state);
    786			return -EINVAL;
    787		}
    788
    789		/*
    790		 * schedule off work item only when there are no
    791		 * frames pending
    792		 */
    793		if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
    794			DRM_DEBUG_KMS("id:%d skip schedule work\n",
    795				      DRMID(drm_enc));
    796			return 0;
    797		}
    798
    799		queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
    800				   msecs_to_jiffies(dpu_enc->idle_timeout));
    801
    802		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
    803				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
    804				 "frame done");
    805		break;
    806
    807	case DPU_ENC_RC_EVENT_PRE_STOP:
    808		/* cancel delayed off work, if any */
    809		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
    810			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
    811					sw_event);
    812
    813		mutex_lock(&dpu_enc->rc_lock);
    814
    815		if (is_vid_mode &&
    816			  dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
    817			_dpu_encoder_irq_control(drm_enc, true);
    818		}
     819		/* skip if already OFF or IDLE, resources are off already */
    820		else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
    821				dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
    822			DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
    823				      DRMID(drm_enc), sw_event,
    824				      dpu_enc->rc_state);
    825			mutex_unlock(&dpu_enc->rc_lock);
    826			return 0;
    827		}
    828
    829		dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
    830
    831		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
    832				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
    833				 "pre stop");
    834
    835		mutex_unlock(&dpu_enc->rc_lock);
    836		break;
    837
    838	case DPU_ENC_RC_EVENT_STOP:
    839		mutex_lock(&dpu_enc->rc_lock);
    840
    841		/* return if the resource control is already in OFF state */
    842		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
    843			DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
    844				      DRMID(drm_enc), sw_event);
    845			mutex_unlock(&dpu_enc->rc_lock);
    846			return 0;
    847		} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
    848			DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
    849				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
    850			mutex_unlock(&dpu_enc->rc_lock);
    851			return -EINVAL;
    852		}
    853
     854		/*
     855		 * We expect to arrive here only from the IDLE or PRE_OFF state;
     856		 * in the IDLE state the resources are already disabled.
     857		 */
    858		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
    859			_dpu_encoder_resource_control_helper(drm_enc, false);
    860
    861		dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
    862
    863		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
    864				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
    865				 "stop");
    866
    867		mutex_unlock(&dpu_enc->rc_lock);
    868		break;
    869
    870	case DPU_ENC_RC_EVENT_ENTER_IDLE:
    871		mutex_lock(&dpu_enc->rc_lock);
    872
    873		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
    874			DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
    875				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
    876			mutex_unlock(&dpu_enc->rc_lock);
    877			return 0;
    878		}
    879
    880		/*
    881		 * if we are in ON but a frame was just kicked off,
    882		 * ignore the IDLE event, it's probably a stale timer event
    883		 */
    884		if (dpu_enc->frame_busy_mask[0]) {
    885			DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
    886				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
    887			mutex_unlock(&dpu_enc->rc_lock);
    888			return 0;
    889		}
    890
    891		if (is_vid_mode)
    892			_dpu_encoder_irq_control(drm_enc, false);
    893		else
    894			_dpu_encoder_resource_control_helper(drm_enc, false);
    895
    896		dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
    897
    898		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
    899				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
    900				 "idle");
    901
    902		mutex_unlock(&dpu_enc->rc_lock);
    903		break;
    904
    905	default:
    906		DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
    907			  sw_event);
    908		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
    909				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
    910				 "error");
    911		break;
    912	}
    913
    914	trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
    915			 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
    916			 "end");
    917	return 0;
    918}
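        /*
         * Locking summary for the handler above: KICKOFF, PRE_STOP, STOP and
         * ENTER_IDLE serialize on rc_lock, while FRAME_DONE runs in interrupt
         * context and relies on the wait-for-idle performed by KICKOFF/STOP
         * instead of taking the lock.
         */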
    919
    920void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
    921		struct drm_writeback_job *job)
    922{
    923	struct dpu_encoder_virt *dpu_enc;
    924	int i;
    925
    926	dpu_enc = to_dpu_encoder_virt(drm_enc);
    927
    928	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
    929		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
    930
    931		if (phys->ops.prepare_wb_job)
    932			phys->ops.prepare_wb_job(phys, job);
    933
    934	}
    935}
    936
    937void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
    938		struct drm_writeback_job *job)
    939{
    940	struct dpu_encoder_virt *dpu_enc;
    941	int i;
    942
    943	dpu_enc = to_dpu_encoder_virt(drm_enc);
    944
    945	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
    946		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
    947
    948		if (phys->ops.cleanup_wb_job)
    949			phys->ops.cleanup_wb_job(phys, job);
    950
    951	}
    952}
    953
    954static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
    955					     struct drm_crtc_state *crtc_state,
    956					     struct drm_connector_state *conn_state)
    957{
    958	struct dpu_encoder_virt *dpu_enc;
    959	struct msm_drm_private *priv;
    960	struct dpu_kms *dpu_kms;
    961	struct dpu_crtc_state *cstate;
    962	struct dpu_global_state *global_state;
    963	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
    964	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
    965	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
    966	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
    967	struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
    968	int num_lm, num_ctl, num_pp, num_dsc;
    969	unsigned int dsc_mask = 0;
    970	int i;
    971
    972	if (!drm_enc) {
    973		DPU_ERROR("invalid encoder\n");
    974		return;
    975	}
    976
    977	dpu_enc = to_dpu_encoder_virt(drm_enc);
    978	DPU_DEBUG_ENC(dpu_enc, "\n");
    979
    980	priv = drm_enc->dev->dev_private;
    981	dpu_kms = to_dpu_kms(priv->kms);
    982
    983	global_state = dpu_kms_get_existing_global_state(dpu_kms);
    984	if (IS_ERR_OR_NULL(global_state)) {
    985		DPU_ERROR("Failed to get global state");
    986		return;
    987	}
    988
    989	trace_dpu_enc_mode_set(DRMID(drm_enc));
    990
     991	/* Query resources that have been reserved in the atomic check step. */
    992	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
    993		drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
    994		ARRAY_SIZE(hw_pp));
    995	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
    996		drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
    997	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
    998		drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
    999	dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
   1000		drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
   1001		ARRAY_SIZE(hw_dspp));
   1002
   1003	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
   1004		dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
   1005						: NULL;
   1006
   1007	if (dpu_enc->dsc) {
   1008		num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
   1009							drm_enc->base.id, DPU_HW_BLK_DSC,
   1010							hw_dsc, ARRAY_SIZE(hw_dsc));
   1011		for (i = 0; i < num_dsc; i++) {
   1012			dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
   1013			dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
   1014		}
   1015	}
   1016
   1017	dpu_enc->dsc_mask = dsc_mask;
   1018
   1019	cstate = to_dpu_crtc_state(crtc_state);
   1020
   1021	for (i = 0; i < num_lm; i++) {
   1022		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
   1023
   1024		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
   1025		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
   1026		cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
   1027	}
   1028
   1029	cstate->num_mixers = num_lm;
   1030
   1031	dpu_enc->connector = conn_state->connector;
   1032
   1033	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   1034		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
   1035
   1036		if (!dpu_enc->hw_pp[i]) {
   1037			DPU_ERROR_ENC(dpu_enc,
   1038				"no pp block assigned at idx: %d\n", i);
   1039			return;
   1040		}
   1041
   1042		if (!hw_ctl[i]) {
   1043			DPU_ERROR_ENC(dpu_enc,
   1044				"no ctl block assigned at idx: %d\n", i);
   1045			return;
   1046		}
   1047
   1048		phys->hw_pp = dpu_enc->hw_pp[i];
   1049		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
   1050
   1051		if (phys->intf_idx >= INTF_0 && phys->intf_idx < INTF_MAX)
   1052			phys->hw_intf = dpu_rm_get_intf(&dpu_kms->rm, phys->intf_idx);
   1053
   1054		if (phys->wb_idx >= WB_0 && phys->wb_idx < WB_MAX)
   1055			phys->hw_wb = dpu_rm_get_wb(&dpu_kms->rm, phys->wb_idx);
   1056
   1057		if (!phys->hw_intf && !phys->hw_wb) {
   1058			DPU_ERROR_ENC(dpu_enc,
   1059				      "no intf or wb block assigned at idx: %d\n", i);
   1060			return;
   1061		}
   1062
   1063		if (phys->hw_intf && phys->hw_wb) {
   1064			DPU_ERROR_ENC(dpu_enc,
   1065					"invalid phys both intf and wb block at idx: %d\n", i);
   1066			return;
   1067		}
   1068
   1069		phys->cached_mode = crtc_state->adjusted_mode;
   1070		if (phys->ops.atomic_mode_set)
   1071			phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
   1072	}
   1073}
   1074
   1075static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
   1076{
   1077	struct dpu_encoder_virt *dpu_enc = NULL;
   1078	int i;
   1079
   1080	if (!drm_enc || !drm_enc->dev) {
   1081		DPU_ERROR("invalid parameters\n");
   1082		return;
   1083	}
   1084
   1085	dpu_enc = to_dpu_encoder_virt(drm_enc);
   1086	if (!dpu_enc || !dpu_enc->cur_master) {
   1087		DPU_ERROR("invalid dpu encoder/master\n");
   1088		return;
   1089	}
   1090
   1091
   1092	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS &&
   1093		dpu_enc->cur_master->hw_mdptop &&
   1094		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
   1095		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
   1096			dpu_enc->cur_master->hw_mdptop);
   1097
   1098	_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
   1099
   1100	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
   1101			!WARN_ON(dpu_enc->num_phys_encs == 0)) {
   1102		unsigned bpc = dpu_enc->connector->display_info.bpc;
   1103		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
   1104			if (!dpu_enc->hw_pp[i])
   1105				continue;
   1106			_dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
   1107		}
   1108	}
   1109}
   1110
   1111void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
   1112{
   1113	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
   1114
   1115	mutex_lock(&dpu_enc->enc_lock);
   1116
   1117	if (!dpu_enc->enabled)
   1118		goto out;
   1119
   1120	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
   1121		dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
   1122	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
   1123		dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
   1124
   1125	_dpu_encoder_virt_enable_helper(drm_enc);
   1126
   1127out:
   1128	mutex_unlock(&dpu_enc->enc_lock);
   1129}
   1130
   1131static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
   1132{
   1133	struct dpu_encoder_virt *dpu_enc = NULL;
   1134	int ret = 0;
   1135	struct drm_display_mode *cur_mode = NULL;
   1136
   1137	dpu_enc = to_dpu_encoder_virt(drm_enc);
   1138
   1139	mutex_lock(&dpu_enc->enc_lock);
   1140	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
   1141
   1142	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
   1143			     cur_mode->vdisplay);
   1144
   1145	/* always enable slave encoder before master */
   1146	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
   1147		dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
   1148
   1149	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
   1150		dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
   1151
   1152	ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
   1153	if (ret) {
   1154		DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
   1155				ret);
   1156		goto out;
   1157	}
   1158
   1159	_dpu_encoder_virt_enable_helper(drm_enc);
   1160
   1161	dpu_enc->enabled = true;
   1162
   1163out:
   1164	mutex_unlock(&dpu_enc->enc_lock);
   1165}
   1166
   1167static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
   1168{
   1169	struct dpu_encoder_virt *dpu_enc = NULL;
   1170	int i = 0;
   1171
   1172	dpu_enc = to_dpu_encoder_virt(drm_enc);
   1173	DPU_DEBUG_ENC(dpu_enc, "\n");
   1174
   1175	mutex_lock(&dpu_enc->enc_lock);
   1176	dpu_enc->enabled = false;
   1177
   1178	trace_dpu_enc_disable(DRMID(drm_enc));
   1179
   1180	/* wait for idle */
   1181	dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
   1182
   1183	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
   1184
   1185	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   1186		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
   1187
   1188		if (phys->ops.disable)
   1189			phys->ops.disable(phys);
   1190	}
   1191
   1192
   1193	/* after phys waits for frame-done, should be no more frames pending */
   1194	if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
   1195		DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
   1196		del_timer_sync(&dpu_enc->frame_done_timer);
   1197	}
   1198
   1199	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
   1200
   1201	dpu_enc->connector = NULL;
   1202
   1203	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
   1204
   1205	mutex_unlock(&dpu_enc->enc_lock);
   1206}
   1207
   1208static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
   1209		enum dpu_intf_type type, u32 controller_id)
   1210{
   1211	int i = 0;
   1212
   1213	if (type != INTF_WB) {
   1214		for (i = 0; i < catalog->intf_count; i++) {
   1215			if (catalog->intf[i].type == type
   1216				&& catalog->intf[i].controller_id == controller_id) {
   1217				return catalog->intf[i].id;
   1218			}
   1219		}
   1220	}
   1221
   1222	return INTF_MAX;
   1223}
   1224
   1225static enum dpu_wb dpu_encoder_get_wb(struct dpu_mdss_cfg *catalog,
   1226		enum dpu_intf_type type, u32 controller_id)
   1227{
   1228	int i = 0;
   1229
   1230	if (type != INTF_WB)
   1231		goto end;
   1232
   1233	for (i = 0; i < catalog->wb_count; i++) {
   1234		if (catalog->wb[i].id == controller_id)
   1235			return catalog->wb[i].id;
   1236	}
   1237
   1238end:
   1239	return WB_MAX;
   1240}
   1241
   1242static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
   1243		struct dpu_encoder_phys *phy_enc)
   1244{
   1245	struct dpu_encoder_virt *dpu_enc = NULL;
   1246	unsigned long lock_flags;
   1247
   1248	if (!drm_enc || !phy_enc)
   1249		return;
   1250
   1251	DPU_ATRACE_BEGIN("encoder_vblank_callback");
   1252	dpu_enc = to_dpu_encoder_virt(drm_enc);
   1253
   1254	atomic_inc(&phy_enc->vsync_cnt);
   1255
   1256	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
   1257	if (dpu_enc->crtc)
   1258		dpu_crtc_vblank_callback(dpu_enc->crtc);
   1259	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
   1260
   1261	DPU_ATRACE_END("encoder_vblank_callback");
   1262}
   1263
   1264static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
   1265		struct dpu_encoder_phys *phy_enc)
   1266{
   1267	if (!phy_enc)
   1268		return;
   1269
   1270	DPU_ATRACE_BEGIN("encoder_underrun_callback");
   1271	atomic_inc(&phy_enc->underrun_cnt);
   1272
   1273	/* trigger dump only on the first underrun */
   1274	if (atomic_read(&phy_enc->underrun_cnt) == 1)
   1275		msm_disp_snapshot_state(drm_enc->dev);
   1276
   1277	trace_dpu_enc_underrun_cb(DRMID(drm_enc),
   1278				  atomic_read(&phy_enc->underrun_cnt));
   1279	DPU_ATRACE_END("encoder_underrun_callback");
   1280}
   1281
   1282void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
   1283{
   1284	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
   1285	unsigned long lock_flags;
   1286
   1287	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
   1288	/* crtc should always be cleared before re-assigning */
   1289	WARN_ON(crtc && dpu_enc->crtc);
   1290	dpu_enc->crtc = crtc;
   1291	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
   1292}
   1293
   1294void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
   1295					struct drm_crtc *crtc, bool enable)
   1296{
   1297	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
   1298	unsigned long lock_flags;
   1299	int i;
   1300
   1301	trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
   1302
   1303	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
   1304	if (dpu_enc->crtc != crtc) {
   1305		spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
   1306		return;
   1307	}
   1308	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
   1309
   1310	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   1311		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
   1312
   1313		if (phys->ops.control_vblank_irq)
   1314			phys->ops.control_vblank_irq(phys, enable);
   1315	}
   1316}
   1317
   1318void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
   1319		void (*frame_event_cb)(void *, u32 event),
   1320		void *frame_event_cb_data)
   1321{
   1322	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
   1323	unsigned long lock_flags;
   1324	bool enable;
   1325
   1326	enable = frame_event_cb ? true : false;
   1327
   1328	if (!drm_enc) {
   1329		DPU_ERROR("invalid encoder\n");
   1330		return;
   1331	}
   1332	trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
   1333
   1334	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
   1335	dpu_enc->crtc_frame_event_cb = frame_event_cb;
   1336	dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
   1337	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
   1338}
   1339
   1340static void dpu_encoder_frame_done_callback(
   1341		struct drm_encoder *drm_enc,
   1342		struct dpu_encoder_phys *ready_phys, u32 event)
   1343{
   1344	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
   1345	unsigned int i;
   1346
   1347	if (event & (DPU_ENCODER_FRAME_EVENT_DONE
   1348			| DPU_ENCODER_FRAME_EVENT_ERROR
   1349			| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
   1350
   1351		if (!dpu_enc->frame_busy_mask[0]) {
    1352			/*
   1353			 * suppress frame_done without waiter,
   1354			 * likely autorefresh
   1355			 */
   1356			trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
   1357					dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
   1358					ready_phys->intf_idx, ready_phys->wb_idx);
   1359			return;
   1360		}
   1361
   1362		/* One of the physical encoders has become idle */
   1363		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   1364			if (dpu_enc->phys_encs[i] == ready_phys) {
   1365				trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
   1366						dpu_enc->frame_busy_mask[0]);
   1367				clear_bit(i, dpu_enc->frame_busy_mask);
   1368			}
   1369		}
   1370
   1371		if (!dpu_enc->frame_busy_mask[0]) {
   1372			atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
   1373			del_timer(&dpu_enc->frame_done_timer);
   1374
   1375			dpu_encoder_resource_control(drm_enc,
   1376					DPU_ENC_RC_EVENT_FRAME_DONE);
   1377
   1378			if (dpu_enc->crtc_frame_event_cb)
   1379				dpu_enc->crtc_frame_event_cb(
   1380					dpu_enc->crtc_frame_event_cb_data,
   1381					event);
   1382		}
   1383	} else {
   1384		if (dpu_enc->crtc_frame_event_cb)
   1385			dpu_enc->crtc_frame_event_cb(
   1386				dpu_enc->crtc_frame_event_cb_data, event);
   1387	}
   1388}
   1389
   1390static void dpu_encoder_off_work(struct work_struct *work)
   1391{
   1392	struct dpu_encoder_virt *dpu_enc = container_of(work,
   1393			struct dpu_encoder_virt, delayed_off_work.work);
   1394
   1395	dpu_encoder_resource_control(&dpu_enc->base,
   1396						DPU_ENC_RC_EVENT_ENTER_IDLE);
   1397
   1398	dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
   1399				DPU_ENCODER_FRAME_EVENT_IDLE);
   1400}
   1401
   1402/**
   1403 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
   1404 * @drm_enc: Pointer to drm encoder structure
   1405 * @phys: Pointer to physical encoder structure
   1406 * @extra_flush_bits: Additional bit mask to include in flush trigger
   1407 */
   1408static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
   1409		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
   1410{
   1411	struct dpu_hw_ctl *ctl;
   1412	int pending_kickoff_cnt;
   1413	u32 ret = UINT_MAX;
   1414
   1415	if (!phys->hw_pp) {
   1416		DPU_ERROR("invalid pingpong hw\n");
   1417		return;
   1418	}
   1419
   1420	ctl = phys->hw_ctl;
   1421	if (!ctl->ops.trigger_flush) {
   1422		DPU_ERROR("missing trigger cb\n");
   1423		return;
   1424	}
   1425
   1426	pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
   1427
   1428	if (extra_flush_bits && ctl->ops.update_pending_flush)
   1429		ctl->ops.update_pending_flush(ctl, extra_flush_bits);
   1430
   1431	ctl->ops.trigger_flush(ctl);
   1432
   1433	if (ctl->ops.get_pending_flush)
   1434		ret = ctl->ops.get_pending_flush(ctl);
   1435
   1436	trace_dpu_enc_trigger_flush(DRMID(drm_enc),
   1437			dpu_encoder_helper_get_intf_type(phys->intf_mode),
   1438			phys->intf_idx, phys->wb_idx,
   1439			pending_kickoff_cnt, ctl->idx,
   1440			extra_flush_bits, ret);
   1441}
   1442
   1443/**
   1444 * _dpu_encoder_trigger_start - trigger start for a physical encoder
   1445 * @phys: Pointer to physical encoder structure
   1446 */
   1447static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
   1448{
   1449	if (!phys) {
   1450		DPU_ERROR("invalid argument(s)\n");
   1451		return;
   1452	}
   1453
   1454	if (!phys->hw_pp) {
   1455		DPU_ERROR("invalid pingpong hw\n");
   1456		return;
   1457	}
   1458
   1459	if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
   1460		phys->ops.trigger_start(phys);
   1461}
   1462
   1463void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
   1464{
   1465	struct dpu_hw_ctl *ctl;
   1466
   1467	ctl = phys_enc->hw_ctl;
   1468	if (ctl->ops.trigger_start) {
   1469		ctl->ops.trigger_start(ctl);
   1470		trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
   1471	}
   1472}
   1473
   1474static int dpu_encoder_helper_wait_event_timeout(
   1475		int32_t drm_id,
   1476		u32 irq_idx,
   1477		struct dpu_encoder_wait_info *info)
   1478{
   1479	int rc = 0;
   1480	s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
   1481	s64 jiffies = msecs_to_jiffies(info->timeout_ms);
   1482	s64 time;
   1483
   1484	do {
   1485		rc = wait_event_timeout(*(info->wq),
   1486				atomic_read(info->atomic_cnt) == 0, jiffies);
   1487		time = ktime_to_ms(ktime_get());
   1488
   1489		trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
   1490						 expected_time,
   1491						 atomic_read(info->atomic_cnt));
    1492	/* If we timed out but the counter is still set and time remains, wait again */
   1493	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
   1494			(time < expected_time));
   1495
   1496	return rc;
   1497}
   1498
   1499static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
   1500{
   1501	struct dpu_encoder_virt *dpu_enc;
   1502	struct dpu_hw_ctl *ctl;
   1503	int rc;
   1504	struct drm_encoder *drm_enc;
   1505
   1506	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
   1507	ctl = phys_enc->hw_ctl;
   1508	drm_enc = phys_enc->parent;
   1509
   1510	if (!ctl->ops.reset)
   1511		return;
   1512
   1513	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
   1514		      ctl->idx);
   1515
   1516	rc = ctl->ops.reset(ctl);
   1517	if (rc) {
   1518		DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n",  ctl->idx);
   1519		msm_disp_snapshot_state(drm_enc->dev);
   1520	}
   1521
   1522	phys_enc->enable_state = DPU_ENC_ENABLED;
   1523}
   1524
   1525/**
   1526 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
   1527 *	Iterate through the physical encoders and perform consolidated flush
   1528 *	and/or control start triggering as needed. This is done in the virtual
   1529 *	encoder rather than the individual physical ones in order to handle
   1530 *	use cases that require visibility into multiple physical encoders at
   1531 *	a time.
   1532 * @dpu_enc: Pointer to virtual encoder structure
   1533 */
   1534static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
   1535{
   1536	struct dpu_hw_ctl *ctl;
   1537	uint32_t i, pending_flush;
   1538	unsigned long lock_flags;
   1539
   1540	pending_flush = 0x0;
   1541
   1542	/* update pending counts and trigger kickoff ctl flush atomically */
   1543	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
   1544
   1545	/* don't perform flush/start operations for slave encoders */
   1546	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   1547		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
   1548
   1549		if (phys->enable_state == DPU_ENC_DISABLED)
   1550			continue;
   1551
   1552		ctl = phys->hw_ctl;
   1553
   1554		/*
   1555		 * This is cleared in frame_done worker, which isn't invoked
   1556		 * for async commits. So don't set this for async, since it'll
   1557		 * roll over to the next commit.
   1558		 */
   1559		if (phys->split_role != ENC_ROLE_SLAVE)
   1560			set_bit(i, dpu_enc->frame_busy_mask);
   1561
   1562		if (!phys->ops.needs_single_flush ||
   1563				!phys->ops.needs_single_flush(phys))
   1564			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
   1565		else if (ctl->ops.get_pending_flush)
   1566			pending_flush |= ctl->ops.get_pending_flush(ctl);
   1567	}
   1568
   1569	/* for split flush, combine pending flush masks and send to master */
   1570	if (pending_flush && dpu_enc->cur_master) {
   1571		_dpu_encoder_trigger_flush(
   1572				&dpu_enc->base,
   1573				dpu_enc->cur_master,
   1574				pending_flush);
   1575	}
   1576
   1577	_dpu_encoder_trigger_start(dpu_enc->cur_master);
   1578
   1579	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
   1580}
   1581
   1582void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
   1583{
   1584	struct dpu_encoder_virt *dpu_enc;
   1585	struct dpu_encoder_phys *phys;
   1586	unsigned int i;
   1587	struct dpu_hw_ctl *ctl;
   1588	struct msm_display_info *disp_info;
   1589
   1590	if (!drm_enc) {
   1591		DPU_ERROR("invalid encoder\n");
   1592		return;
   1593	}
   1594	dpu_enc = to_dpu_encoder_virt(drm_enc);
   1595	disp_info = &dpu_enc->disp_info;
   1596
   1597	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   1598		phys = dpu_enc->phys_encs[i];
   1599
   1600		ctl = phys->hw_ctl;
   1601		if (ctl->ops.clear_pending_flush)
   1602			ctl->ops.clear_pending_flush(ctl);
   1603
   1604		/* update only for command mode primary ctl */
   1605		if ((phys == dpu_enc->cur_master) &&
   1606		   (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
   1607		    && ctl->ops.trigger_pending)
   1608			ctl->ops.trigger_pending(ctl);
   1609	}
   1610}
   1611
   1612static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
   1613		struct drm_display_mode *mode)
   1614{
   1615	u64 pclk_rate;
   1616	u32 pclk_period;
   1617	u32 line_time;
   1618
   1619	/*
   1620	 * For linetime calculation, only operate on master encoder.
   1621	 */
   1622	if (!dpu_enc->cur_master)
   1623		return 0;
   1624
   1625	if (!dpu_enc->cur_master->ops.get_line_count) {
   1626		DPU_ERROR("get_line_count function not defined\n");
   1627		return 0;
   1628	}
   1629
   1630	pclk_rate = mode->clock; /* pixel clock in kHz */
   1631	if (pclk_rate == 0) {
   1632		DPU_ERROR("pclk is 0, cannot calculate line time\n");
   1633		return 0;
   1634	}
   1635
   1636	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
   1637	if (pclk_period == 0) {
   1638		DPU_ERROR("pclk period is 0\n");
   1639		return 0;
   1640	}
   1641
   1642	/*
   1643	 * Line time calculation based on Pixel clock and HTOTAL.
   1644	 * Final unit is in ns.
   1645	 */
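	/*
	 * Illustrative example (hypothetical mode, not from this driver):
	 * for a 1920x1080@60 mode with htotal = 2200 and a 148500 kHz pixel
	 * clock, pclk_period = DIV_ROUND_UP(10^9, 148500) = 6735 (ps) and
	 * line_time = 6735 * 2200 / 1000 = 14817 ns, i.e. ~14.8 us per line.
	 */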
   1646	line_time = (pclk_period * mode->htotal) / 1000;
   1647	if (line_time == 0) {
   1648		DPU_ERROR("line time calculation is 0\n");
   1649		return 0;
   1650	}
   1651
   1652	DPU_DEBUG_ENC(dpu_enc,
   1653			"clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
   1654			pclk_rate, pclk_period, line_time);
   1655
   1656	return line_time;
   1657}
   1658
   1659int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
   1660{
   1661	struct drm_display_mode *mode;
   1662	struct dpu_encoder_virt *dpu_enc;
   1663	u32 cur_line;
   1664	u32 line_time;
   1665	u32 vtotal, time_to_vsync;
   1666	ktime_t cur_time;
   1667
   1668	dpu_enc = to_dpu_encoder_virt(drm_enc);
   1669
   1670	if (!drm_enc->crtc || !drm_enc->crtc->state) {
   1671		DPU_ERROR("crtc/crtc state object is NULL\n");
   1672		return -EINVAL;
   1673	}
   1674	mode = &drm_enc->crtc->state->adjusted_mode;
   1675
   1676	line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
   1677	if (!line_time)
   1678		return -EINVAL;
   1679
   1680	cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
   1681
   1682	vtotal = mode->vtotal;
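	/*
	 * If the hardware reports a line count at or beyond vtotal, wait a
	 * full frame; otherwise wait only for the lines remaining in the
	 * current frame.
	 */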
   1683	if (cur_line >= vtotal)
   1684		time_to_vsync = line_time * vtotal;
   1685	else
   1686		time_to_vsync = line_time * (vtotal - cur_line);
   1687
   1688	if (time_to_vsync == 0) {
   1689		DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
   1690				vtotal);
   1691		return -EINVAL;
   1692	}
   1693
   1694	cur_time = ktime_get();
   1695	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
   1696
   1697	DPU_DEBUG_ENC(dpu_enc,
   1698			"cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
   1699			cur_line, vtotal, time_to_vsync,
   1700			ktime_to_ms(cur_time),
   1701			ktime_to_ms(*wakeup_time));
   1702	return 0;
   1703}
   1704
   1705static void dpu_encoder_vsync_event_handler(struct timer_list *t)
   1706{
   1707	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
   1708			vsync_event_timer);
   1709	struct drm_encoder *drm_enc = &dpu_enc->base;
   1710	struct msm_drm_private *priv;
   1711	struct msm_drm_thread *event_thread;
   1712
   1713	if (!drm_enc->dev || !drm_enc->crtc) {
   1714		DPU_ERROR("invalid parameters\n");
   1715		return;
   1716	}
   1717
   1718	priv = drm_enc->dev->dev_private;
   1719
   1720	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
   1721		DPU_ERROR("invalid crtc index\n");
   1722		return;
   1723	}
   1724	event_thread = &priv->event_thread[drm_enc->crtc->index];
   1725	if (!event_thread) {
   1726		DPU_ERROR("event_thread not found for crtc:%d\n",
   1727				drm_enc->crtc->index);
   1728		return;
   1729	}
   1730
   1731	del_timer(&dpu_enc->vsync_event_timer);
   1732}
   1733
   1734static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
   1735{
   1736	struct dpu_encoder_virt *dpu_enc = container_of(work,
   1737			struct dpu_encoder_virt, vsync_event_work);
   1738	ktime_t wakeup_time;
   1739
   1740	if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
   1741		return;
   1742
   1743	trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
   1744	mod_timer(&dpu_enc->vsync_event_timer,
   1745			nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
   1746}
   1747
   1748static u32
   1749dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
   1750				  u32 enc_ip_width)
   1751{
   1752	int ssm_delay, total_pixels, soft_slice_per_enc;
   1753
   1754	soft_slice_per_enc = enc_ip_width / dsc->drm->slice_width;
   1755
   1756	/*
   1757	 * minimum number of initial line pixels is a sum of:
   1758	 * 1. sub-stream multiplexer delay (83 groups for 8bpc,
   1759	 *    91 for 10 bpc) * 3
   1760	 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
   1761	 * 3. the initial xmit delay
   1762	 * 4. total pipeline delay through the "lock step" of encoder (47)
   1763	 * 5. 6 additional pixels as the output of the rate buffer is
   1764	 *    48 bits wide
   1765	 */
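	/*
	 * Illustrative example (hypothetical DSC parameters): for 8 bpc,
	 * ssm_delay = 84; with initial_xmit_delay = 512, slice_width = 540
	 * and a single soft slice per encoder, total_pixels = 84 * 3 + 512 +
	 * 47 = 811 and DIV_ROUND_UP(811, 540) = 2 initial lines.
	 */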
   1766	ssm_delay = ((dsc->drm->bits_per_component < 10) ? 84 : 92);
   1767	total_pixels = ssm_delay * 3 + dsc->drm->initial_xmit_delay + 47;
   1768	if (soft_slice_per_enc > 1)
   1769		total_pixels += (ssm_delay * 3);
   1770	return DIV_ROUND_UP(total_pixels, dsc->drm->slice_width);
   1771}
   1772
   1773static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
   1774				     struct dpu_hw_pingpong *hw_pp,
   1775				     struct msm_display_dsc_config *dsc,
   1776				     u32 common_mode,
   1777				     u32 initial_lines)
   1778{
   1779	if (hw_dsc->ops.dsc_config)
   1780		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);
   1781
   1782	if (hw_dsc->ops.dsc_config_thresh)
   1783		hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
   1784
   1785	if (hw_pp->ops.setup_dsc)
   1786		hw_pp->ops.setup_dsc(hw_pp);
   1787
   1788	if (hw_pp->ops.enable_dsc)
   1789		hw_pp->ops.enable_dsc(hw_pp);
   1790}
   1791
   1792static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
   1793				 struct msm_display_dsc_config *dsc)
   1794{
	/* this function currently only supports the 2 LM, 2 enc, 1 DSC configuration */
   1796	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
   1797	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
   1798	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
   1799	int this_frame_slices;
   1800	int intf_ip_w, enc_ip_w;
   1801	int dsc_common_mode;
   1802	int pic_width;
   1803	u32 initial_lines;
   1804	int i;
   1805
   1806	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
   1807		hw_pp[i] = dpu_enc->hw_pp[i];
   1808		hw_dsc[i] = dpu_enc->hw_dsc[i];
   1809
   1810		if (!hw_pp[i] || !hw_dsc[i]) {
   1811			DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
   1812			return;
   1813		}
   1814	}
   1815
   1816	pic_width = dsc->drm->pic_width;
   1817
   1818	dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
   1819	if (enc_master->intf_mode == INTF_MODE_VIDEO)
   1820		dsc_common_mode |= DSC_MODE_VIDEO;
   1821
   1822	this_frame_slices = pic_width / dsc->drm->slice_width;
   1823	intf_ip_w = this_frame_slices * dsc->drm->slice_width;
   1824
	/*
	 * dsc merge case: when using 2 encoders for the same stream,
	 * the number of slices needs to be the same on both encoders.
	 */
   1829	enc_ip_w = intf_ip_w / 2;
   1830	initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
   1831
   1832	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
   1833		dpu_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines);
   1834}
   1835
   1836void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
   1837{
   1838	struct dpu_encoder_virt *dpu_enc;
   1839	struct dpu_encoder_phys *phys;
   1840	bool needs_hw_reset = false;
   1841	unsigned int i;
   1842
   1843	dpu_enc = to_dpu_encoder_virt(drm_enc);
   1844
   1845	trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
   1846
   1847	/* prepare for next kickoff, may include waiting on previous kickoff */
   1848	DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
   1849	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   1850		phys = dpu_enc->phys_encs[i];
   1851		if (phys->ops.prepare_for_kickoff)
   1852			phys->ops.prepare_for_kickoff(phys);
   1853		if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
   1854			needs_hw_reset = true;
   1855	}
   1856	DPU_ATRACE_END("enc_prepare_for_kickoff");
   1857
   1858	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
   1859
   1860	/* if any phys needs reset, reset all phys, in-order */
   1861	if (needs_hw_reset) {
   1862		trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
   1863		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   1864			dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
   1865		}
   1866	}
   1867
   1868	if (dpu_enc->dsc)
   1869		dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
   1870}
   1871
   1872bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
   1873{
   1874	struct dpu_encoder_virt *dpu_enc;
   1875	unsigned int i;
   1876	struct dpu_encoder_phys *phys;
   1877
   1878	dpu_enc = to_dpu_encoder_virt(drm_enc);
   1879
   1880	if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
   1881		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   1882			phys = dpu_enc->phys_encs[i];
   1883			if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
				DPU_DEBUG("invalid FB, not kicking off\n");
   1885				return false;
   1886			}
   1887		}
   1888	}
   1889
   1890	return true;
   1891}
   1892
   1893void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
   1894{
   1895	struct dpu_encoder_virt *dpu_enc;
   1896	struct dpu_encoder_phys *phys;
   1897	ktime_t wakeup_time;
   1898	unsigned long timeout_ms;
   1899	unsigned int i;
   1900
   1901	DPU_ATRACE_BEGIN("encoder_kickoff");
   1902	dpu_enc = to_dpu_encoder_virt(drm_enc);
   1903
   1904	trace_dpu_enc_kickoff(DRMID(drm_enc));
   1905
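	/*
	 * Arm the frame-done watchdog for DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES
	 * frame periods of the current mode; at 60 Hz, for example, this is
	 * 5 * 1000 / 60 = 83 ms (integer division).
	 */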
   1906	timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
   1907			drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
   1908
   1909	atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
   1910	mod_timer(&dpu_enc->frame_done_timer,
   1911			jiffies + msecs_to_jiffies(timeout_ms));
   1912
   1913	/* All phys encs are ready to go, trigger the kickoff */
   1914	_dpu_encoder_kickoff_phys(dpu_enc);
   1915
   1916	/* allow phys encs to handle any post-kickoff business */
   1917	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   1918		phys = dpu_enc->phys_encs[i];
   1919		if (phys->ops.handle_post_kickoff)
   1920			phys->ops.handle_post_kickoff(phys);
   1921	}
   1922
   1923	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
   1924			!dpu_encoder_vsync_time(drm_enc, &wakeup_time)) {
   1925		trace_dpu_enc_early_kickoff(DRMID(drm_enc),
   1926					    ktime_to_ms(wakeup_time));
   1927		mod_timer(&dpu_enc->vsync_event_timer,
   1928				nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
   1929	}
   1930
   1931	DPU_ATRACE_END("encoder_kickoff");
   1932}
   1933
   1934static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
   1935{
   1936	struct dpu_hw_mixer_cfg mixer;
   1937	int i, num_lm;
   1938	u32 flush_mask = 0;
   1939	struct dpu_global_state *global_state;
   1940	struct dpu_hw_blk *hw_lm[2];
   1941	struct dpu_hw_mixer *hw_mixer[2];
   1942	struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
   1943
   1944	memset(&mixer, 0, sizeof(mixer));
   1945
   1946	/* reset all mixers for this encoder */
   1947	if (phys_enc->hw_ctl->ops.clear_all_blendstages)
   1948		phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
   1949
   1950	global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
   1951
   1952	num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
   1953		phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
   1954
   1955	for (i = 0; i < num_lm; i++) {
   1956		hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
   1957		flush_mask = phys_enc->hw_ctl->ops.get_bitmask_mixer(ctl, hw_mixer[i]->idx);
   1958		if (phys_enc->hw_ctl->ops.update_pending_flush)
   1959			phys_enc->hw_ctl->ops.update_pending_flush(ctl, flush_mask);
   1960
   1961		/* clear all blendstages */
   1962		if (phys_enc->hw_ctl->ops.setup_blendstage)
   1963			phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
   1964	}
   1965}
   1966
   1967void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
   1968{
   1969	struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
   1970	struct dpu_hw_intf_cfg intf_cfg = { 0 };
   1971	int i;
   1972	struct dpu_encoder_virt *dpu_enc;
   1973
   1974	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
   1975
   1976	phys_enc->hw_ctl->ops.reset(ctl);
   1977
   1978	dpu_encoder_helper_reset_mixers(phys_enc);
   1979
   1980	/*
   1981	 * TODO: move the once-only operation like CTL flush/trigger
   1982	 * into dpu_encoder_virt_disable() and all operations which need
   1983	 * to be done per phys encoder into the phys_disable() op.
   1984	 */
   1985	if (phys_enc->hw_wb) {
   1986		/* disable the PP block */
   1987		if (phys_enc->hw_wb->ops.bind_pingpong_blk)
   1988			phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, false,
   1989					phys_enc->hw_pp->idx);
   1990
   1991		/* mark WB flush as pending */
   1992		if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
   1993			phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
   1994	} else {
   1995		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   1996			if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
   1997				phys_enc->hw_intf->ops.bind_pingpong_blk(
   1998						dpu_enc->phys_encs[i]->hw_intf, false,
   1999						dpu_enc->phys_encs[i]->hw_pp->idx);
   2000
   2001			/* mark INTF flush as pending */
   2002			if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
   2003				phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
   2004						dpu_enc->phys_encs[i]->hw_intf->idx);
   2005		}
   2006	}
   2007
   2008	/* reset the merge 3D HW block */
   2009	if (phys_enc->hw_pp->merge_3d) {
   2010		phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
   2011				BLEND_3D_NONE);
   2012		if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
   2013			phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
   2014					phys_enc->hw_pp->merge_3d->idx);
   2015	}
   2016
   2017	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
   2018	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
   2019	if (phys_enc->hw_pp->merge_3d)
   2020		intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
   2021
   2022	if (ctl->ops.reset_intf_cfg)
   2023		ctl->ops.reset_intf_cfg(ctl, &intf_cfg);
   2024
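	/*
	 * Flush and start the CTL so the cleared interface/WB configuration
	 * takes effect, then clear the software pending-flush mask so stale
	 * bits do not carry over into the next commit.
	 */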
   2025	ctl->ops.trigger_flush(ctl);
   2026	ctl->ops.trigger_start(ctl);
   2027	ctl->ops.clear_pending_flush(ctl);
   2028}
   2029
   2030void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
   2031{
   2032	struct dpu_encoder_virt *dpu_enc;
   2033	struct dpu_encoder_phys *phys;
   2034	int i;
   2035
   2036	if (!drm_enc) {
   2037		DPU_ERROR("invalid encoder\n");
   2038		return;
   2039	}
   2040	dpu_enc = to_dpu_encoder_virt(drm_enc);
   2041
   2042	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   2043		phys = dpu_enc->phys_encs[i];
   2044		if (phys->ops.prepare_commit)
   2045			phys->ops.prepare_commit(phys);
   2046	}
   2047}
   2048
   2049#ifdef CONFIG_DEBUG_FS
   2050static int _dpu_encoder_status_show(struct seq_file *s, void *data)
   2051{
   2052	struct dpu_encoder_virt *dpu_enc = s->private;
   2053	int i;
   2054
   2055	mutex_lock(&dpu_enc->enc_lock);
   2056	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   2057		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
   2058
   2059		seq_printf(s, "intf:%d  wb:%d  vsync:%8d     underrun:%8d    ",
   2060				phys->intf_idx - INTF_0, phys->wb_idx - WB_0,
   2061				atomic_read(&phys->vsync_cnt),
   2062				atomic_read(&phys->underrun_cnt));
   2063
   2064		seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
   2065	}
   2066	mutex_unlock(&dpu_enc->enc_lock);
   2067
   2068	return 0;
   2069}
   2070
   2071DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
   2072
   2073static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
   2074{
   2075	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
   2076	int i;
   2077
   2078	char name[DPU_NAME_SIZE];
   2079
   2080	if (!drm_enc->dev) {
		DPU_ERROR("invalid encoder\n");
   2082		return -EINVAL;
   2083	}
   2084
   2085	snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
   2086
   2087	/* create overall sub-directory for the encoder */
   2088	dpu_enc->debugfs_root = debugfs_create_dir(name,
   2089			drm_enc->dev->primary->debugfs_root);
   2090
   2091	/* don't error check these */
   2092	debugfs_create_file("status", 0600,
   2093		dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
   2094
   2095	for (i = 0; i < dpu_enc->num_phys_encs; i++)
   2096		if (dpu_enc->phys_encs[i]->ops.late_register)
   2097			dpu_enc->phys_encs[i]->ops.late_register(
   2098					dpu_enc->phys_encs[i],
   2099					dpu_enc->debugfs_root);
   2100
   2101	return 0;
   2102}
   2103#else
   2104static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
   2105{
   2106	return 0;
   2107}
   2108#endif
   2109
   2110static int dpu_encoder_late_register(struct drm_encoder *encoder)
   2111{
   2112	return _dpu_encoder_init_debugfs(encoder);
   2113}
   2114
   2115static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
   2116{
   2117	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
   2118
   2119	debugfs_remove_recursive(dpu_enc->debugfs_root);
   2120}
   2121
   2122static int dpu_encoder_virt_add_phys_encs(
   2123		struct msm_display_info *disp_info,
   2124		struct dpu_encoder_virt *dpu_enc,
   2125		struct dpu_enc_phys_init_params *params)
   2126{
   2127	struct dpu_encoder_phys *enc = NULL;
   2128
   2129	DPU_DEBUG_ENC(dpu_enc, "\n");
   2130
   2131	/*
   2132	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
   2133	 * in this function, check up-front.
   2134	 */
   2135	if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
   2136			ARRAY_SIZE(dpu_enc->phys_encs)) {
   2137		DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
   2138			  dpu_enc->num_phys_encs);
   2139		return -EINVAL;
   2140	}
   2141
   2142	if (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE) {
   2143		enc = dpu_encoder_phys_vid_init(params);
   2144
   2145		if (IS_ERR_OR_NULL(enc)) {
   2146			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
   2147				PTR_ERR(enc));
   2148			return enc == NULL ? -EINVAL : PTR_ERR(enc);
   2149		}
   2150
   2151		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
   2152		++dpu_enc->num_phys_encs;
   2153	}
   2154
   2155	if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
   2156		enc = dpu_encoder_phys_cmd_init(params);
   2157
   2158		if (IS_ERR_OR_NULL(enc)) {
   2159			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
   2160				PTR_ERR(enc));
   2161			return enc == NULL ? -EINVAL : PTR_ERR(enc);
   2162		}
   2163
   2164		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
   2165		++dpu_enc->num_phys_encs;
   2166	}
   2167
   2168	if (disp_info->intf_type == DRM_MODE_ENCODER_VIRTUAL) {
   2169		enc = dpu_encoder_phys_wb_init(params);
   2170
   2171		if (IS_ERR_OR_NULL(enc)) {
   2172			DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
   2173					PTR_ERR(enc));
   2174			return enc == NULL ? -EINVAL : PTR_ERR(enc);
   2175		}
   2176
   2177		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
   2178		++dpu_enc->num_phys_encs;
   2179	}
   2180
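	/*
	 * 'enc' now points at the most recently created physical encoder;
	 * record it as the current master or slave according to the split
	 * role requested by the caller.
	 */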
   2181	if (params->split_role == ENC_ROLE_SLAVE)
   2182		dpu_enc->cur_slave = enc;
   2183	else
   2184		dpu_enc->cur_master = enc;
   2185
   2186	return 0;
   2187}
   2188
   2189static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
   2190	.handle_vblank_virt = dpu_encoder_vblank_callback,
   2191	.handle_underrun_virt = dpu_encoder_underrun_callback,
   2192	.handle_frame_done = dpu_encoder_frame_done_callback,
   2193};
   2194
   2195static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
   2196				 struct dpu_kms *dpu_kms,
   2197				 struct msm_display_info *disp_info)
   2198{
   2199	int ret = 0;
   2200	int i = 0;
   2201	enum dpu_intf_type intf_type = INTF_NONE;
   2202	struct dpu_enc_phys_init_params phys_params;
   2203
   2204	if (!dpu_enc) {
   2205		DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
   2206		return -EINVAL;
   2207	}
   2208
   2209	dpu_enc->cur_master = NULL;
   2210
   2211	memset(&phys_params, 0, sizeof(phys_params));
   2212	phys_params.dpu_kms = dpu_kms;
   2213	phys_params.parent = &dpu_enc->base;
   2214	phys_params.parent_ops = &dpu_encoder_parent_ops;
   2215	phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
   2216
   2217	switch (disp_info->intf_type) {
   2218	case DRM_MODE_ENCODER_DSI:
   2219		intf_type = INTF_DSI;
   2220		break;
   2221	case DRM_MODE_ENCODER_TMDS:
   2222		intf_type = INTF_DP;
   2223		break;
   2224	case DRM_MODE_ENCODER_VIRTUAL:
   2225		intf_type = INTF_WB;
   2226		break;
   2227	}
   2228
   2229	WARN_ON(disp_info->num_of_h_tiles < 1);
   2230
	DPU_DEBUG("disp_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
   2232
   2233	if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
   2234	    (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
   2235		dpu_enc->idle_pc_supported =
   2236				dpu_kms->catalog->caps->has_idle_pc;
   2237
   2238	dpu_enc->dsc = disp_info->dsc;
   2239
   2240	mutex_lock(&dpu_enc->enc_lock);
   2241	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
   2242		/*
   2243		 * Left-most tile is at index 0, content is controller id
   2244		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
   2245		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
   2246		 */
   2247		u32 controller_id = disp_info->h_tile_instance[i];
   2248
   2249		if (disp_info->num_of_h_tiles > 1) {
   2250			if (i == 0)
   2251				phys_params.split_role = ENC_ROLE_MASTER;
   2252			else
   2253				phys_params.split_role = ENC_ROLE_SLAVE;
   2254		} else {
   2255			phys_params.split_role = ENC_ROLE_SOLO;
   2256		}
   2257
   2258		DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
   2259				i, controller_id, phys_params.split_role);
   2260
		phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
				intf_type, controller_id);
   2264
   2265		phys_params.wb_idx = dpu_encoder_get_wb(dpu_kms->catalog,
   2266				intf_type, controller_id);
   2267		/*
   2268		 * The phys_params might represent either an INTF or a WB unit, but not
   2269		 * both of them at the same time.
   2270		 */
   2271		if ((phys_params.intf_idx == INTF_MAX) &&
   2272				(phys_params.wb_idx == WB_MAX)) {
   2273			DPU_ERROR_ENC(dpu_enc, "could not get intf or wb: type %d, id %d\n",
   2274						  intf_type, controller_id);
   2275			ret = -EINVAL;
   2276		}
   2277
   2278		if ((phys_params.intf_idx != INTF_MAX) &&
   2279				(phys_params.wb_idx != WB_MAX)) {
   2280			DPU_ERROR_ENC(dpu_enc, "both intf and wb present: type %d, id %d\n",
   2281						  intf_type, controller_id);
   2282			ret = -EINVAL;
   2283		}
   2284
   2285		if (!ret) {
   2286			ret = dpu_encoder_virt_add_phys_encs(disp_info,
   2287					dpu_enc, &phys_params);
   2288			if (ret)
   2289				DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
   2290		}
   2291	}
   2292
   2293	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   2294		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
   2295		atomic_set(&phys->vsync_cnt, 0);
   2296		atomic_set(&phys->underrun_cnt, 0);
   2297	}
   2298	mutex_unlock(&dpu_enc->enc_lock);
   2299
   2300	return ret;
   2301}
   2302
   2303static void dpu_encoder_frame_done_timeout(struct timer_list *t)
   2304{
   2305	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
   2306			frame_done_timer);
   2307	struct drm_encoder *drm_enc = &dpu_enc->base;
   2308	u32 event;
   2309
   2310	if (!drm_enc->dev) {
   2311		DPU_ERROR("invalid parameters\n");
   2312		return;
   2313	}
   2314
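	/*
	 * Only treat this as a real timeout when a frame is still marked
	 * busy, a frame-event callback is registered, and the timeout value
	 * has not already been consumed (atomic_xchg() returns the previous
	 * value).
	 */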
   2315	if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
   2316		DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
   2317			      DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
   2318		return;
   2319	} else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
   2320		DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
   2321		return;
   2322	}
   2323
   2324	DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
   2325
   2326	event = DPU_ENCODER_FRAME_EVENT_ERROR;
   2327	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
   2328	dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
   2329}
   2330
   2331static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
   2332	.atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
   2333	.disable = dpu_encoder_virt_disable,
   2334	.enable = dpu_encoder_virt_enable,
   2335	.atomic_check = dpu_encoder_virt_atomic_check,
   2336};
   2337
static const struct drm_encoder_funcs dpu_encoder_funcs = {
	.destroy = dpu_encoder_destroy,
	.late_register = dpu_encoder_late_register,
	.early_unregister = dpu_encoder_early_unregister,
};
   2343
   2344int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
   2345		struct msm_display_info *disp_info)
   2346{
   2347	struct msm_drm_private *priv = dev->dev_private;
   2348	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
   2349	struct drm_encoder *drm_enc = NULL;
   2350	struct dpu_encoder_virt *dpu_enc = NULL;
   2351	int ret = 0;
   2352
   2353	dpu_enc = to_dpu_encoder_virt(enc);
   2354
   2355	ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
   2356	if (ret)
   2357		goto fail;
   2358
   2359	atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
   2360	timer_setup(&dpu_enc->frame_done_timer,
   2361			dpu_encoder_frame_done_timeout, 0);
   2362
   2363	if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
   2364		timer_setup(&dpu_enc->vsync_event_timer,
   2365				dpu_encoder_vsync_event_handler,
   2366				0);
   2367	else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
   2368		dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
   2369				priv->dp[disp_info->h_tile_instance[0]]);
   2370
   2371	INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
   2372			dpu_encoder_off_work);
   2373	dpu_enc->idle_timeout = IDLE_TIMEOUT;
   2374
   2375	kthread_init_work(&dpu_enc->vsync_event_work,
   2376			dpu_encoder_vsync_event_work_handler);
   2377
   2378	memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
   2379
   2380	DPU_DEBUG_ENC(dpu_enc, "created\n");
   2381
   2382	return ret;
   2383
   2384fail:
   2385	DPU_ERROR("failed to create encoder\n");
   2386	if (drm_enc)
   2387		dpu_encoder_destroy(drm_enc);
   2388
   2389	return ret;
   2392}
   2393
   2394struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
   2395		int drm_enc_mode)
   2396{
   2397	struct dpu_encoder_virt *dpu_enc = NULL;
   2398	int rc = 0;
   2399
   2400	dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
   2401	if (!dpu_enc)
   2402		return ERR_PTR(-ENOMEM);
   2403
	rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
			      drm_enc_mode, NULL);
   2407	if (rc) {
   2408		devm_kfree(dev->dev, dpu_enc);
   2409		return ERR_PTR(rc);
   2410	}
   2411
   2412	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
   2413
   2414	spin_lock_init(&dpu_enc->enc_spinlock);
   2415	dpu_enc->enabled = false;
   2416	mutex_init(&dpu_enc->enc_lock);
   2417	mutex_init(&dpu_enc->rc_lock);
   2418
   2419	return &dpu_enc->base;
   2420}
   2421
   2422int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
   2423	enum msm_event_wait event)
   2424{
   2425	int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
   2426	struct dpu_encoder_virt *dpu_enc = NULL;
   2427	int i, ret = 0;
   2428
   2429	if (!drm_enc) {
   2430		DPU_ERROR("invalid encoder\n");
   2431		return -EINVAL;
   2432	}
   2433	dpu_enc = to_dpu_encoder_virt(drm_enc);
   2434	DPU_DEBUG_ENC(dpu_enc, "\n");
   2435
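	/*
	 * Dispatch to the matching per-physical-encoder wait op for the
	 * requested event and return the first error encountered, if any.
	 */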
   2436	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
   2437		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
   2438
   2439		switch (event) {
   2440		case MSM_ENC_COMMIT_DONE:
   2441			fn_wait = phys->ops.wait_for_commit_done;
   2442			break;
   2443		case MSM_ENC_TX_COMPLETE:
   2444			fn_wait = phys->ops.wait_for_tx_complete;
   2445			break;
   2446		case MSM_ENC_VBLANK:
   2447			fn_wait = phys->ops.wait_for_vblank;
   2448			break;
   2449		default:
   2450			DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
   2451					event);
   2452			return -EINVAL;
   2453		}
   2454
   2455		if (fn_wait) {
   2456			DPU_ATRACE_BEGIN("wait_for_completion_event");
   2457			ret = fn_wait(phys);
   2458			DPU_ATRACE_END("wait_for_completion_event");
   2459			if (ret)
   2460				return ret;
   2461		}
   2462	}
   2463
   2464	return ret;
   2465}
   2466
   2467enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
   2468{
   2469	struct dpu_encoder_virt *dpu_enc = NULL;
   2470
   2471	if (!encoder) {
   2472		DPU_ERROR("invalid encoder\n");
   2473		return INTF_MODE_NONE;
   2474	}
   2475	dpu_enc = to_dpu_encoder_virt(encoder);
   2476
   2477	if (dpu_enc->cur_master)
   2478		return dpu_enc->cur_master->intf_mode;
   2479
   2480	if (dpu_enc->num_phys_encs)
   2481		return dpu_enc->phys_encs[0]->intf_mode;
   2482
   2483	return INTF_MODE_NONE;
   2484}
   2485
   2486unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
   2487{
   2488	struct drm_encoder *encoder = phys_enc->parent;
   2489	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
   2490
   2491	return dpu_enc->dsc_mask;
   2492}