cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

dpu_vbif.c (9015B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
      3 */
      4
      5#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
      6
      7#include <linux/debugfs.h>
      8#include <linux/delay.h>
      9
     10#include "dpu_vbif.h"
     11#include "dpu_hw_vbif.h"
     12#include "dpu_trace.h"
     13
     14/**
     15 * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
     16 * @vbif:	Pointer to hardware vbif driver
     17 * @xin_id:	Client interface identifier
     18 * @return:	0 if success; error code otherwise
     19 */
     20static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
     21{
     22	ktime_t timeout;
     23	bool status;
     24	int rc;
     25
     26	if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
     27		DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
     28		return -EINVAL;
     29	}
     30
     31	timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
     32	for (;;) {
     33		status = vbif->ops.get_halt_ctrl(vbif, xin_id);
     34		if (status)
     35			break;
     36		if (ktime_compare_safe(ktime_get(), timeout) > 0) {
     37			status = vbif->ops.get_halt_ctrl(vbif, xin_id);
     38			break;
     39		}
     40		usleep_range(501, 1000);
     41	}
     42
     43	if (!status) {
     44		rc = -ETIMEDOUT;
     45		DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
     46				vbif->idx - VBIF_0, xin_id);
     47	} else {
     48		rc = 0;
     49		DRM_DEBUG_ATOMIC("VBIF %d client %d is halted\n",
     50				vbif->idx - VBIF_0, xin_id);
     51	}
     52
     53	return rc;
     54}
     55
     56/**
     57 * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
     58 * @vbif:	Pointer to hardware vbif driver
     59 * @ot_lim:	Pointer to OT limit to be modified
     60 * @params:	Pointer to usecase parameters
     61 */
     62static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
     63		u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
     64{
     65	u64 pps;
     66	const struct dpu_vbif_dynamic_ot_tbl *tbl;
     67	u32 i;
     68
     69	if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
     70		return;
     71
     72	/* Dynamic OT setting done only for WFD */
     73	if (!params->is_wfd)
     74		return;
     75
     76	pps = params->frame_rate;
     77	pps *= params->width;
     78	pps *= params->height;
     79
     80	tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
     81			&vbif->cap->dynamic_ot_wr_tbl;
     82
     83	for (i = 0; i < tbl->count; i++) {
     84		if (pps <= tbl->cfg[i].pps) {
     85			*ot_lim = tbl->cfg[i].ot_limit;
     86			break;
     87		}
     88	}
     89
     90	DRM_DEBUG_ATOMIC("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
     91			vbif->idx - VBIF_0, params->xin_id,
     92			params->width, params->height, params->frame_rate,
     93			pps, *ot_lim);
     94}
     95
     96/**
     97 * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
     98 * @vbif:	Pointer to hardware vbif driver
     99 * @params:	Pointer to usecase parameters
    100 * @return:	OT limit
    101 */
    102static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
    103	struct dpu_vbif_set_ot_params *params)
    104{
    105	u32 ot_lim = 0;
    106	u32 val;
    107
    108	if (!vbif || !vbif->cap) {
    109		DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
    110		return -EINVAL;
    111	}
    112
    113	if (vbif->cap->default_ot_wr_limit && !params->rd)
    114		ot_lim = vbif->cap->default_ot_wr_limit;
    115	else if (vbif->cap->default_ot_rd_limit && params->rd)
    116		ot_lim = vbif->cap->default_ot_rd_limit;
    117
    118	/*
    119	 * If default ot is not set from dt/catalog,
    120	 * then do not configure it.
    121	 */
    122	if (ot_lim == 0)
    123		goto exit;
    124
    125	/* Modify the limits if the target and the use case requires it */
    126	_dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
    127
    128	if (vbif && vbif->ops.get_limit_conf) {
    129		val = vbif->ops.get_limit_conf(vbif,
    130				params->xin_id, params->rd);
    131		if (val == ot_lim)
    132			ot_lim = 0;
    133	}
    134
    135exit:
    136	DRM_DEBUG_ATOMIC("vbif:%d xin:%d ot_lim:%d\n",
    137			vbif->idx - VBIF_0, params->xin_id, ot_lim);
    138	return ot_lim;
    139}
    140
    141/**
    142 * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
    143 * @dpu_kms:	DPU handler
    144 * @params:	Pointer to usecase parameters
    145 *
    146 * Note this function would block waiting for bus halt.
    147 */
    148void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
    149		struct dpu_vbif_set_ot_params *params)
    150{
    151	struct dpu_hw_vbif *vbif = NULL;
    152	struct dpu_hw_mdp *mdp;
    153	bool forced_on = false;
    154	u32 ot_lim;
    155	int ret, i;
    156
    157	mdp = dpu_kms->hw_mdp;
    158
    159	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
    160		if (dpu_kms->hw_vbif[i] &&
    161				dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
    162			vbif = dpu_kms->hw_vbif[i];
    163	}
    164
    165	if (!vbif || !mdp) {
    166		DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
    167				vbif != NULL, mdp != NULL);
    168		return;
    169	}
    170
    171	if (!mdp->ops.setup_clk_force_ctrl ||
    172			!vbif->ops.set_limit_conf ||
    173			!vbif->ops.set_halt_ctrl)
    174		return;
    175
    176	/* set write_gather_en for all write clients */
    177	if (vbif->ops.set_write_gather_en && !params->rd)
    178		vbif->ops.set_write_gather_en(vbif, params->xin_id);
    179
    180	ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
    181
    182	if (ot_lim == 0)
    183		return;
    184
    185	trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
    186		params->vbif_idx);
    187
    188	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
    189
    190	vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
    191
    192	vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
    193
    194	ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
    195	if (ret)
    196		trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
    197
    198	vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
    199
    200	if (forced_on)
    201		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
    202}
    203
    204void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
    205		struct dpu_vbif_set_qos_params *params)
    206{
    207	struct dpu_hw_vbif *vbif = NULL;
    208	struct dpu_hw_mdp *mdp;
    209	bool forced_on = false;
    210	const struct dpu_vbif_qos_tbl *qos_tbl;
    211	int i;
    212
    213	if (!params || !dpu_kms->hw_mdp) {
    214		DPU_ERROR("invalid arguments\n");
    215		return;
    216	}
    217	mdp = dpu_kms->hw_mdp;
    218
    219	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
    220		if (dpu_kms->hw_vbif[i] &&
    221				dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
    222			vbif = dpu_kms->hw_vbif[i];
    223			break;
    224		}
    225	}
    226
    227	if (!vbif || !vbif->cap) {
    228		DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
    229		return;
    230	}
    231
    232	if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
    233		DRM_DEBUG_ATOMIC("qos remap not supported\n");
    234		return;
    235	}
    236
    237	qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
    238			&vbif->cap->qos_nrt_tbl;
    239
    240	if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
    241		DRM_DEBUG_ATOMIC("qos tbl not defined\n");
    242		return;
    243	}
    244
    245	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
    246
    247	for (i = 0; i < qos_tbl->npriority_lvl; i++) {
    248		DRM_DEBUG_ATOMIC("vbif:%d xin:%d lvl:%d/%d\n",
    249				params->vbif_idx, params->xin_id, i,
    250				qos_tbl->priority_lvl[i]);
    251		vbif->ops.set_qos_remap(vbif, params->xin_id, i,
    252				qos_tbl->priority_lvl[i]);
    253	}
    254
    255	if (forced_on)
    256		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
    257}
    258
    259void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
    260{
    261	struct dpu_hw_vbif *vbif;
    262	u32 i, pnd, src;
    263
    264	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
    265		vbif = dpu_kms->hw_vbif[i];
    266		if (vbif && vbif->ops.clear_errors) {
    267			vbif->ops.clear_errors(vbif, &pnd, &src);
    268			if (pnd || src) {
    269				DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
    270					      vbif->idx - VBIF_0, pnd, src);
    271			}
    272		}
    273	}
    274}
    275
    276void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
    277{
    278	struct dpu_hw_vbif *vbif;
    279	int i, j;
    280
    281	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
    282		vbif = dpu_kms->hw_vbif[i];
    283		if (vbif && vbif->cap && vbif->ops.set_mem_type) {
    284			for (j = 0; j < vbif->cap->memtype_count; j++)
    285				vbif->ops.set_mem_type(
    286						vbif, j, vbif->cap->memtype[j]);
    287		}
    288	}
    289}
    290
    291#ifdef CONFIG_DEBUG_FS
    292
    293void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
    294{
    295	char vbif_name[32];
    296	struct dentry *entry, *debugfs_vbif;
    297	int i, j;
    298
    299	entry = debugfs_create_dir("vbif", debugfs_root);
    300
    301	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
    302		const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
    303
    304		snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
    305
    306		debugfs_vbif = debugfs_create_dir(vbif_name, entry);
    307
    308		debugfs_create_u32("features", 0600, debugfs_vbif,
    309			(u32 *)&vbif->features);
    310
    311		debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
    312			(u32 *)&vbif->xin_halt_timeout);
    313
    314		debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
    315			(u32 *)&vbif->default_ot_rd_limit);
    316
    317		debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
    318			(u32 *)&vbif->default_ot_wr_limit);
    319
    320		for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
    321			const struct dpu_vbif_dynamic_ot_cfg *cfg =
    322					&vbif->dynamic_ot_rd_tbl.cfg[j];
    323
    324			snprintf(vbif_name, sizeof(vbif_name),
    325					"dynamic_ot_rd_%d_pps", j);
    326			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
    327					(u64 *)&cfg->pps);
    328			snprintf(vbif_name, sizeof(vbif_name),
    329					"dynamic_ot_rd_%d_ot_limit", j);
    330			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
    331					(u32 *)&cfg->ot_limit);
    332		}
    333
    334		for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
    335			const struct dpu_vbif_dynamic_ot_cfg *cfg =
    336					&vbif->dynamic_ot_wr_tbl.cfg[j];
    337
    338			snprintf(vbif_name, sizeof(vbif_name),
    339					"dynamic_ot_wr_%d_pps", j);
    340			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
    341					(u64 *)&cfg->pps);
    342			snprintf(vbif_name, sizeof(vbif_name),
    343					"dynamic_ot_wr_%d_ot_limit", j);
    344			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
    345					(u32 *)&cfg->ot_limit);
    346		}
    347	}
    348}
    349#endif