cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vdec_msg_queue.c (7894B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 MediaTek Inc.
 * Author: Yunfei Dong <yunfei.dong@mediatek.com>
 */

#include <linux/freezer.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>

#include "mtk_vcodec_dec_pm.h"
#include "mtk_vcodec_drv.h"
#include "vdec_msg_queue.h"

#define VDEC_MSG_QUEUE_TIMEOUT_MS 1500

/* the size used to store lat slice header information */
#define VDEC_LAT_SLICE_HEADER_SZ    (640 * SZ_1K)

/* the size used to store avc error information */
#define VDEC_ERR_MAP_SZ_AVC         (17 * SZ_1K)

/*
 * The core hardware reads and decodes the trans buffer that was
 * already decoded by the LAT hardware. The trans buffer sizes for
 * FHD and 4K bitstreams differ.
 */
static int vdec_msg_queue_get_trans_size(int width, int height)
{
	if (width > 1920 || height > 1088)
		return 30 * SZ_1M;
	else
		return 6 * SZ_1M;
}

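/*
 * Initialize one hardware queue context: its wait queue, ready list,
 * lock, ready-buffer count and owning hardware index.
 */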
void vdec_msg_queue_init_ctx(struct vdec_msg_queue_ctx *ctx, int hardware_index)
{
	init_waitqueue_head(&ctx->ready_to_use);
	INIT_LIST_HEAD(&ctx->ready_queue);
	spin_lock_init(&ctx->ready_lock);
	ctx->ready_num = 0;
	ctx->hardware_index = hardware_index;
}

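/*
 * Map a hardware index to the matching list node inside a lat buffer,
 * or NULL for an unknown index.
 */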
static struct list_head *vdec_get_buf_list(int hardware_index, struct vdec_lat_buf *buf)
{
	switch (hardware_index) {
	case MTK_VDEC_CORE:
		return &buf->core_list;
	case MTK_VDEC_LAT0:
		return &buf->lat_list;
	default:
		return NULL;
	}
}

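/*
 * Queue a lat buffer onto the ready list of the given queue context.
 * LAT waiters are woken directly; for the core queue, the core work
 * item is scheduled instead. Returns 0 on success or -EINVAL.
 */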
int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf *buf)
{
	struct list_head *head;

	head = vdec_get_buf_list(msg_ctx->hardware_index, buf);
	if (!head) {
		mtk_v4l2_err("failed to qbuf: %d", msg_ctx->hardware_index);
		return -EINVAL;
	}

	spin_lock(&msg_ctx->ready_lock);
	list_add_tail(head, &msg_ctx->ready_queue);
	msg_ctx->ready_num++;

	if (msg_ctx->hardware_index != MTK_VDEC_CORE)
		wake_up_all(&msg_ctx->ready_to_use);
	else
		queue_work(buf->ctx->dev->core_workqueue,
			   &buf->ctx->msg_queue.core_work);

	mtk_v4l2_debug(3, "enqueue buf type: %d addr: 0x%p num: %d",
		       msg_ctx->hardware_index, buf, msg_ctx->ready_num);
	spin_unlock(&msg_ctx->ready_lock);

	return 0;
}

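/*
 * Wait up to VDEC_MSG_QUEUE_TIMEOUT_MS for the ready list to become
 * non-empty. Returns false on timeout.
 */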
static bool vdec_msg_queue_wait_event(struct vdec_msg_queue_ctx *msg_ctx)
{
	int ret;

	ret = wait_event_timeout(msg_ctx->ready_to_use,
				 !list_empty(&msg_ctx->ready_queue),
				 msecs_to_jiffies(VDEC_MSG_QUEUE_TIMEOUT_MS));
	if (!ret)
		return false;

	return true;
}

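/*
 * Dequeue the first ready lat buffer. The core queue never blocks;
 * the LAT queue waits for a buffer until the timeout expires.
 * Returns NULL if no buffer could be dequeued.
 */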
struct vdec_lat_buf *vdec_msg_queue_dqbuf(struct vdec_msg_queue_ctx *msg_ctx)
{
	struct vdec_lat_buf *buf;
	struct list_head *head;
	int ret;

	spin_lock(&msg_ctx->ready_lock);
	if (list_empty(&msg_ctx->ready_queue)) {
		mtk_v4l2_debug(3, "queue is empty, type: %d num: %d",
			       msg_ctx->hardware_index, msg_ctx->ready_num);
		spin_unlock(&msg_ctx->ready_lock);

		if (msg_ctx->hardware_index == MTK_VDEC_CORE)
			return NULL;

		ret = vdec_msg_queue_wait_event(msg_ctx);
		if (!ret)
			return NULL;
		spin_lock(&msg_ctx->ready_lock);
	}

	if (msg_ctx->hardware_index == MTK_VDEC_CORE)
		buf = list_first_entry(&msg_ctx->ready_queue,
				       struct vdec_lat_buf, core_list);
	else
		buf = list_first_entry(&msg_ctx->ready_queue,
				       struct vdec_lat_buf, lat_list);

	head = vdec_get_buf_list(msg_ctx->hardware_index, buf);
	if (!head) {
		spin_unlock(&msg_ctx->ready_lock);
		mtk_v4l2_err("failed to dqbuf: %d", msg_ctx->hardware_index);
		return NULL;
	}
	list_del(head);

	msg_ctx->ready_num--;
	mtk_v4l2_debug(3, "dequeue buf type: %d addr: 0x%p num: %d",
		       msg_ctx->hardware_index, buf, msg_ctx->ready_num);
	spin_unlock(&msg_ctx->ready_lock);

	return buf;
}

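/*
 * Update the UBE read pointer of the shared trans buffer under the
 * lat queue lock.
 */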
void vdec_msg_queue_update_ube_rptr(struct vdec_msg_queue *msg_queue, uint64_t ube_rptr)
{
	spin_lock(&msg_queue->lat_ctx.ready_lock);
	msg_queue->wdma_rptr_addr = ube_rptr;
	mtk_v4l2_debug(3, "update ube rptr (0x%llx)", ube_rptr);
	spin_unlock(&msg_queue->lat_ctx.ready_lock);
}

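/*
 * Update the UBE write pointer of the shared trans buffer under the
 * lat queue lock.
 */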
void vdec_msg_queue_update_ube_wptr(struct vdec_msg_queue *msg_queue, uint64_t ube_wptr)
{
	spin_lock(&msg_queue->lat_ctx.ready_lock);
	msg_queue->wdma_wptr_addr = ube_wptr;
	mtk_v4l2_debug(3, "update ube wptr: (0x%llx 0x%llx) offset: 0x%llx",
		       msg_queue->wdma_rptr_addr, msg_queue->wdma_wptr_addr,
		       ube_wptr);
	spin_unlock(&msg_queue->lat_ctx.ready_lock);
}

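/*
 * Wait until all NUM_BUFFER_COUNT lat buffers have been returned to
 * the lat queue, meaning no decode work is still in flight. Returns
 * false on timeout.
 */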
bool vdec_msg_queue_wait_lat_buf_full(struct vdec_msg_queue *msg_queue)
{
	long timeout_jiff;
	int ret;

	timeout_jiff = msecs_to_jiffies(1000 * (NUM_BUFFER_COUNT + 2));
	ret = wait_event_timeout(msg_queue->lat_ctx.ready_to_use,
				 msg_queue->lat_ctx.ready_num == NUM_BUFFER_COUNT,
				 timeout_jiff);
	if (ret) {
		mtk_v4l2_debug(3, "successfully got all lat bufs: %d",
			       msg_queue->lat_ctx.ready_num);
		return true;
	}
	mtk_v4l2_err("timed out, lat buf list isn't full: %d",
		     msg_queue->lat_ctx.ready_num);
	return false;
}

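/*
 * Free the trans buffer and, for each lat buffer, its error map,
 * slice header buffer and private data.
 */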
void vdec_msg_queue_deinit(struct vdec_msg_queue *msg_queue,
			   struct mtk_vcodec_ctx *ctx)
{
	struct vdec_lat_buf *lat_buf;
	struct mtk_vcodec_mem *mem;
	int i;

	mem = &msg_queue->wdma_addr;
	if (mem->va)
		mtk_vcodec_mem_free(ctx, mem);
	for (i = 0; i < NUM_BUFFER_COUNT; i++) {
		lat_buf = &msg_queue->lat_buf[i];

		mem = &lat_buf->wdma_err_addr;
		if (mem->va)
			mtk_vcodec_mem_free(ctx, mem);

		mem = &lat_buf->slice_bc_addr;
		if (mem->va)
			mtk_vcodec_mem_free(ctx, mem);

		kfree(lat_buf->private_data);
	}
}

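/*
 * Work item for the core hardware: dequeue one lat buffer from the
 * device-wide core queue, run its core decode callback with the core
 * hardware powered, then return the buffer to its context's lat
 * queue. Requeues the work if that lat queue still has ready buffers.
 */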
static void vdec_msg_queue_core_work(struct work_struct *work)
{
	struct vdec_msg_queue *msg_queue =
		container_of(work, struct vdec_msg_queue, core_work);
	struct mtk_vcodec_ctx *ctx =
		container_of(msg_queue, struct mtk_vcodec_ctx, msg_queue);
	struct mtk_vcodec_dev *dev = ctx->dev;
	struct vdec_lat_buf *lat_buf;

	lat_buf = vdec_msg_queue_dqbuf(&dev->msg_queue_core_ctx);
	if (!lat_buf)
		return;

	ctx = lat_buf->ctx;
	mtk_vcodec_dec_enable_hardware(ctx, MTK_VDEC_CORE);
	mtk_vcodec_set_curr_ctx(dev, ctx, MTK_VDEC_CORE);

	lat_buf->core_decode(lat_buf);

	mtk_vcodec_set_curr_ctx(dev, NULL, MTK_VDEC_CORE);
	mtk_vcodec_dec_disable_hardware(ctx, MTK_VDEC_CORE);
	vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);

	if (!list_empty(&ctx->msg_queue.lat_ctx.ready_queue)) {
		mtk_v4l2_debug(3, "re-schedule to decode for core: %d",
			       dev->msg_queue_core_ctx.ready_num);
		queue_work(dev->core_workqueue, &msg_queue->core_work);
	}
}

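/*
 * Allocate the shared trans buffer and every lat buffer (error map,
 * slice header buffer and private data), then queue all lat buffers
 * as ready. Calling it again after a successful init is a no-op.
 * Returns 0 on success or a negative errno.
 */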
int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
			struct mtk_vcodec_ctx *ctx, core_decode_cb_t core_decode,
			int private_size)
{
	struct vdec_lat_buf *lat_buf;
	int i, err;

	/* msg queue is already initialized */
	if (msg_queue->wdma_addr.size)
		return 0;

	vdec_msg_queue_init_ctx(&msg_queue->lat_ctx, MTK_VDEC_LAT0);
	INIT_WORK(&msg_queue->core_work, vdec_msg_queue_core_work);
	msg_queue->wdma_addr.size =
		vdec_msg_queue_get_trans_size(ctx->picinfo.buf_w,
					      ctx->picinfo.buf_h);

	err = mtk_vcodec_mem_alloc(ctx, &msg_queue->wdma_addr);
	if (err) {
		mtk_v4l2_err("failed to allocate wdma_addr buf");
		return -ENOMEM;
	}
	msg_queue->wdma_rptr_addr = msg_queue->wdma_addr.dma_addr;
	msg_queue->wdma_wptr_addr = msg_queue->wdma_addr.dma_addr;

	for (i = 0; i < NUM_BUFFER_COUNT; i++) {
		lat_buf = &msg_queue->lat_buf[i];

		lat_buf->wdma_err_addr.size = VDEC_ERR_MAP_SZ_AVC;
		err = mtk_vcodec_mem_alloc(ctx, &lat_buf->wdma_err_addr);
		if (err) {
			mtk_v4l2_err("failed to allocate wdma_err_addr buf[%d]", i);
			goto mem_alloc_err;
		}

		lat_buf->slice_bc_addr.size = VDEC_LAT_SLICE_HEADER_SZ;
		err = mtk_vcodec_mem_alloc(ctx, &lat_buf->slice_bc_addr);
		if (err) {
			mtk_v4l2_err("failed to allocate slice_bc_addr buf[%d]", i);
			goto mem_alloc_err;
		}

		lat_buf->private_data = kzalloc(private_size, GFP_KERNEL);
		if (!lat_buf->private_data) {
			err = -ENOMEM;
			goto mem_alloc_err;
		}

		lat_buf->ctx = ctx;
		lat_buf->core_decode = core_decode;
		err = vdec_msg_queue_qbuf(&msg_queue->lat_ctx, lat_buf);
		if (err) {
			mtk_v4l2_err("failed to qbuf buf[%d]", i);
			goto mem_alloc_err;
		}
	}
	return 0;

mem_alloc_err:
	vdec_msg_queue_deinit(msg_queue, ctx);
	return err;
}