cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vpu_msgs.c (10580B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include "vpu.h"
#include "vpu_core.h"
#include "vpu_rpc.h"
#include "vpu_mbox.h"
#include "vpu_defs.h"
#include "vpu_cmds.h"
#include "vpu_msgs.h"
#include "vpu_v4l2.h"

#define VPU_PKT_HEADER_LENGTH		3

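/* Pairs a generic message id with the session callback that handles it. */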
struct vpu_msg_handler {
	u32 id;
	void (*done)(struct vpu_inst *inst, struct vpu_rpc_event *pkt);
};

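/*
 * The vpu_session_handle_*() callbacks below unpack the payload of a
 * firmware event and forward it to the instance via call_void_vop().
 */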
static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	vpu_trace(inst->dev, "[%d]\n", inst->id);
}

static void vpu_session_handle_mem_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_pkt_mem_req_data req_data;

	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&req_data);
	vpu_trace(inst->dev, "[%d] %d:%d %d:%d %d:%d\n",
		  inst->id,
		  req_data.enc_frame_size,
		  req_data.enc_frame_num,
		  req_data.ref_frame_size,
		  req_data.ref_frame_num,
		  req_data.act_buf_size,
		  req_data.act_buf_num);
	call_void_vop(inst, mem_request,
		      req_data.enc_frame_size,
		      req_data.enc_frame_num,
		      req_data.ref_frame_size,
		      req_data.ref_frame_num,
		      req_data.act_buf_size,
		      req_data.act_buf_num);
}

static void vpu_session_handle_stop_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	vpu_trace(inst->dev, "[%d]\n", inst->id);

	call_void_vop(inst, stop_done);
}

static void vpu_session_handle_seq_hdr(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_dec_codec_info info;
	const struct vpu_core_resources *res;

	memset(&info, 0, sizeof(info));
	res = vpu_get_resource(inst);
	info.stride = res ? res->stride : 1;
	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
	call_void_vop(inst, event_notify, VPU_MSG_ID_SEQ_HDR_FOUND, &info);
}

static void vpu_session_handle_resolution_change(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	call_void_vop(inst, event_notify, VPU_MSG_ID_RES_CHANGE, NULL);
}

static void vpu_session_handle_enc_frame_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_enc_pic_info info;

	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
	dev_dbg(inst->dev, "[%d] frame id = %d, wptr = 0x%x, size = %d\n",
		inst->id, info.frame_id, info.wptr, info.frame_size);
	call_void_vop(inst, get_one_frame, &info);
}

static void vpu_session_handle_frame_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_fs_info fs;

	vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
	call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_REQ, &fs);
}

static void vpu_session_handle_frame_release(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	if (inst->core->type == VPU_CORE_TYPE_ENC) {
		struct vpu_frame_info info;

		memset(&info, 0, sizeof(info));
		vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info.sequence);
		dev_dbg(inst->dev, "[%d] %d\n", inst->id, info.sequence);
		info.type = inst->out_format.type;
		call_void_vop(inst, buf_done, &info);
	} else if (inst->core->type == VPU_CORE_TYPE_DEC) {
		struct vpu_fs_info fs;

		vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
		call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_RELEASE, &fs);
	}
}

static void vpu_session_handle_input_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	dev_dbg(inst->dev, "[%d]\n", inst->id);
	call_void_vop(inst, input_done);
}

static void vpu_session_handle_pic_decoded(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_dec_pic_info info;

	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
	call_void_vop(inst, get_one_frame, &info);
}

static void vpu_session_handle_pic_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_dec_pic_info info;
	struct vpu_frame_info frame;

	memset(&frame, 0, sizeof(frame));
	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
	if (inst->core->type == VPU_CORE_TYPE_DEC)
		frame.type = inst->cap_format.type;
	frame.id = info.id;
	frame.luma = info.luma;
	frame.skipped = info.skipped;
	frame.timestamp = info.timestamp;

	call_void_vop(inst, buf_done, &frame);
}

static void vpu_session_handle_eos(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	call_void_vop(inst, event_notify, VPU_MSG_ID_PIC_EOS, NULL);
}

static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	dev_err(inst->dev, "unsupported stream\n");
	call_void_vop(inst, event_notify, VPU_MSG_ID_UNSUPPORTED, NULL);
	vpu_v4l2_set_error(inst);
}

static void vpu_session_handle_firmware_xcpt(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	char *str = (char *)pkt->data;

	dev_err(inst->dev, "%s firmware xcpt: %s\n",
		vpu_core_type_desc(inst->core->type), str);
	call_void_vop(inst, event_notify, VPU_MSG_ID_FIRMWARE_XCPT, NULL);
	set_bit(inst->id, &inst->core->hang_mask);
	vpu_v4l2_set_error(inst);
}

static void vpu_session_handle_pic_skipped(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	vpu_inst_lock(inst);
	vpu_skip_frame(inst, 1);
	vpu_inst_unlock(inst);
}

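/*
 * Dispatch table consulted by vpu_session_handle_msg(): each generic
 * message id is paired with the session handler defined above.
 */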
static struct vpu_msg_handler handlers[] = {
	{VPU_MSG_ID_START_DONE, vpu_session_handle_start_done},
	{VPU_MSG_ID_STOP_DONE, vpu_session_handle_stop_done},
	{VPU_MSG_ID_MEM_REQUEST, vpu_session_handle_mem_request},
	{VPU_MSG_ID_SEQ_HDR_FOUND, vpu_session_handle_seq_hdr},
	{VPU_MSG_ID_RES_CHANGE, vpu_session_handle_resolution_change},
	{VPU_MSG_ID_FRAME_INPUT_DONE, vpu_session_handle_input_done},
	{VPU_MSG_ID_FRAME_REQ, vpu_session_handle_frame_request},
	{VPU_MSG_ID_FRAME_RELEASE, vpu_session_handle_frame_release},
	{VPU_MSG_ID_ENC_DONE, vpu_session_handle_enc_frame_done},
	{VPU_MSG_ID_PIC_DECODED, vpu_session_handle_pic_decoded},
	{VPU_MSG_ID_DEC_DONE, vpu_session_handle_pic_done},
	{VPU_MSG_ID_PIC_EOS, vpu_session_handle_eos},
	{VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error},
	{VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt},
	{VPU_MSG_ID_PIC_SKIPPED, vpu_session_handle_pic_skipped},
};

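/*
 * Translate the raw firmware message id into a generic VPU_MSG_ID_*,
 * run the matching handler from the table above, and finally report the
 * event to the command tracking code via vpu_response_cmd().
 */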
static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *msg)
{
	int ret;
	u32 msg_id;
	struct vpu_msg_handler *handler = NULL;
	unsigned int i;

	ret = vpu_iface_convert_msg_id(inst->core, msg->hdr.id);
	if (ret < 0)
		return -EINVAL;

	msg_id = ret;
	dev_dbg(inst->dev, "[%d] receive event(0x%x)\n", inst->id, msg_id);

	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
		if (handlers[i].id == msg_id) {
			handler = &handlers[i];
			break;
		}
	}

	if (handler && handler->done)
		handler->done(inst, msg);

	vpu_response_cmd(inst, msg_id, 1);

	return 0;
}

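/*
 * Pop one event from the instance fifo: a fixed-size header followed by
 * hdr.num 32-bit payload words, mirroring the layout written by
 * vpu_inst_handle_msg() below.
 */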
static bool vpu_inst_receive_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	unsigned long bytes = sizeof(struct vpu_rpc_event_header);
	u32 ret;

	memset(pkt, 0, sizeof(*pkt));
	if (kfifo_len(&inst->msg_fifo) < bytes)
		return false;

	ret = kfifo_out(&inst->msg_fifo, pkt, bytes);
	if (ret != bytes)
		return false;

	if (pkt->hdr.num > 0) {
		bytes = pkt->hdr.num * sizeof(u32);
		ret = kfifo_out(&inst->msg_fifo, pkt->data, bytes);
		if (ret != bytes)
			return false;
	}

	return true;
}

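/* Per-instance work handler: drain the instance fifo and handle each event. */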
void vpu_inst_run_work(struct work_struct *work)
{
	struct vpu_inst *inst = container_of(work, struct vpu_inst, msg_work);
	struct vpu_rpc_event pkt;

	while (vpu_inst_receive_msg(inst, &pkt))
		vpu_session_handle_msg(inst, &pkt);
}

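/*
 * Producer side: copy the event (header plus payload words) into the
 * instance fifo and kick the per-instance work item.
 */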
static void vpu_inst_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	unsigned long bytes;
	u32 id = pkt->hdr.id;
	int ret;

	if (!inst->workqueue)
		return;

	bytes = sizeof(pkt->hdr) + pkt->hdr.num * sizeof(u32);
	ret = kfifo_in(&inst->msg_fifo, pkt, bytes);
	if (ret != bytes)
		dev_err(inst->dev, "[%d:%d]overflow: %d\n", inst->core->id, inst->id, id);
	queue_work(inst->workqueue, &inst->msg_work);
}

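/*
 * Pull pending events from the firmware RPC interface and route each one
 * to the instance identified by its header index, recording the message
 * in the instance's command flow on the way.
 */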
static int vpu_handle_msg(struct vpu_core *core)
{
	struct vpu_rpc_event pkt;
	struct vpu_inst *inst;
	int ret;

	memset(&pkt, 0, sizeof(pkt));
	while (!vpu_iface_receive_msg(core, &pkt)) {
		dev_dbg(core->dev, "event index = %d, id = %d, num = %d\n",
			pkt.hdr.index, pkt.hdr.id, pkt.hdr.num);

		ret = vpu_iface_convert_msg_id(core, pkt.hdr.id);
		if (ret < 0)
			continue;

		inst = vpu_core_find_instance(core, pkt.hdr.index);
		if (inst) {
			vpu_response_cmd(inst, ret, 0);
			mutex_lock(&core->cmd_lock);
			vpu_inst_record_flow(inst, ret);
			mutex_unlock(&core->cmd_lock);

			vpu_inst_handle_msg(inst, &pkt);
			vpu_inst_put(inst);
		}
		memset(&pkt, 0, sizeof(pkt));
	}

	return 0;
}

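/*
 * Process one irq code taken from the core fifo: VPU_IRQ_CODE_SYNC triggers
 * the firmware boot handshake over the mailbox; any other unhandled code is
 * treated as a notification that firmware events are pending, so the RPC
 * message queue is drained.
 */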
static int vpu_isr_thread(struct vpu_core *core, u32 irq_code)
{
	dev_dbg(core->dev, "irq code = 0x%x\n", irq_code);
	switch (irq_code) {
	case VPU_IRQ_CODE_SYNC:
		vpu_mbox_send_msg(core, PRC_BUF_OFFSET, core->rpc.phys - core->fw.phys);
		vpu_mbox_send_msg(core, BOOT_ADDRESS, core->fw.phys);
		vpu_mbox_send_msg(core, INIT_DONE, 2);
		break;
	case VPU_IRQ_CODE_BOOT_DONE:
		break;
	case VPU_IRQ_CODE_SNAPSHOT_DONE:
		break;
	default:
		vpu_handle_msg(core);
		break;
	}

	return 0;
}

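/* Drain the irq-code fifo filled by vpu_isr() and handle each code in turn. */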
static void vpu_core_run_msg_work(struct vpu_core *core)
{
	const unsigned int SIZE = sizeof(u32);

	while (kfifo_len(&core->msg_fifo) >= SIZE) {
		u32 data = 0;

		if (kfifo_out(&core->msg_fifo, &data, SIZE) == SIZE)
			vpu_isr_thread(core, data);
	}
}

void vpu_msg_run_work(struct work_struct *work)
{
	struct vpu_core *core = container_of(work, struct vpu_core, msg_work);
	unsigned long delay = msecs_to_jiffies(10);

	vpu_core_run_msg_work(core);
	queue_delayed_work(core->workqueue, &core->msg_delayed_work, delay);
}

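/*
 * Delayed work scheduled by vpu_msg_run_work(): re-check the core fifo and
 * every instance fifo and kick their workers again if messages are still
 * queued.
 */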
void vpu_msg_delayed_work(struct work_struct *work)
{
	struct vpu_core *core;
	struct delayed_work *dwork;
	unsigned long bytes = sizeof(u32);
	u32 i;

	if (!work)
		return;

	dwork = to_delayed_work(work);
	core = container_of(dwork, struct vpu_core, msg_delayed_work);
	if (kfifo_len(&core->msg_fifo) >= bytes)
		vpu_core_run_msg_work(core);

	bytes = sizeof(struct vpu_rpc_event_header);
	for (i = 0; i < core->supported_instance_count; i++) {
		struct vpu_inst *inst = vpu_core_find_instance(core, i);

		if (!inst)
			continue;

		if (inst->workqueue && kfifo_len(&inst->msg_fifo) >= bytes)
			queue_work(inst->workqueue, &inst->msg_work);

		vpu_inst_put(inst);
	}
}

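/*
 * Interrupt handler entry point: complete waits for boot/snapshot, then push
 * the irq code into the core fifo and defer the real work to the workqueue.
 */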
int vpu_isr(struct vpu_core *core, u32 irq)
{
	switch (irq) {
	case VPU_IRQ_CODE_SYNC:
		break;
	case VPU_IRQ_CODE_BOOT_DONE:
		complete(&core->cmp);
		break;
	case VPU_IRQ_CODE_SNAPSHOT_DONE:
		complete(&core->cmp);
		break;
	default:
		break;
	}

	if (kfifo_in(&core->msg_fifo, &irq, sizeof(irq)) != sizeof(irq))
		dev_err(core->dev, "[%d]overflow: %d\n", core->id, irq);
	queue_work(core->workqueue, &core->msg_work);

	return 0;
}