cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sst_ipc.c (11225B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  sst_ipc.c - Intel SST Driver for audio engine
 *
 *  Copyright (C) 2008-14 Intel Corporation
 *  Authors:	Vinod Koul <vinod.koul@intel.com>
 *		Harsha Priya <priya.harsha@intel.com>
 *		Dharageswari R <dharageswari.r@intel.com>
 *		KP Jeeja <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/compress_driver.h>
#include <asm/intel-mid.h>
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"

struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
					u32 msg_id, u32 drv_id)
{
	struct sst_block *msg = NULL;

	dev_dbg(ctx->dev, "Enter\n");
	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return NULL;
	msg->condition = false;
	msg->on = true;
	msg->msg_id = msg_id;
	msg->drv_id = drv_id;
	spin_lock_bh(&ctx->block_lock);
	list_add_tail(&msg->node, &ctx->block_list);
	spin_unlock_bh(&ctx->block_lock);

	return msg;
}

/*
 * While handling interrupts we need to check the message status and then
 * see whether anyone is blocked waiting for that message.
 *
 * Here we unblock the waiters, matching on the msg_id/drv_id pair they
 * registered with sst_create_block().  No matching block is found in two
 * cases:
 *  a) the message is a short one for which no block exists, so it is
 *     silently ignored
 *  b) we genuinely fail to find the block (possibly a bug)
 *
 * Since short messages are frequent, an error print for case a) would spam
 * the kernel log, so both cases are logged as debug prints that can be
 * enabled via dynamic debug when chasing IPC issues.
 * (A usage sketch of the waiter side follows this function.)
 */
int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
		u32 drv_id, u32 ipc, void *data, u32 size)
{
	struct sst_block *block = NULL;

	dev_dbg(ctx->dev, "Enter\n");

	spin_lock_bh(&ctx->block_lock);
	list_for_each_entry(block, &ctx->block_list, node) {
		dev_dbg(ctx->dev, "Block ipc %d, drv_id %d\n", block->msg_id,
							block->drv_id);
		if (block->msg_id == ipc && block->drv_id == drv_id) {
			dev_dbg(ctx->dev, "free up the block\n");
			block->ret_code = result;
			block->data = data;
			block->size = size;
			block->condition = true;
			spin_unlock_bh(&ctx->block_lock);
			wake_up(&ctx->wait_queue);
			return 0;
		}
	}
	spin_unlock_bh(&ctx->block_lock);
	dev_dbg(ctx->dev,
		"Block not found or a response received for a short msg for ipc %d, drv_id %d\n",
		ipc, drv_id);
	return -EINVAL;
}
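
/*
 * Illustrative usage sketch only -- this function is not called anywhere in
 * the driver and exists purely to show the waiter side of the block
 * machinery above, i.e. how the msg_id/drv_id pair registered here is later
 * matched by sst_wake_up_block().  The helper name, the elided message
 * posting and the 5 second timeout are made up for the example; the
 * driver's real wait helpers add more error handling.
 */
static int __maybe_unused example_wait_for_reply(struct intel_sst_drv *ctx,
						 u32 msg_id, u32 drv_id)
{
	struct sst_block *block;
	int ret;

	block = sst_create_block(ctx, msg_id, drv_id);
	if (!block)
		return -ENOMEM;

	/* ... post the IPC message carrying msg_id/drv_id here ... */

	/* sst_wake_up_block() sets block->condition and wakes this queue */
	wait_event_timeout(ctx->wait_queue, block->condition,
			   msecs_to_jiffies(5000));
	ret = block->condition ? block->ret_code : -ETIMEDOUT;

	/* sst_free_block() unlinks the block and frees any reply payload */
	sst_free_block(ctx, block);
	return ret;
}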

int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed)
{
	struct sst_block *block = NULL, *__block;

	dev_dbg(ctx->dev, "Enter\n");
	spin_lock_bh(&ctx->block_lock);
	list_for_each_entry_safe(block, __block, &ctx->block_list, node) {
		if (block == freed) {
			pr_debug("pvt_id freed --> %d\n", freed->drv_id);
			/* toggle the index position of pvt_id */
			list_del(&freed->node);
			spin_unlock_bh(&ctx->block_lock);
			kfree(freed->data);
			freed->data = NULL;
			kfree(freed);
			return 0;
		}
	}
	spin_unlock_bh(&ctx->block_lock);
	dev_err(ctx->dev, "block is already freed!!!\n");
	return -EINVAL;
}

int sst_post_message_mrfld(struct intel_sst_drv *sst_drv_ctx,
		struct ipc_post *ipc_msg, bool sync)
{
	struct ipc_post *msg = ipc_msg;
	union ipc_header_mrfld header;
	unsigned int loop_count = 0;
	int retval = 0;
	unsigned long irq_flags;

	dev_dbg(sst_drv_ctx->dev, "Enter: sync: %d\n", sync);
	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
	if (sync) {
		while (header.p.header_high.part.busy) {
			if (loop_count > 25) {
				dev_err(sst_drv_ctx->dev,
					"sst: Busy wait failed, can't send this msg\n");
				retval = -EBUSY;
				goto out;
			}
			cpu_relax();
			loop_count++;
			header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
		}
	} else {
		if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
			/* queue is empty, nothing to send */
			spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
			dev_dbg(sst_drv_ctx->dev,
					"Empty msg queue... NO Action\n");
			return 0;
		}

		if (header.p.header_high.part.busy) {
			spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
			dev_dbg(sst_drv_ctx->dev, "Busy not free... post later\n");
			return 0;
		}

		/* copy msg from list */
		msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
				struct ipc_post, node);
		list_del(&msg->node);
	}
	dev_dbg(sst_drv_ctx->dev, "sst: Post message: header = %x\n",
				msg->mrfld_header.p.header_high.full);
	dev_dbg(sst_drv_ctx->dev, "sst: size = 0x%x\n",
			msg->mrfld_header.p.header_low_payload);

	if (msg->mrfld_header.p.header_high.part.large)
		memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
			msg->mailbox_data,
			msg->mrfld_header.p.header_low_payload);

	sst_shim_write64(sst_drv_ctx->shim, SST_IPCX, msg->mrfld_header.full);

out:
	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	kfree(msg->mailbox_data);
	kfree(msg);
	return retval;
}

void intel_sst_clear_intr_mrfld(struct intel_sst_drv *sst_drv_ctx)
{
	union interrupt_reg_mrfld isr;
	union interrupt_reg_mrfld imr;
	union ipc_header_mrfld clear_ipc;
	unsigned long irq_flags;

	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	imr.full = sst_shim_read64(sst_drv_ctx->shim, SST_IMRX);
	isr.full = sst_shim_read64(sst_drv_ctx->shim, SST_ISRX);

	/* write 1 to clear */
	isr.part.busy_interrupt = 1;
	sst_shim_write64(sst_drv_ctx->shim, SST_ISRX, isr.full);

	/* Set IA done bit */
	clear_ipc.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCD);

	clear_ipc.p.header_high.part.busy = 0;
	clear_ipc.p.header_high.part.done = 1;
	clear_ipc.p.header_low_payload = IPC_ACK_SUCCESS;
	sst_shim_write64(sst_drv_ctx->shim, SST_IPCD, clear_ipc.full);
	/* unmask busy interrupt */
	imr.part.busy_interrupt = 0;
	sst_shim_write64(sst_drv_ctx->shim, SST_IMRX, imr.full);
	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
}

/*
 * process_fw_init - process the FW init msg
 *
 * @sst_drv_ctx: driver context
 * @msg: IPC message mailbox data from FW
 *
 * This function processes the FW init message sent by the FW, marks the
 * FW state and prints debug info about the loaded FW.  (A sketch of how
 * the download path waits for this message follows the function below.)
 */
static void process_fw_init(struct intel_sst_drv *sst_drv_ctx,
			void *msg)
{
	struct ipc_header_fw_init *init =
		(struct ipc_header_fw_init *)msg;
	int retval = 0;

	dev_dbg(sst_drv_ctx->dev, "*** FW Init msg came***\n");
	if (init->result) {
		sst_set_fw_state_locked(sst_drv_ctx, SST_RESET);
		dev_err(sst_drv_ctx->dev, "FW Init failed, Error %x\n",
				init->result);
		retval = init->result;
		goto ret;
	}
	if (memcmp(&sst_drv_ctx->fw_version, &init->fw_version,
		   sizeof(init->fw_version)))
		dev_info(sst_drv_ctx->dev, "FW Version %02x.%02x.%02x.%02x\n",
			init->fw_version.type, init->fw_version.major,
			init->fw_version.minor, init->fw_version.build);
	dev_dbg(sst_drv_ctx->dev, "Build date %s Time %s\n",
			init->build_info.date, init->build_info.time);

	/* Save FW version */
	sst_drv_ctx->fw_version.type = init->fw_version.type;
	sst_drv_ctx->fw_version.major = init->fw_version.major;
	sst_drv_ctx->fw_version.minor = init->fw_version.minor;
	sst_drv_ctx->fw_version.build = init->fw_version.build;

ret:
	sst_wake_up_block(sst_drv_ctx, retval, FW_DWNL_ID, 0, NULL, 0);
}
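
/*
 * Illustrative usage sketch only -- not part of the driver.  It shows how a
 * firmware-download path pairs with the sst_wake_up_block(..., FW_DWNL_ID,
 * 0, ...) call in process_fw_init() above: the waiter has to register a
 * block with msg_id 0 and drv_id FW_DWNL_ID for the match to succeed.  The
 * helper name, the elided download step and the 3 second timeout are
 * placeholders; only the block parameters come from this file.
 */
static int __maybe_unused example_wait_fw_init(struct intel_sst_drv *ctx)
{
	struct sst_block *block;
	int ret;

	block = sst_create_block(ctx, 0, FW_DWNL_ID);
	if (!block)
		return -ENOMEM;

	/* ... download the firmware and start the DSP here ... */

	wait_event_timeout(ctx->wait_queue, block->condition,
			   msecs_to_jiffies(3000));
	ret = block->condition ? block->ret_code : -ETIMEDOUT;

	sst_free_block(ctx, block);
	return ret;
}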

static void process_fw_async_msg(struct intel_sst_drv *sst_drv_ctx,
			struct ipc_post *msg)
{
	u32 msg_id;
	int str_id;
	u32 data_size;
	void *data_offset;
	struct stream_info *stream;
	u32 msg_low, pipe_id;

	msg_low = msg->mrfld_header.p.header_low_payload;
	msg_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->cmd_id;
	data_offset = (msg->mailbox_data + sizeof(struct ipc_dsp_hdr));
	data_size = msg_low - (sizeof(struct ipc_dsp_hdr));

	switch (msg_id) {
	case IPC_SST_PERIOD_ELAPSED_MRFLD:
		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
		str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
		if (str_id > 0) {
			dev_dbg(sst_drv_ctx->dev,
				"Period elapsed rcvd for pipe id 0x%x\n",
				pipe_id);
			stream = &sst_drv_ctx->streams[str_id];
			/* If stream is dropped, skip processing this message */
			if (stream->status == STREAM_INIT)
				break;
			if (stream->period_elapsed)
				stream->period_elapsed(stream->pcm_substream);
			if (stream->compr_cb)
				stream->compr_cb(stream->compr_cb_param);
		}
		break;

	case IPC_IA_DRAIN_STREAM_MRFLD:
		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
		str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
		if (str_id > 0) {
			stream = &sst_drv_ctx->streams[str_id];
			if (stream->drain_notify)
				stream->drain_notify(stream->drain_cb_param);
		}
		break;

	case IPC_IA_FW_ASYNC_ERR_MRFLD:
		dev_err(sst_drv_ctx->dev, "FW sent async error msg:\n");
		/* dump the whole error payload once */
		print_hex_dump(KERN_DEBUG, NULL, DUMP_PREFIX_NONE,
				16, 4, data_offset, data_size, false);
		break;

	case IPC_IA_FW_INIT_CMPLT_MRFLD:
		process_fw_init(sst_drv_ctx, data_offset);
		break;

	case IPC_IA_BUF_UNDER_RUN_MRFLD:
		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
		str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
		if (str_id > 0)
			dev_err(sst_drv_ctx->dev,
				"Buffer under-run for pipe:%#x str_id:%d\n",
				pipe_id, str_id);
		break;

	default:
		dev_err(sst_drv_ctx->dev,
			"Unrecognized async msg from FW msg_id %#x\n", msg_id);
	}
}

void sst_process_reply_mrfld(struct intel_sst_drv *sst_drv_ctx,
		struct ipc_post *msg)
{
	unsigned int drv_id;
	void *data;
	union ipc_header_high msg_high;
	u32 msg_low;
	struct ipc_dsp_hdr *dsp_hdr;

	msg_high = msg->mrfld_header.p.header_high;
	msg_low = msg->mrfld_header.p.header_low_payload;

	dev_dbg(sst_drv_ctx->dev, "IPC process message header %x payload %x\n",
			msg->mrfld_header.p.header_high.full,
			msg->mrfld_header.p.header_low_payload);

	drv_id = msg_high.part.drv_id;

	/* Check for async messages first */
	if (drv_id == SST_ASYNC_DRV_ID) {
		/* FW sent async large message */
		process_fw_async_msg(sst_drv_ctx, msg);
		return;
	}

	/* FW sent short error response for an IPC */
	if (msg_high.part.result && drv_id && !msg_high.part.large) {
		/* 32-bit FW error code in msg_low */
		dev_err(sst_drv_ctx->dev, "FW sent error response 0x%x", msg_low);
		sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
			msg_high.part.drv_id,
			msg_high.part.msg_id, NULL, 0);
		return;
	}

	/*
	 * Process all valid responses.  For a large message the low payload
	 * holds the number of bytes to copy from the mailbox; the copy is
	 * handed to the waiter through its block (there is a short ownership
	 * sketch after this function).
	 */
	if (msg_high.part.large) {
		data = kmemdup((void *)msg->mailbox_data, msg_low, GFP_KERNEL);
		if (!data)
			return;
		/* Copy command id so we can use it to put sst to reset */
		dsp_hdr = (struct ipc_dsp_hdr *)data;
		dev_dbg(sst_drv_ctx->dev, "cmd_id %d\n", dsp_hdr->cmd_id);
		if (sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
				msg_high.part.drv_id,
				msg_high.part.msg_id, data, msg_low))
			kfree(data);
	} else {
		sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
				msg_high.part.drv_id,
				msg_high.part.msg_id, NULL, 0);
	}
}
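
/*
 * Illustrative usage sketch only -- not part of the driver.  It shows what a
 * woken waiter sees once sst_process_reply_mrfld() above has run: for a
 * short error response only ret_code is set, while for a large reply the
 * kmemdup()ed payload and its size are handed over through the block.  The
 * waiter is assumed to have created and slept on the block as in the sketch
 * after sst_wake_up_block() earlier in this file.
 */
static void __maybe_unused example_consume_reply(struct intel_sst_drv *ctx,
						 struct sst_block *block)
{
	if (block->ret_code) {
		/* short response: FW result code only, no payload */
		dev_err(ctx->dev, "FW returned error %d\n", block->ret_code);
	} else if (block->data) {
		/* large reply: the payload starts with a struct ipc_dsp_hdr */
		struct ipc_dsp_hdr *hdr = block->data;

		dev_dbg(ctx->dev, "reply cmd_id %d, %u payload bytes\n",
			hdr->cmd_id, block->size);
	}

	/* sst_free_block() releases both the block and block->data */
	sst_free_block(ctx, block);
}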