cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

cnl.c (12492B)


      1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
      2//
      3// This file is provided under a dual BSD/GPLv2 license.  When using or
      4// redistributing this file, you may do so under either license.
      5//
      6// Copyright(c) 2018 Intel Corporation. All rights reserved.
      7//
      8// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
      9//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
     10//	    Rander Wang <rander.wang@intel.com>
     11//          Keyon Jie <yang.jie@linux.intel.com>
     12//
     13
     14/*
     15 * Hardware interface for audio DSP on Cannonlake.
     16 */
     17
     18#include <sound/sof/ext_manifest4.h>
     19#include <sound/sof/ipc4/header.h>
     20#include "../ipc4-priv.h"
     21#include "../ops.h"
     22#include "hda.h"
     23#include "hda-ipc.h"
     24#include "../sof-audio.h"
     25
/* debugfs register windows exposed for CNL: HDA, PP and DSP BAR regions */
static const struct snd_sof_debugfs_map cnl_dsp_debugfs[] = {
	{"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
	{"pp", HDA_DSP_PP_BAR,  0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
	{"dsp", HDA_DSP_BAR,  0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
};
     31
/* forward declarations: ack helpers used by both IPC3 and IPC4 IRQ threads */
static void cnl_ipc_host_done(struct snd_sof_dev *sdev);
static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev);
     34
/*
 * IPC4 IRQ thread: service DSP 'done' acknowledgements (HIPCIDA) and
 * incoming messages (HIPCTDR), which are either replies to host-initiated
 * IPCs or firmware notifications.
 */
irqreturn_t cnl_ipc4_irq_thread(int irq, void *context)
{
	struct sof_ipc4_msg notification_data = {{ 0 }};
	struct snd_sof_dev *sdev = context;
	bool ipc_irq = false;
	u32 hipcida, hipctdr;

	hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
	if (hipcida & CNL_DSP_REG_HIPCIDA_DONE) {
		/* DSP received the message */
		/* mask the Done interrupt; it is re-enabled in cnl_ipc_dsp_done() */
		snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
					CNL_DSP_REG_HIPCCTL,
					CNL_DSP_REG_HIPCCTL_DONE, 0);
		cnl_ipc_dsp_done(sdev);

		ipc_irq = true;
	}

	hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
	if (hipctdr & CNL_DSP_REG_HIPCTDR_BUSY) {
		/* Message from DSP (reply or notification) */
		u32 hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
					       CNL_DSP_REG_HIPCTDD);
		u32 primary = hipctdr & CNL_DSP_REG_HIPCTDR_MSG_MASK;
		u32 extension = hipctdd & CNL_DSP_REG_HIPCTDD_MSG_MASK;

		/* the direction bit in the primary word distinguishes replies */
		if (primary & SOF_IPC4_MSG_DIR_MASK) {
			/* Reply received */
			struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;

			data->primary = primary;
			data->extension = extension;

			/* serialize reply handling against the IPC TX path */
			spin_lock_irq(&sdev->ipc_lock);

			snd_sof_ipc_get_reply(sdev);
			snd_sof_ipc_reply(sdev, data->primary);

			spin_unlock_irq(&sdev->ipc_lock);
		} else {
			/* Notification received */
			notification_data.primary = primary;
			notification_data.extension = extension;

			/* rx_data points at on-stack data only for this call */
			sdev->ipc->msg.rx_data = &notification_data;
			snd_sof_ipc_msgs_rx(sdev);
			sdev->ipc->msg.rx_data = NULL;
		}

		/* Let DSP know that we have finished processing the message */
		cnl_ipc_host_done(sdev);

		ipc_irq = true;
	}

	if (!ipc_irq)
		/* This interrupt is not shared so no need to return IRQ_NONE. */
		dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");

	return IRQ_HANDLED;
}
     96
/*
 * IPC3 IRQ thread: handle firmware replies (HIPCIDA Done), firmware-initiated
 * messages (HIPCTDR Busy) and firmware panic notifications.
 */
irqreturn_t cnl_ipc_irq_thread(int irq, void *context)
{
	struct snd_sof_dev *sdev = context;
	u32 hipci;
	u32 hipcida;
	u32 hipctdr;
	u32 hipctdd;
	u32 msg;
	u32 msg_ext;
	bool ipc_irq = false;

	/* snapshot all IPC registers up front */
	hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
	hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
	hipctdd = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDD);
	hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR);

	/* reply message from DSP */
	if (hipcida & CNL_DSP_REG_HIPCIDA_DONE) {
		msg_ext = hipci & CNL_DSP_REG_HIPCIDR_MSG_MASK;
		msg = hipcida & CNL_DSP_REG_HIPCIDA_MSG_MASK;

		dev_vdbg(sdev->dev,
			 "ipc: firmware response, msg:0x%x, msg_ext:0x%x\n",
			 msg, msg_ext);

		/* mask Done interrupt */
		snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
					CNL_DSP_REG_HIPCCTL,
					CNL_DSP_REG_HIPCCTL_DONE, 0);

		/* serialize reply handling against the IPC TX path */
		spin_lock_irq(&sdev->ipc_lock);

		/* handle immediate reply from DSP core */
		hda_dsp_ipc_get_reply(sdev);
		snd_sof_ipc_reply(sdev, msg);

		/* ack the reply inside the lock so TX cannot race the ack */
		cnl_ipc_dsp_done(sdev);

		spin_unlock_irq(&sdev->ipc_lock);

		ipc_irq = true;
	}

	/* new message from DSP */
	if (hipctdr & CNL_DSP_REG_HIPCTDR_BUSY) {
		msg = hipctdr & CNL_DSP_REG_HIPCTDR_MSG_MASK;
		msg_ext = hipctdd & CNL_DSP_REG_HIPCTDD_MSG_MASK;

		dev_vdbg(sdev->dev,
			 "ipc: firmware initiated, msg:0x%x, msg_ext:0x%x\n",
			 msg, msg_ext);

		/* handle messages from DSP */
		if ((hipctdr & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
			struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
			bool non_recoverable = true;

			/*
			 * This is a PANIC message!
			 *
			 * If it is arriving during firmware boot and it is not
			 * the last boot attempt then change the non_recoverable
			 * to false as the DSP might be able to boot in the next
			 * iteration(s)
			 */
			if (sdev->fw_state == SOF_FW_BOOT_IN_PROGRESS &&
			    hda->boot_iteration < HDA_FW_BOOT_ATTEMPTS)
				non_recoverable = false;

			snd_sof_dsp_panic(sdev, HDA_DSP_PANIC_OFFSET(msg_ext),
					  non_recoverable);
		} else {
			snd_sof_ipc_msgs_rx(sdev);
		}

		/* ack the firmware-initiated message */
		cnl_ipc_host_done(sdev);

		ipc_irq = true;
	}

	if (!ipc_irq) {
		/*
		 * This interrupt is not shared so no need to return IRQ_NONE.
		 */
		dev_dbg_ratelimited(sdev->dev,
				    "nothing to do in IPC IRQ thread\n");
	}

	return IRQ_HANDLED;
}
    187
/* Ack a firmware-initiated message: clear Busy, then set Done */
static void cnl_ipc_host_done(struct snd_sof_dev *sdev)
{
	/*
	 * clear busy interrupt to tell dsp controller this
	 * interrupt has been accepted, not trigger it again
	 */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       CNL_DSP_REG_HIPCTDR,
				       CNL_DSP_REG_HIPCTDR_BUSY,
				       CNL_DSP_REG_HIPCTDR_BUSY);
	/*
	 * set done bit to ack dsp the msg has been
	 * processed and send reply msg to dsp
	 */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       CNL_DSP_REG_HIPCTDA,
				       CNL_DSP_REG_HIPCTDA_DONE,
				       CNL_DSP_REG_HIPCTDA_DONE);
}
    207
/* Ack a firmware reply: set Done in HIPCIDA and unmask the Done interrupt */
static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev)
{
	/*
	 * set DONE bit - tell DSP we have received the reply msg
	 * from DSP, and processed it, don't send more reply to host
	 */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       CNL_DSP_REG_HIPCIDA,
				       CNL_DSP_REG_HIPCIDA_DONE,
				       CNL_DSP_REG_HIPCIDA_DONE);

	/* unmask Done interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
				CNL_DSP_REG_HIPCCTL,
				CNL_DSP_REG_HIPCCTL_DONE,
				CNL_DSP_REG_HIPCCTL_DONE);
}
    225
    226static bool cnl_compact_ipc_compress(struct snd_sof_ipc_msg *msg,
    227				     u32 *dr, u32 *dd)
    228{
    229	struct sof_ipc_pm_gate *pm_gate = msg->msg_data;
    230
    231	if (pm_gate->hdr.cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
    232		/* send the compact message via the primary register */
    233		*dr = HDA_IPC_MSG_COMPACT | HDA_IPC_PM_GATE;
    234
    235		/* send payload via the extended data register */
    236		*dd = pm_gate->flags;
    237
    238		return true;
    239	}
    240
    241	return false;
    242}
    243
/*
 * Send an IPC4 message: optional payload goes through the mailbox, then
 * the extension word is written to HIPCIDD and finally the primary word
 * (with BUSY set) to HIPCIDR to ring the doorbell. Always returns 0.
 */
int cnl_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
{
	struct sof_ipc4_msg *msg_data = msg->msg_data;

	/* send the message via mailbox */
	if (msg_data->data_size)
		sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
				  msg_data->data_size);

	/* extension first; the HIPCIDR write below triggers the doorbell */
	snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD, msg_data->extension);
	snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
			  msg_data->primary | CNL_DSP_REG_HIPCIDR_BUSY);

	return 0;
}
    259
/*
 * Send an IPC3 message. Compact (register-only) form is used when possible;
 * otherwise the payload goes through the mailbox and HIPCIDR is used only
 * as a doorbell. Mailbox sends also (re)arm the delayed D0I3 work unless
 * the message is CTX_SAVE. Always returns 0.
 */
int cnl_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
{
	struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
	struct sof_ipc_cmd_hdr *hdr;
	u32 dr = 0;
	u32 dd = 0;

	/*
	 * Currently the only compact IPC supported is the PM_GATE
	 * IPC which is used for transitioning the DSP between the
	 * D0I0 and D0I3 states. And these are sent only during the
	 * set_power_state() op. Therefore, there will never be a case
	 * that a compact IPC results in the DSP exiting D0I3 without
	 * the host and FW being in sync.
	 */
	if (cnl_compact_ipc_compress(msg, &dr, &dd)) {
		/* send the message via IPC registers */
		snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD,
				  dd);
		snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
				  CNL_DSP_REG_HIPCIDR_BUSY | dr);
		return 0;
	}

	/* send the message via mailbox */
	sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
			  msg->msg_size);
	/* ring the doorbell; the message body is already in the mailbox */
	snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
			  CNL_DSP_REG_HIPCIDR_BUSY);

	hdr = msg->msg_data;

	/*
	 * Use mod_delayed_work() to schedule the delayed work
	 * to avoid scheduling multiple workqueue items when
	 * IPCs are sent at a high-rate. mod_delayed_work()
	 * modifies the timer if the work is pending.
	 * Also, a new delayed work should not be queued after the
	 * CTX_SAVE IPC, which is sent before the DSP enters D3.
	 */
	if (hdr->cmd != (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE))
		mod_delayed_work(system_wq, &hdev->d0i3_work,
				 msecs_to_jiffies(SOF_HDA_D0I3_WORK_DELAY_MS));

	return 0;
}
    306
    307void cnl_ipc_dump(struct snd_sof_dev *sdev)
    308{
    309	u32 hipcctl;
    310	u32 hipcida;
    311	u32 hipctdr;
    312
    313	hda_ipc_irq_dump(sdev);
    314
    315	/* read IPC status */
    316	hipcida = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDA);
    317	hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCCTL);
    318	hipctdr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCTDR);
    319
    320	/* dump the IPC regs */
    321	/* TODO: parse the raw msg */
    322	dev_err(sdev->dev,
    323		"error: host status 0x%8.8x dsp status 0x%8.8x mask 0x%8.8x\n",
    324		hipcida, hipctdr, hipcctl);
    325}
    326
/* cannonlake ops; populated at runtime by sof_cnl_ops_init() */
struct snd_sof_dsp_ops sof_cnl_ops;
EXPORT_SYMBOL_NS(sof_cnl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
    330
    331int sof_cnl_ops_init(struct snd_sof_dev *sdev)
    332{
    333	/* common defaults */
    334	memcpy(&sof_cnl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
    335
    336	/* probe/remove/shutdown */
    337	sof_cnl_ops.shutdown	= hda_dsp_shutdown;
    338
    339	/* ipc */
    340	if (sdev->pdata->ipc_type == SOF_IPC) {
    341		/* doorbell */
    342		sof_cnl_ops.irq_thread	= cnl_ipc_irq_thread;
    343
    344		/* ipc */
    345		sof_cnl_ops.send_msg	= cnl_ipc_send_msg;
    346	}
    347
    348	if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
    349		struct sof_ipc4_fw_data *ipc4_data;
    350
    351		sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
    352		if (!sdev->private)
    353			return -ENOMEM;
    354
    355		ipc4_data = sdev->private;
    356		ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
    357
    358		/* doorbell */
    359		sof_cnl_ops.irq_thread	= cnl_ipc4_irq_thread;
    360
    361		/* ipc */
    362		sof_cnl_ops.send_msg	= cnl_ipc4_send_msg;
    363	}
    364
    365	/* set DAI driver ops */
    366	hda_set_dai_drv_ops(sdev, &sof_cnl_ops);
    367
    368	/* debug */
    369	sof_cnl_ops.debug_map	= cnl_dsp_debugfs;
    370	sof_cnl_ops.debug_map_count	= ARRAY_SIZE(cnl_dsp_debugfs);
    371	sof_cnl_ops.ipc_dump	= cnl_ipc_dump;
    372
    373	/* pre/post fw run */
    374	sof_cnl_ops.post_fw_run = hda_dsp_post_fw_run;
    375
    376	/* firmware run */
    377	sof_cnl_ops.run = hda_dsp_cl_boot_firmware;
    378
    379	/* dsp core get/put */
    380	sof_cnl_ops.core_get = hda_dsp_core_get;
    381
    382	return 0;
    383};
    384EXPORT_SYMBOL_NS(sof_cnl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
    385
/* CannonLake DSP descriptor: cAVS 1.8 IP, 4 host-managed cores */
const struct sof_intel_dsp_desc cnl_chip_info = {
	/* Cannonlake */
	.cores_num = 4,
	.init_core_mask = 1,
	.host_managed_cores_mask = GENMASK(3, 0),
	.ipc_req = CNL_DSP_REG_HIPCIDR,
	.ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
	.ipc_ack = CNL_DSP_REG_HIPCIDA,
	.ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
	.ipc_ctl = CNL_DSP_REG_HIPCCTL,
	.rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
	.rom_init_timeout	= 300,
	.ssp_count = CNL_SSP_COUNT,
	.ssp_base_offset = CNL_SSP_BASE_OFFSET,
	.sdw_shim_base = SDW_SHIM_BASE,
	.sdw_alh_base = SDW_ALH_BASE,
	.check_sdw_irq	= hda_common_check_sdw_irq,
	.check_ipc_irq	= hda_dsp_check_ipc_irq,
	.hw_ip_version = SOF_INTEL_CAVS_1_8,
};
EXPORT_SYMBOL_NS(cnl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
    407
/*
 * JasperLake is technically derived from IceLake, and should be
 * described in icl.c. However since JasperLake was designed with
 * two cores, it cannot support the IceLake-specific power-up sequences
 * which rely on core3. To simplify, JasperLake uses the CannonLake ops and
 * is described in cnl.c
 */
/* JasperLake DSP descriptor: cAVS 2.0 IP, 2 host-managed cores */
const struct sof_intel_dsp_desc jsl_chip_info = {
	/* Jasperlake */
	.cores_num = 2,
	.init_core_mask = 1,
	.host_managed_cores_mask = GENMASK(1, 0),
	.ipc_req = CNL_DSP_REG_HIPCIDR,
	.ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
	.ipc_ack = CNL_DSP_REG_HIPCIDA,
	.ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
	.ipc_ctl = CNL_DSP_REG_HIPCCTL,
	.rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
	.rom_init_timeout	= 300,
	.ssp_count = ICL_SSP_COUNT,
	.ssp_base_offset = CNL_SSP_BASE_OFFSET,
	.sdw_shim_base = SDW_SHIM_BASE,
	.sdw_alh_base = SDW_ALH_BASE,
	.check_sdw_irq	= hda_common_check_sdw_irq,
	.check_ipc_irq	= hda_dsp_check_ipc_irq,
	.hw_ip_version = SOF_INTEL_CAVS_2_0,
};
EXPORT_SYMBOL_NS(jsl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);