cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ptdma-dev.c (8332B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Passthru DMA device driver
 * -- Based on the CCP driver
 *
 * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
 *
 * Author: Sanjay R Mehta <sanju.mehta@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "ptdma.h"

/* Human-readable error strings */
static char *pt_error_codes[] = {
	"",
	"ERR 01: ILLEGAL_ENGINE",
	"ERR 03: ILLEGAL_FUNCTION_TYPE",
	"ERR 04: ILLEGAL_FUNCTION_MODE",
	"ERR 06: ILLEGAL_FUNCTION_SIZE",
	"ERR 08: ILLEGAL_FUNCTION_RSVD",
	"ERR 09: ILLEGAL_BUFFER_LENGTH",
	"ERR 10: VLSB_FAULT",
	"ERR 11: ILLEGAL_MEM_ADDR",
	"ERR 12: ILLEGAL_MEM_SEL",
	"ERR 13: ILLEGAL_CONTEXT_ID",
	"ERR 15: 0xF Reserved",
	"ERR 18: CMD_TIMEOUT",
	"ERR 19: IDMA0_AXI_SLVERR",
	"ERR 20: IDMA0_AXI_DECERR",
	"ERR 21: 0x15 Reserved",
	"ERR 22: IDMA1_AXI_SLAVE_FAULT",
	"ERR 23: IDMA1_AIXI_DECERR",
	"ERR 24: 0x18 Reserved",
	"ERR 27: 0x1B Reserved",
	"ERR 38: ODMA0_AXI_SLVERR",
	"ERR 39: ODMA0_AXI_DECERR",
	"ERR 40: 0x28 Reserved",
	"ERR 41: ODMA1_AXI_SLVERR",
	"ERR 42: ODMA1_AXI_DECERR",
	"ERR 43: LSB_PARITY_ERR",
};

static void pt_log_error(struct pt_device *d, int e)
{
	dev_err(d->dev, "PTDMA error: %s (0x%x)\n", pt_error_codes[e], e);
}

void pt_start_queue(struct pt_cmd_queue *cmd_q)
{
	/* Turn on the run bit */
	iowrite32(cmd_q->qcontrol | CMD_Q_RUN, cmd_q->reg_control);
}

void pt_stop_queue(struct pt_cmd_queue *cmd_q)
{
	/* Turn off the run bit */
	iowrite32(cmd_q->qcontrol & ~CMD_Q_RUN, cmd_q->reg_control);
}

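/*
 * Copy one 32-byte descriptor into the command ring at the current
 * queue index, advance the index, publish the new tail offset to the
 * queue's tail register and re-set the run bit so the engine picks
 * the descriptor up.
 */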
static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd_q)
{
	bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
	u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
	u32 tail;

	if (soc) {
		desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
		desc->dw0 &= ~DWORD0_SOC;
	}
	mutex_lock(&cmd_q->q_mutex);

	/* Copy 32-byte command descriptor to hw queue. */
	memcpy(q_desc, desc, 32);
	cmd_q->qidx = (cmd_q->qidx + 1) % CMD_Q_LEN;

	/* The data used by this command must be flushed to memory */
	wmb();

	/* Write the new tail address back to the queue register */
	tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
	iowrite32(tail, cmd_q->reg_control + 0x0004);

	/* Turn the queue back on using our cached control register */
	pt_start_queue(cmd_q);
	mutex_unlock(&cmd_q->q_mutex);

	return 0;
}

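/*
 * Build a passthru (memory-to-memory copy) descriptor from the
 * source/destination DMA addresses in @pt_engine and submit it
 * on the queue.
 */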
int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
			     struct pt_passthru_engine *pt_engine)
{
	struct ptdma_desc desc;
	struct pt_device *pt = container_of(cmd_q, struct pt_device, cmd_q);

	cmd_q->cmd_error = 0;
	cmd_q->total_pt_ops++;
	memset(&desc, 0, sizeof(desc));
	desc.dw0 = CMD_DESC_DW0_VAL;
	desc.length = pt_engine->src_len;
	desc.src_lo = lower_32_bits(pt_engine->src_dma);
	desc.dw3.src_hi = upper_32_bits(pt_engine->src_dma);
	desc.dst_lo = lower_32_bits(pt_engine->dst_dma);
	desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma);

	if (cmd_q->int_en)
		pt_core_enable_queue_interrupts(pt);
	else
		pt_core_disable_queue_interrupts(pt);

	return pt_core_execute_cmd(&desc, cmd_q);
}

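/*
 * Command completion: on error, log it and flush the queue by writing
 * the current tail offset back to the head register, then invoke the
 * caller's completion callback.
 */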
static void pt_do_cmd_complete(unsigned long data)
{
	struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;
	struct pt_cmd *cmd = tdata->cmd;
	struct pt_cmd_queue *cmd_q = &cmd->pt->cmd_q;
	u32 tail;

	if (cmd_q->cmd_error) {
		/*
		 * Log the error and flush the queue by
		 * moving the head pointer
		 */
		tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
		pt_log_error(cmd_q->pt, cmd_q->cmd_error);
		iowrite32(tail, cmd_q->reg_control + 0x0008);
	}

	cmd->pt_cmd_callback(cmd->data, cmd->ret);
}

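/*
 * Read and acknowledge the queue interrupt status; latch the first
 * error reported in the queue status and run command completion.
 */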
void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
{
	u32 status;

	status = ioread32(cmd_q->reg_control + 0x0010);
	if (status) {
		cmd_q->int_status = status;
		cmd_q->q_status = ioread32(cmd_q->reg_control + 0x0100);
		cmd_q->q_int_status = ioread32(cmd_q->reg_control + 0x0104);

		/* On error, only save the first error value */
		if ((status & INT_ERROR) && !cmd_q->cmd_error)
			cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

		/* Acknowledge the completion */
		iowrite32(status, cmd_q->reg_control + 0x0010);
		pt_do_cmd_complete((ulong)&pt->tdata);
	}
}

static irqreturn_t pt_core_irq_handler(int irq, void *data)
{
	struct pt_device *pt = data;
	struct pt_cmd_queue *cmd_q = &pt->cmd_q;

	pt_core_disable_queue_interrupts(pt);
	pt->total_interrupts++;
	pt_check_status_trans(pt, cmd_q);
	pt_core_enable_queue_interrupts(pt);
	return IRQ_HANDLED;
}

int pt_core_init(struct pt_device *pt)
{
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
	u32 dma_addr_lo, dma_addr_hi;
	struct device *dev = pt->dev;
	struct dma_pool *dma_pool;
	int ret;

	/* Allocate a dma pool for the queue */
	snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q", dev_name(pt->dev));

	dma_pool = dma_pool_create(dma_pool_name, dev,
				   PT_DMAPOOL_MAX_SIZE,
				   PT_DMAPOOL_ALIGN, 0);
	if (!dma_pool)
		return -ENOMEM;

	/* ptdma core initialisation */
	iowrite32(CMD_CONFIG_VHB_EN, pt->io_regs + CMD_CONFIG_OFFSET);
	iowrite32(CMD_QUEUE_PRIO, pt->io_regs + CMD_QUEUE_PRIO_OFFSET);
	iowrite32(CMD_TIMEOUT_DISABLE, pt->io_regs + CMD_TIMEOUT_OFFSET);
	iowrite32(CMD_CLK_GATE_CONFIG, pt->io_regs + CMD_CLK_GATE_CTL_OFFSET);
	iowrite32(CMD_CONFIG_REQID, pt->io_regs + CMD_REQID_CONFIG_OFFSET);

	cmd_q->pt = pt;
	cmd_q->dma_pool = dma_pool;
	mutex_init(&cmd_q->q_mutex);

	/* Page alignment satisfies our needs for N <= 128 */
	cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
	cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
					  &cmd_q->qbase_dma,
					  GFP_KERNEL);
	if (!cmd_q->qbase) {
		dev_err(dev, "unable to allocate command queue\n");
		ret = -ENOMEM;
		goto e_destroy_pool;
	}

	cmd_q->qidx = 0;

	/* Preset some register values */
	cmd_q->reg_control = pt->io_regs + CMD_Q_STATUS_INCR;

	/* Turn off the queues and disable interrupts until ready */
	pt_core_disable_queue_interrupts(pt);

	cmd_q->qcontrol = 0; /* Start with nothing */
	iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

	ioread32(cmd_q->reg_control + 0x0104);
	ioread32(cmd_q->reg_control + 0x0100);

	/* Clear the interrupt status */
	iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);

	/* Request an irq */
	ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_free_dma;
	}

	/* Update the device registers with queue information. */
	cmd_q->qcontrol &= ~CMD_Q_SIZE;
	cmd_q->qcontrol |= FIELD_PREP(CMD_Q_SIZE, QUEUE_SIZE_VAL);

	cmd_q->qdma_tail = cmd_q->qbase_dma;
	dma_addr_lo = lower_32_bits(cmd_q->qdma_tail);
	iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0004);
	iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0008);

	dma_addr_hi = upper_32_bits(cmd_q->qdma_tail);
	cmd_q->qcontrol |= (dma_addr_hi << 16);
	iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

	pt_core_enable_queue_interrupts(pt);

	/* Register the DMA engine support */
	ret = pt_dmaengine_register(pt);
	if (ret)
		goto e_free_irq;

	/* Set up debugfs entries */
	ptdma_debugfs_setup(pt);

	return 0;

e_free_irq:
	free_irq(pt->pt_irq, pt);

e_free_dma:
	dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);

e_destroy_pool:
	dma_pool_destroy(pt->cmd_q.dma_pool);

	return ret;
}

void pt_core_destroy(struct pt_device *pt)
{
	struct device *dev = pt->dev;
	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
	struct pt_cmd *cmd;

	/* Unregister the DMA engine */
	pt_dmaengine_unregister(pt);

	/* Disable and clear interrupts */
	pt_core_disable_queue_interrupts(pt);

	/* Turn off the run bit */
	pt_stop_queue(cmd_q);

	/* Clear the interrupt status */
	iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
	ioread32(cmd_q->reg_control + 0x0104);
	ioread32(cmd_q->reg_control + 0x0100);

	free_irq(pt->pt_irq, pt);

	dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
			  cmd_q->qbase_dma);

	/* Flush the cmd queue */
	while (!list_empty(&pt->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&pt->cmd, struct pt_cmd, entry);
		list_del(&cmd->entry);
		cmd->pt_cmd_callback(cmd->data, -ENODEV);
	}
}