cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

lima_gp.c (8622B)


// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <drm/lima_drm.h>

#include "lima_device.h"
#include "lima_gp.h"
#include "lima_regs.h"
#include "lima_gem.h"
#include "lima_vm.h"

#define gp_write(reg, data) writel(data, ip->iomem + reg)
#define gp_read(reg) readl(ip->iomem + reg)

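/*
 * GP interrupt handler. Returns IRQ_NONE when no GP interrupt bit is
 * pending, since the line may be shared. A PLBU out-of-memory interrupt
 * is only logged at debug level and leaves task->recoverable as set by
 * lima_gp_task_run(); any other error bit clears it and the error is
 * logged. In both error cases all interrupts are masked ahead of the
 * hard reset and the pipe is flagged as errored. A task is reported done
 * only when a VS/PLBU end-of-command-list bit is set and neither unit is
 * still active.
 */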
static irqreturn_t lima_gp_irq_handler(int irq, void *data)
{
	struct lima_ip *ip = data;
	struct lima_device *dev = ip->dev;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
	struct lima_sched_task *task = pipe->current_task;
	u32 state = gp_read(LIMA_GP_INT_STAT);
	u32 status = gp_read(LIMA_GP_STATUS);
	bool done = false;

	/* for shared irq case */
	if (!state)
		return IRQ_NONE;

	if (state & LIMA_GP_IRQ_MASK_ERROR) {
		if ((state & LIMA_GP_IRQ_MASK_ERROR) ==
		    LIMA_GP_IRQ_PLBU_OUT_OF_MEM) {
			dev_dbg(dev->dev, "gp out of heap irq status=%x\n",
				status);
		} else {
			dev_err(dev->dev, "gp error irq state=%x status=%x\n",
				state, status);
			if (task)
				task->recoverable = false;
		}

		/* mask all interrupts before hard reset */
		gp_write(LIMA_GP_INT_MASK, 0);

		pipe->error = true;
		done = true;
	} else {
		bool valid = state & (LIMA_GP_IRQ_VS_END_CMD_LST |
				      LIMA_GP_IRQ_PLBU_END_CMD_LST);
		bool active = status & (LIMA_GP_STATUS_VS_ACTIVE |
					LIMA_GP_STATUS_PLBU_ACTIVE);
		done = valid && !active;
		pipe->error = false;
	}

	gp_write(LIMA_GP_INT_CLEAR, state);

	if (done)
		lima_sched_pipe_task_done(pipe);

	return IRQ_HANDLED;
}

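/*
 * Soft reset is done in two halves: lima_gp_soft_reset_async() kicks off
 * the reset once a task has finished, and lima_gp_soft_reset_async_wait()
 * polls LIMA_GP_INT_RAWSTAT for LIMA_GP_IRQ_RESET_COMPLETED before the
 * next task touches the hardware, then restores the interrupt mask.
 */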
static void lima_gp_soft_reset_async(struct lima_ip *ip)
{
	if (ip->data.async_reset)
		return;

	gp_write(LIMA_GP_INT_MASK, 0);
	gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_RESET_COMPLETED);
	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_SOFT_RESET);
	ip->data.async_reset = true;
}

static int lima_gp_soft_reset_async_wait(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;
	u32 v;

	if (!ip->data.async_reset)
		return 0;

	err = readl_poll_timeout(ip->iomem + LIMA_GP_INT_RAWSTAT, v,
				 v & LIMA_GP_IRQ_RESET_COMPLETED,
				 0, 100);
	if (err) {
		dev_err(dev->dev, "gp soft reset time out\n");
		return err;
	}

	gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
	gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);

	ip->data.async_reset = false;
	return 0;
}

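/*
 * Sanity check of the user-supplied frame registers: no command list or
 * heap range may have its start address past its end address, and at
 * least one of the VS and PLBU command lists must be non-empty.
 */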
static int lima_gp_task_validate(struct lima_sched_pipe *pipe,
				 struct lima_sched_task *task)
{
	struct drm_lima_gp_frame *frame = task->frame;
	u32 *f = frame->frame;
	(void)pipe;

	if (f[LIMA_GP_VSCL_START_ADDR >> 2] >
	    f[LIMA_GP_VSCL_END_ADDR >> 2] ||
	    f[LIMA_GP_PLBUCL_START_ADDR >> 2] >
	    f[LIMA_GP_PLBUCL_END_ADDR >> 2] ||
	    f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] >
	    f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2])
		return -EINVAL;

	if (f[LIMA_GP_VSCL_START_ADDR >> 2] ==
	    f[LIMA_GP_VSCL_END_ADDR >> 2] &&
	    f[LIMA_GP_PLBUCL_START_ADDR >> 2] ==
	    f[LIMA_GP_PLBUCL_END_ADDR >> 2])
		return -EINVAL;

	return 0;
}

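/*
 * Program one GP job: if a growable heap BO backs the PLBU allocation
 * range, limit the end address to the currently allocated heap size and
 * mark the task recoverable; then wait for the pending soft reset, write
 * the frame registers and start whichever of the VS/PLBU units has a
 * non-empty command list.
 */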
static void lima_gp_task_run(struct lima_sched_pipe *pipe,
			     struct lima_sched_task *task)
{
	struct lima_ip *ip = pipe->processor[0];
	struct drm_lima_gp_frame *frame = task->frame;
	u32 *f = frame->frame;
	u32 cmd = 0;
	int i;

	/* update real heap buffer size for GP */
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];

		if (bo->heap_size &&
		    lima_vm_get_va(task->vm, bo) ==
		    f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2]) {
			f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
				f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] +
				bo->heap_size;
			task->recoverable = true;
			task->heap = bo;
			break;
		}
	}

	if (f[LIMA_GP_VSCL_START_ADDR >> 2] !=
	    f[LIMA_GP_VSCL_END_ADDR >> 2])
		cmd |= LIMA_GP_CMD_START_VS;
	if (f[LIMA_GP_PLBUCL_START_ADDR >> 2] !=
	    f[LIMA_GP_PLBUCL_END_ADDR >> 2])
		cmd |= LIMA_GP_CMD_START_PLBU;

	/* before any hw ops, wait for the last successful task's async soft reset */
	lima_gp_soft_reset_async_wait(ip);

	for (i = 0; i < LIMA_GP_FRAME_REG_NUM; i++)
		writel(f[i], ip->iomem + LIMA_GP_VSCL_START_ADDR + i * 4);

	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
	gp_write(LIMA_GP_CMD, cmd);
}

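/*
 * Hard reset uses LIMA_GP_PERF_CNT_0_LIMIT as a scratch register: one
 * magic value is written before issuing the reset, and the poll repeatedly
 * writes a different value until it reads back, which indicates the core
 * is accepting register writes again.
 */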
static int lima_gp_hard_reset_poll(struct lima_ip *ip)
{
	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000);
	return gp_read(LIMA_GP_PERF_CNT_0_LIMIT) == 0xC01A0000;
}

static int lima_gp_hard_reset(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int ret;

	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000);
	gp_write(LIMA_GP_INT_MASK, 0);
	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET);
	ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100);
	if (ret) {
		dev_err(dev->dev, "gp hard reset timeout\n");
		return ret;
	}

	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0);
	gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
	gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
	return 0;
}

static void lima_gp_task_fini(struct lima_sched_pipe *pipe)
{
	lima_gp_soft_reset_async(pipe->processor[0]);
}

static void lima_gp_task_error(struct lima_sched_pipe *pipe)
{
	struct lima_ip *ip = pipe->processor[0];

	dev_err(ip->dev->dev, "gp task error int_state=%x status=%x\n",
		gp_read(LIMA_GP_INT_STAT), gp_read(LIMA_GP_STATUS));

	lima_gp_hard_reset(ip);
}

static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
{
	lima_sched_pipe_task_done(pipe);
}

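/*
 * Recover from a PLBU out-of-memory interrupt: grow the heap via
 * lima_heap_alloc() when the failed allocation range already spans the
 * whole heap, then resume the PLBU allocation where it stopped (new start
 * is the old end) with the end moved out to the current heap size.
 */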
static int lima_gp_task_recover(struct lima_sched_pipe *pipe)
{
	struct lima_ip *ip = pipe->processor[0];
	struct lima_sched_task *task = pipe->current_task;
	struct drm_lima_gp_frame *frame = task->frame;
	u32 *f = frame->frame;
	size_t fail_size =
		f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] -
		f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2];

	if (fail_size == task->heap->heap_size) {
		int ret;

		ret = lima_heap_alloc(task->heap, task->vm);
		if (ret < 0)
			return ret;
	}

	gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
	/* Resume from where we stopped, i.e. new start is old end */
	gp_write(LIMA_GP_PLBU_ALLOC_START_ADDR,
		 f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
	f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
		f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] + task->heap->heap_size;
	gp_write(LIMA_GP_PLBU_ALLOC_END_ADDR,
		 f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
	return 0;
}

static void lima_gp_print_version(struct lima_ip *ip)
{
	u32 version, major, minor;
	char *name;

	version = gp_read(LIMA_GP_VERSION);
	major = (version >> 8) & 0xFF;
	minor = version & 0xFF;
	switch (version >> 16) {
	case 0xA07:
		name = "mali200";
		break;
	case 0xC07:
		name = "mali300";
		break;
	case 0xB07:
		name = "mali400";
		break;
	case 0xD07:
		name = "mali450";
		break;
	default:
		name = "unknown";
		break;
	}
	dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
		 lima_ip_name(ip), name, major, minor);
}

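/*
 * The GP task slab is created once and reference counted across
 * lima_gp_pipe_init()/lima_gp_pipe_fini() calls, so it can be shared by
 * multiple lima devices.
 */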
static struct kmem_cache *lima_gp_task_slab;
static int lima_gp_task_slab_refcnt;

static int lima_gp_hw_init(struct lima_ip *ip)
{
	ip->data.async_reset = false;
	lima_gp_soft_reset_async(ip);
	return lima_gp_soft_reset_async_wait(ip);
}

int lima_gp_resume(struct lima_ip *ip)
{
	return lima_gp_hw_init(ip);
}

void lima_gp_suspend(struct lima_ip *ip)
{

}

int lima_gp_init(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;

	lima_gp_print_version(ip);

	err = lima_gp_hw_init(ip);
	if (err)
		return err;

	err = devm_request_irq(dev->dev, ip->irq, lima_gp_irq_handler,
			       IRQF_SHARED, lima_ip_name(ip), ip);
	if (err) {
		dev_err(dev->dev, "gp %s fail to request irq\n",
			lima_ip_name(ip));
		return err;
	}

	dev->gp_version = gp_read(LIMA_GP_VERSION);

	return 0;
}

void lima_gp_fini(struct lima_ip *ip)
{

}

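/*
 * Tasks are allocated from a usercopy slab: the frame data appended to
 * struct lima_sched_task (the trailing frame_size bytes of each object)
 * is the only region whitelisted for copies to and from user space.
 */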
int lima_gp_pipe_init(struct lima_device *dev)
{
	int frame_size = sizeof(struct drm_lima_gp_frame);
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;

	if (!lima_gp_task_slab) {
		lima_gp_task_slab = kmem_cache_create_usercopy(
			"lima_gp_task", sizeof(struct lima_sched_task) + frame_size,
			0, SLAB_HWCACHE_ALIGN, sizeof(struct lima_sched_task),
			frame_size, NULL);
		if (!lima_gp_task_slab)
			return -ENOMEM;
	}
	lima_gp_task_slab_refcnt++;

	pipe->frame_size = frame_size;
	pipe->task_slab = lima_gp_task_slab;

	pipe->task_validate = lima_gp_task_validate;
	pipe->task_run = lima_gp_task_run;
	pipe->task_fini = lima_gp_task_fini;
	pipe->task_error = lima_gp_task_error;
	pipe->task_mmu_error = lima_gp_task_mmu_error;
	pipe->task_recover = lima_gp_task_recover;

	return 0;
}

void lima_gp_pipe_fini(struct lima_device *dev)
{
	if (!--lima_gp_task_slab_refcnt) {
		kmem_cache_destroy(lima_gp_task_slab);
		lima_gp_task_slab = NULL;
	}
}