cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

etnaviv_dump.c (6081B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/devcoredump.h>
#include <linux/moduleparam.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"

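/*
 * Dump only the first GPU hang after load; the 0600 dump_core module
 * parameter (typically /sys/module/etnaviv/parameters/dump_core) can be
 * written to re-arm dumping.
 */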
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);

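/*
 * Cursor over the in-memory dump file: @start is the base of the buffer,
 * @hdr the next object header slot to fill, @data the position where the
 * next payload gets written.
 */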
struct core_dump_iterator {
	void *start;
	struct etnaviv_dump_object_header *hdr;
	void *data;
};

static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_AXI_STATUS,
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_IDLE_STATE,
	VIVS_HI_AXI_CONFIG,
	VIVS_HI_INTR_ENBL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_FEATURE,
	VIVS_HI_CHIP_MODEL,
	VIVS_HI_CHIP_REV,
	VIVS_HI_CHIP_DATE,
	VIVS_HI_CHIP_TIME,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_HI_AXI_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_PM_PULSE_EATER,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_DEBUG_MEMORY,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_MC_BUS_CONFIG,
	VIVS_FE_DMA_STATUS,
	VIVS_FE_DMA_DEBUG_STATE,
	VIVS_FE_DMA_ADDRESS,
	VIVS_FE_DMA_LOW,
	VIVS_FE_DMA_HIGH,
	VIVS_FE_AUTO_FLUSH,
};

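/*
 * Close out the object that was just written between iter->data and
 * data_end: fill in its header and advance both the header and data
 * cursors.
 */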
static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
	u32 type, void *data_end)
{
	struct etnaviv_dump_object_header *hdr = iter->hdr;

	hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
	hdr->type = cpu_to_le32(type);
	hdr->file_offset = cpu_to_le32(iter->data - iter->start);
	hdr->file_size = cpu_to_le32(data_end - iter->data);

	iter->hdr++;
	iter->data += le32_to_cpu(hdr->file_size);
}

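/* Capture each register in etnaviv_dump_registers[] as a reg/value pair. */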
static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
	struct etnaviv_gpu *gpu)
{
	struct etnaviv_dump_registers *reg = iter->data;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
		reg->reg = cpu_to_le32(etnaviv_dump_registers[i]);
		reg->value = cpu_to_le32(gpu_read(gpu, etnaviv_dump_registers[i]));
	}

	etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
}

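/* Dump the MMU page tables; the caller holds the MMU context lock. */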
static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
	struct etnaviv_iommu_context *mmu, size_t mmu_size)
{
	etnaviv_iommu_dump(mmu, iter->data);

	etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
}

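/*
 * Copy a kernel-mapped buffer into the dump and record the GPU virtual
 * address (iova) it was mapped at.
 */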
static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
	void *ptr, size_t size, u64 iova)
{
	memcpy(iter->data, ptr, size);

	iter->hdr->iova = cpu_to_le64(iova);

	etnaviv_core_dump_header(iter, type, iter->data + size);
}

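/*
 * Build the devcoredump for a hanging submit: a table of object headers
 * followed by the register snapshot, MMU state, kernel ring buffer, the
 * offending command buffer and every buffer object referenced by the
 * submit.
 */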
void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	struct core_dump_iterator iter;
	struct etnaviv_gem_object *obj;
	unsigned int n_obj, n_bomap_pages;
	size_t file_size, mmu_size;
	__le64 *bomap, *bomap_start;
	int i;

	/* Only catch the first event, or when manually re-armed */
	if (!etnaviv_dump_core)
		return;
	etnaviv_dump_core = false;

	mutex_lock(&gpu->mmu_context->lock);

	mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);

	/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
	n_obj = 5;
	n_bomap_pages = 0;
	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
			sizeof(struct etnaviv_dump_registers) +
		    mmu_size + gpu->buffer.size + submit->cmdbuf.size;

	/* Add in the active buffer objects */
	for (i = 0; i < submit->nr_bos; i++) {
		obj = submit->bos[i].obj;
		file_size += obj->base.size;
		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
		n_obj++;
	}

	/* If we have any buffer objects, add a bomap object */
	if (n_bomap_pages) {
		file_size += n_bomap_pages * sizeof(__le64);
		n_obj++;
	}

	/* Add the size of the headers */
	file_size += sizeof(*iter.hdr) * n_obj;

	/* Allocate the file in vmalloc memory, it's likely to be big */
	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
			__GFP_NORETRY);
	if (!iter.start) {
		mutex_unlock(&gpu->mmu_context->lock);
		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
		return;
	}

	/* Point the data member after the headers */
	iter.hdr = iter.start;
	iter.data = &iter.hdr[n_obj];

	memset(iter.hdr, 0, iter.data - iter.start);

	etnaviv_core_dump_registers(&iter, gpu);
	etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
			      gpu->buffer.size,
			      etnaviv_cmdbuf_get_va(&gpu->buffer,
					&gpu->mmu_context->cmdbuf_mapping));

	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
			      submit->cmdbuf.vaddr, submit->cmdbuf.size,
			      etnaviv_cmdbuf_get_va(&submit->cmdbuf,
					&gpu->mmu_context->cmdbuf_mapping));

	mutex_unlock(&gpu->mmu_context->lock);

	/* Reserve space for the bomap */
	if (n_bomap_pages) {
		bomap_start = bomap = iter.data;
		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
					 bomap + n_bomap_pages);
	} else {
		/* Silence warning */
		bomap_start = bomap = NULL;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_vram_mapping *vram;
		struct page **pages;
		void *vaddr;

		obj = submit->bos[i].obj;
		vram = submit->bos[i].mapping;

		mutex_lock(&obj->lock);
		pages = etnaviv_gem_get_pages(obj);
		mutex_unlock(&obj->lock);
		if (!IS_ERR(pages)) {
			int j;

			iter.hdr->data[0] = cpu_to_le32((bomap - bomap_start));

			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
		}

		iter.hdr->iova = cpu_to_le64(vram->iova);

		vaddr = etnaviv_gem_vmap(&obj->base);
		if (vaddr)
			memcpy(iter.data, vaddr, obj->base.size);

		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
					 obj->base.size);
	}

	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}
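dev_coredumpv() hands the assembled buffer to the kernel's devcoredump framework, so on a kernel with CONFIG_DEV_COREDUMP the dump can typically be read back from the devcd device that appears under /sys/class/devcoredump/, and a further dump can be re-armed by writing 1 to the dump_core module parameter.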