cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

gk20a.c (9165B)


      1/*
      2 * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
     20 * DEALINGS IN THE SOFTWARE.
     21 */
     22#include "gf100.h"
     23#include "ctxgf100.h"
     24
     25#include <core/firmware.h>
     26#include <subdev/timer.h>
     27
     28#include <nvif/class.h>
     29
/*
 * One entry of a firmware-provided "address/value" (AV) register list:
 * a register address and the value to program into it.
 */
struct gk20a_fw_av
{
	u32 addr;
	u32 data;
};
     35
     36static int
     37gk20a_gr_av_to_init(struct gf100_gr *gr, const char *path, const char *name,
     38		    int ver, struct gf100_gr_pack **ppack)
     39{
     40	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
     41	struct nvkm_blob blob;
     42	struct gf100_gr_init *init;
     43	struct gf100_gr_pack *pack;
     44	int nent;
     45	int ret;
     46	int i;
     47
     48	ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
     49	if (ret)
     50		return ret;
     51
     52	nent = (blob.size / sizeof(struct gk20a_fw_av));
     53	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
     54	if (!pack) {
     55		ret = -ENOMEM;
     56		goto end;
     57	}
     58
     59	init = (void *)(pack + 2);
     60	pack[0].init = init;
     61
     62	for (i = 0; i < nent; i++) {
     63		struct gf100_gr_init *ent = &init[i];
     64		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i];
     65
     66		ent->addr = av->addr;
     67		ent->data = av->data;
     68		ent->count = 1;
     69		ent->pitch = 1;
     70	}
     71
     72	*ppack = pack;
     73
     74end:
     75	nvkm_blob_dtor(&blob);
     76	return ret;
     77}
     78
/*
 * One entry of a firmware-provided "address/index/value" (AIV) list.
 * NOTE(review): the conversion below copies only addr and data; the
 * index field is carried in the firmware format but not consumed here.
 */
struct gk20a_fw_aiv
{
	u32 addr;
	u32 index;
	u32 data;
};
     85
     86static int
     87gk20a_gr_aiv_to_init(struct gf100_gr *gr, const char *path, const char *name,
     88		     int ver, struct gf100_gr_pack **ppack)
     89{
     90	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
     91	struct nvkm_blob blob;
     92	struct gf100_gr_init *init;
     93	struct gf100_gr_pack *pack;
     94	int nent;
     95	int ret;
     96	int i;
     97
     98	ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
     99	if (ret)
    100		return ret;
    101
    102	nent = (blob.size / sizeof(struct gk20a_fw_aiv));
    103	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
    104	if (!pack) {
    105		ret = -ENOMEM;
    106		goto end;
    107	}
    108
    109	init = (void *)(pack + 2);
    110	pack[0].init = init;
    111
    112	for (i = 0; i < nent; i++) {
    113		struct gf100_gr_init *ent = &init[i];
    114		struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)blob.data)[i];
    115
    116		ent->addr = av->addr;
    117		ent->data = av->data;
    118		ent->count = 1;
    119		ent->pitch = 1;
    120	}
    121
    122	*ppack = pack;
    123
    124end:
    125	nvkm_blob_dtor(&blob);
    126	return ret;
    127}
    128
/*
 * Load an "address/value" firmware blob and convert it into per-class
 * method packs.  Each record encodes the object class in the low 16
 * bits of addr and the method offset in the high 16 bits (the >> 14
 * shift turns the 16-bit field into the byte-offset method address).
 * A new pack is started every time the class changes; each class's
 * init list is terminated by a zeroed entry, which is why the
 * allocation reserves max_classes + 1 extra init slots and pack slots.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENOSPC if
 * the firmware references more than max_classes distinct classes.
 */
static int
gk20a_gr_av_to_method(struct gf100_gr *gr, const char *path, const char *name,
		      int ver, struct gf100_gr_pack **ppack)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_blob blob;
	struct gf100_gr_init *init;
	struct gf100_gr_pack *pack;
	/* We don't suppose we will initialize more than 16 classes here... */
	static const unsigned int max_classes = 16;
	u32 classidx = 0, prevclass = 0;
	int nent;
	int ret;
	int i;

	ret = nvkm_firmware_load_blob(subdev, path, name, ver, &blob);
	if (ret)
		return ret;

	nent = (blob.size / sizeof(struct gk20a_fw_av));

	pack = vzalloc((sizeof(*pack) * (max_classes + 1)) +
		       (sizeof(*init) * (nent + max_classes + 1)));
	if (!pack) {
		ret = -ENOMEM;
		goto end;
	}

	/* Init entries follow the pack array (max_classes + terminator). */
	init = (void *)(pack + max_classes + 1);

	for (i = 0; i < nent; i++, init++) {
		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)blob.data)[i];
		u32 class = av->addr & 0xffff;
		u32 addr = (av->addr & 0xffff0000) >> 14;

		if (prevclass != class) {
			if (prevclass) /* Add terminator to the method list. */
				init++;
			pack[classidx].init = init;
			pack[classidx].type = class;
			prevclass = class;
			if (++classidx >= max_classes) {
				/* Too many classes: free and bail out. */
				vfree(pack);
				ret = -ENOSPC;
				goto end;
			}
		}

		init->addr = addr;
		init->data = av->data;
		init->count = 1;
		init->pitch = 1;
	}

	*ppack = pack;

end:
	nvkm_blob_dtor(&blob);
	return ret;
}
    189
/*
 * Wait for the FECS and GPCCS falcons to finish scrubbing their
 * memories: poll each status register (0x40910c / 0x41a10c) until the
 * scrub-busy bits (0x6) clear, with a 2000 unit timeout per falcon
 * (presumably milliseconds, per nvkm_msec — confirm against the macro).
 *
 * Returns 0 when both are clean, -ETIMEDOUT if either poll expires.
 */
static int
gk20a_gr_wait_mem_scrubbing(struct gf100_gr *gr)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x40910c) & 0x00000006))
			break;
	) < 0) {
		nvkm_error(subdev, "FECS mem scrubbing timeout\n");
		return -ETIMEDOUT;
	}

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x41a10c) & 0x00000006))
			break;
	) < 0) {
		nvkm_error(subdev, "GPCCS mem scrubbing timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
    214
/*
 * Program the SM hardware-warning (HWW) ESR report masks so the
 * exceptions of interest are reported (registers 0x419e44/0x419e4c).
 * Called from gk20a_gr_init() via the func->set_hww_esr_report_mask hook.
 */
static void
gk20a_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	nvkm_wr32(device, 0x419e44, 0x1ffffe);
	nvkm_wr32(device, 0x419e4c, 0x7f);
}
    222
/*
 * GR engine init for GK20A (Tegra K1).  Performs the fixed hardware
 * bring-up sequence: clear SCC RAM, replay the non-context register
 * list from firmware, wait for falcon memory scrubbing and engine
 * idle, then configure MMU debug, zcull, ROPs, interrupts and
 * exceptions before handing off to the context-control firmware.
 * The register-write order below is part of the hardware contract —
 * do not reorder.
 *
 * Returns 0 on success or a negative errno from the wait/ctxctl steps.
 */
int
gk20a_gr_init(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	int ret;

	/* Clear SCC RAM */
	nvkm_wr32(device, 0x40802c, 0x1);

	/* Replay the firmware-provided non-context register list. */
	gf100_gr_mmio(gr, gr->sw_nonctx);

	ret = gk20a_gr_wait_mem_scrubbing(gr);
	if (ret)
		return ret;

	ret = gf100_gr_wait_idle(gr);
	if (ret)
		return ret;

	/* MMU debug buffer */
	if (gr->func->init_gpc_mmu)
		gr->func->init_gpc_mmu(gr);

	/* Set the PE as stream master */
	nvkm_mask(device, 0x503018, 0x1, 0x1);

	/* Zcull init */
	gr->func->init_zcull(gr);

	gr->func->init_rop_active_fbps(gr);

	/* Enable FIFO access */
	nvkm_wr32(device, 0x400500, 0x00010001);

	/* Enable interrupts */
	nvkm_wr32(device, 0x400100, 0xffffffff);
	nvkm_wr32(device, 0x40013c, 0xffffffff);

	/* Enable FECS error interrupts */
	nvkm_wr32(device, 0x409c24, 0x000f0000);

	/* Enable hardware warning exceptions */
	nvkm_wr32(device, 0x404000, 0xc0000000);
	nvkm_wr32(device, 0x404600, 0xc0000000);

	if (gr->func->set_hww_esr_report_mask)
		gr->func->set_hww_esr_report_mask(gr);

	/* Enable TPC exceptions per GPC (one mask bit per TPC, <<16). */
	nvkm_wr32(device, 0x419d0c, 0x2);
	nvkm_wr32(device, 0x41ac94, (((1 << gr->tpc_total) - 1) & 0xff) << 16);

	/* Reset and enable all exceptions */
	nvkm_wr32(device, 0x400108, 0xffffffff);
	nvkm_wr32(device, 0x400138, 0xffffffff);
	nvkm_wr32(device, 0x400118, 0xffffffff);
	nvkm_wr32(device, 0x400130, 0xffffffff);
	nvkm_wr32(device, 0x40011c, 0xffffffff);
	nvkm_wr32(device, 0x400134, 0xffffffff);

	gf100_gr_zbc_init(gr);

	return gf100_gr_init_ctxctl(gr);
}
    287
/*
 * GK20A GR function table: GK20A-specific init and HWW mask hooks,
 * with the remaining operations shared from the gf100/gf117/gk104
 * implementations.  The sclass list advertises the object classes
 * (2D, inline-to-memory, 3D, compute) exposed to userspace.
 */
static const struct gf100_gr_func
gk20a_gr = {
	.oneinit_tiles = gf100_gr_oneinit_tiles,
	.oneinit_sm_id = gf100_gr_oneinit_sm_id,
	.init = gk20a_gr_init,
	.init_zcull = gf117_gr_init_zcull,
	.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
	.trap_mp = gf100_gr_trap_mp,
	.set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
	.rops = gf100_gr_rops,
	.ppc_nr = 1,
	.grctx = &gk20a_grctx,
	.zbc = &gf100_gr_zbc,
	.sclass = {
		{ -1, -1, FERMI_TWOD_A },
		{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
		{ -1, -1, KEPLER_C, &gf100_fermi },
		{ -1, -1, KEPLER_COMPUTE_A },
		{}
	}
};
    309
    310int
    311gk20a_gr_load_sw(struct gf100_gr *gr, const char *path, int ver)
    312{
    313	if (gk20a_gr_av_to_init(gr, path, "sw_nonctx", ver, &gr->sw_nonctx) ||
    314	    gk20a_gr_aiv_to_init(gr, path, "sw_ctx", ver, &gr->sw_ctx) ||
    315	    gk20a_gr_av_to_init(gr, path, "sw_bundle_init", ver, &gr->bundle) ||
    316	    gk20a_gr_av_to_method(gr, path, "sw_method_init", ver, &gr->method))
    317		return -ENOENT;
    318
    319	return 0;
    320}
    321
/*
 * Advertise the GK20A firmware files to userspace tooling (modinfo /
 * initramfs generators) on the Tegra SoCs that carry this GPU.
 */
#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_132_SOC)
MODULE_FIRMWARE("nvidia/gk20a/fecs_data.bin");
MODULE_FIRMWARE("nvidia/gk20a/fecs_inst.bin");
MODULE_FIRMWARE("nvidia/gk20a/gpccs_data.bin");
MODULE_FIRMWARE("nvidia/gk20a/gpccs_inst.bin");
MODULE_FIRMWARE("nvidia/gk20a/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gk20a/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gk20a/sw_method_init.bin");
MODULE_FIRMWARE("nvidia/gk20a/sw_nonctx.bin");
#endif
    332
    333static int
    334gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
    335{
    336	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
    337
    338	if (nvkm_firmware_load_blob(subdev, "", "fecs_inst", ver,
    339				    &gr->fecs.inst) ||
    340	    nvkm_firmware_load_blob(subdev, "", "fecs_data", ver,
    341				    &gr->fecs.data) ||
    342	    nvkm_firmware_load_blob(subdev, "", "gpccs_inst", ver,
    343				    &gr->gpccs.inst) ||
    344	    nvkm_firmware_load_blob(subdev, "", "gpccs_data", ver,
    345				    &gr->gpccs.data))
    346		return -ENOENT;
    347
    348	gr->firmware = true;
    349
    350	return gk20a_gr_load_sw(gr, "", ver);
    351}
    352
/*
 * Firmware interface table: GK20A supports only the firmware-loaded
 * path (version 0), terminated by an empty sentinel entry.
 */
static const struct gf100_gr_fwif
gk20a_gr_fwif[] = {
	{ 0, gk20a_gr_load, &gk20a_gr },
	{}
};
    358
/*
 * Constructor entry point: instantiate the GR engine using the common
 * gf100 constructor with the GK20A firmware-interface table.
 */
int
gk20a_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
{
	return gf100_gr_new_(gk20a_gr_fwif, device, type, inst, pgr);
}