cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gp102.c (10897B)


/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/memory.h>
#include <subdev/acr.h>
#include <subdev/timer.h>

#include <nvfw/flcn.h>
#include <nvfw/sec2.h>

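/* Fallback fwif entry used when no SEC2 firmware could be found: warn and
 * continue without it.
 */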
int
gp102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
		const struct nvkm_sec2_fwif *fwif)
{
	nvkm_warn(&sec2->engine.subdev, "firmware unavailable\n");
	return 0;
}

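/* Completion callback for ACR_BOOTSTRAP_FALCON: decode the reply and report
 * whether the requested falcon was booted successfully.
 */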
static int
gp102_sec2_acr_bootstrap_falcon_callback(void *priv, struct nvfw_falcon_msg *hdr)
{
	struct nv_sec2_acr_bootstrap_falcon_msg *msg =
		container_of(hdr, typeof(*msg), msg.hdr);
	struct nvkm_subdev *subdev = priv;
	const char *name = nvkm_acr_lsf_id(msg->falcon_id);

	if (msg->error_code) {
		nvkm_error(subdev, "ACR_BOOTSTRAP_FALCON failed for "
				   "falcon %d [%s]: %08x\n",
			   msg->falcon_id, name, msg->error_code);
		return -EINVAL;
	}

	nvkm_debug(subdev, "%s booted\n", name);
	return 0;
}

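/* Ask the SEC2 ACR unit to (re)bootstrap another low-secure falcon, waiting
 * up to one second for the reply handled by the callback above.
 */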
static int
gp102_sec2_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
			        enum nvkm_acr_lsf_id id)
{
	struct nvkm_sec2 *sec2 = container_of(falcon, typeof(*sec2), falcon);
	struct nv_sec2_acr_bootstrap_falcon_cmd cmd = {
		.cmd.hdr.unit_id = sec2->func->unit_acr,
		.cmd.hdr.size = sizeof(cmd),
		.cmd.cmd_type = NV_SEC2_ACR_CMD_BOOTSTRAP_FALCON,
		.flags = NV_SEC2_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
		.falcon_id = id,
	};

	return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr,
				     gp102_sec2_acr_bootstrap_falcon_callback,
				     &sec2->engine.subdev,
				     msecs_to_jiffies(1000));
}

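/* Start the SEC2 RTOS: write a zeroed argument block at the EMEM offset and
 * kick off the falcon.
 */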
static int
gp102_sec2_acr_boot(struct nvkm_falcon *falcon)
{
	struct nv_sec2_args args = {};
	nvkm_falcon_load_dmem(falcon, &args,
			      falcon->func->emem_addr, sizeof(args), 0);
	nvkm_falcon_start(falcon);
	return 0;
}

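/* Relocate the DMA base addresses in a loader_config_v1 bootloader descriptor
 * that already resides in the WPR image.
 */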
static void
gp102_sec2_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
{
	struct loader_config_v1 hdr;
	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
	hdr.code_dma_base = hdr.code_dma_base + adjust;
	hdr.data_dma_base = hdr.data_dma_base + adjust;
	hdr.overlay_dma_base = hdr.overlay_dma_base + adjust;
	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
	loader_config_v1_dump(&acr->subdev, &hdr);
}

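/* Fill in the loader_config_v1 bootloader descriptor for SEC2's LS firmware
 * and write it into the WPR image.
 */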
static void
gp102_sec2_acr_bld_write(struct nvkm_acr *acr, u32 bld,
			 struct nvkm_acr_lsfw *lsfw)
{
	const struct loader_config_v1 hdr = {
		.dma_idx = FALCON_SEC2_DMAIDX_UCODE,
		.code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
		.code_size_total = lsfw->app_size,
		.code_size_to_load = lsfw->app_resident_code_size,
		.code_entry_point = lsfw->app_imem_entry,
		.data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
				 lsfw->app_resident_data_offset,
		.data_size = lsfw->app_resident_data_size,
		.overlay_dma_base = lsfw->offset.img + lsfw->app_start_offset,
		.argc = 1,
		.argv = lsfw->falcon->func->emem_addr,
	};

	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}

static const struct nvkm_acr_lsf_func
gp102_sec2_acr_0 = {
	.bld_size = sizeof(struct loader_config_v1),
	.bld_write = gp102_sec2_acr_bld_write,
	.bld_patch = gp102_sec2_acr_bld_patch,
	.boot = gp102_sec2_acr_boot,
	.bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_FECS) |
			     BIT_ULL(NVKM_ACR_LSF_GPCCS) |
			     BIT_ULL(NVKM_ACR_LSF_SEC2),
	.bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
};

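/* Parse the RTOS init message and configure the host-side command and
 * message queues it describes.
 */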
int
gp102_sec2_initmsg(struct nvkm_sec2 *sec2)
{
	struct nv_sec2_init_msg msg;
	int ret, i;

	ret = nvkm_falcon_msgq_recv_initmsg(sec2->msgq, &msg, sizeof(msg));
	if (ret)
		return ret;

	if (msg.hdr.unit_id != NV_SEC2_UNIT_INIT ||
	    msg.msg_type != NV_SEC2_INIT_MSG_INIT)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(msg.queue_info); i++) {
		if (msg.queue_info[i].id == NV_SEC2_INIT_MSG_QUEUE_ID_MSGQ) {
			nvkm_falcon_msgq_init(sec2->msgq,
					      msg.queue_info[i].index,
					      msg.queue_info[i].offset,
					      msg.queue_info[i].size);
		} else {
			nvkm_falcon_cmdq_init(sec2->cmdq,
					      msg.queue_info[i].index,
					      msg.queue_info[i].offset,
					      msg.queue_info[i].size);
		}
	}

	return 0;
}

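/* Top-half interrupt handler: bit 0x40 is acked and handled by scheduling the
 * SEC2 work item (message processing); any other bits are reported as
 * unhandled and acked.
 */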
void
gp102_sec2_intr(struct nvkm_sec2 *sec2)
{
	struct nvkm_subdev *subdev = &sec2->engine.subdev;
	struct nvkm_falcon *falcon = &sec2->falcon;
	u32 disp = nvkm_falcon_rd32(falcon, 0x01c);
	u32 intr = nvkm_falcon_rd32(falcon, 0x008) & disp & ~(disp >> 16);

	if (intr & 0x00000040) {
		schedule_work(&sec2->work);
		nvkm_falcon_wr32(falcon, 0x004, 0x00000040);
		intr &= ~0x00000040;
	}

	if (intr) {
		nvkm_error(subdev, "unhandled intr %08x\n", intr);
		nvkm_falcon_wr32(falcon, 0x004, intr);
	}
}

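/* Pulse bit 0 of falcon register 0x3c0 before running the common falcon v1
 * enable sequence.
 */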
int
gp102_sec2_flcn_enable(struct nvkm_falcon *falcon)
{
	nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000001);
	udelay(10);
	nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000000);
	return nvkm_falcon_v1_enable(falcon);
}

void
gp102_sec2_flcn_bind_context(struct nvkm_falcon *falcon,
			     struct nvkm_memory *ctx)
{
	struct nvkm_device *device = falcon->owner->device;

	nvkm_falcon_v1_bind_context(falcon, ctx);
	if (!ctx)
		return;

	/* Not sure if this is a WAR for a HW issue, or some additional
	 * programming sequence that's needed to properly complete the
	 * context switch we trigger above.
	 *
	 * Fixes unreliability of booting the SEC2 RTOS on Quadro P620,
	 * particularly when resuming from suspend.
	 *
	 * Also removes the need for an odd workaround where we needed
	 * to program SEC2's FALCON_CPUCTL_ALIAS_STARTCPU twice before
	 * the SEC2 RTOS would begin executing.
	 */
	nvkm_msec(device, 10,
		u32 irqstat = nvkm_falcon_rd32(falcon, 0x008);
		u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
		if ((irqstat & 0x00000008) &&
		    (flcn0dc & 0x00007000) == 0x00005000)
			break;
	);

	nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
	nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);

	nvkm_msec(device, 10,
		u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
		if ((flcn0dc & 0x00007000) == 0x00000000)
			break;
	);
}

static const struct nvkm_falcon_func
gp102_sec2_flcn = {
	.debug = 0x408,
	.fbif = 0x600,
	.load_imem = nvkm_falcon_v1_load_imem,
	.load_dmem = nvkm_falcon_v1_load_dmem,
	.read_dmem = nvkm_falcon_v1_read_dmem,
	.emem_addr = 0x01000000,
	.bind_context = gp102_sec2_flcn_bind_context,
	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
	.set_start_addr = nvkm_falcon_v1_set_start_addr,
	.start = nvkm_falcon_v1_start,
	.enable = gp102_sec2_flcn_enable,
	.disable = nvkm_falcon_v1_disable,
	.cmdq = { 0xa00, 0xa04, 8 },
	.msgq = { 0xa30, 0xa34, 8 },
};

const struct nvkm_sec2_func
gp102_sec2 = {
	.flcn = &gp102_sec2_flcn,
	.unit_acr = NV_SEC2_UNIT_ACR,
	.intr = gp102_sec2_intr,
	.initmsg = gp102_sec2_initmsg,
};

MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");

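/* Bootloader descriptor handling for firmware that uses flcn_bl_dmem_desc_v2,
 * paired with gp102_sec2_acr_1 below.
 */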
static void
gp102_sec2_acr_bld_patch_1(struct nvkm_acr *acr, u32 bld, s64 adjust)
{
	struct flcn_bl_dmem_desc_v2 hdr;
	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
	hdr.code_dma_base = hdr.code_dma_base + adjust;
	hdr.data_dma_base = hdr.data_dma_base + adjust;
	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
	flcn_bl_dmem_desc_v2_dump(&acr->subdev, &hdr);
}

static void
gp102_sec2_acr_bld_write_1(struct nvkm_acr *acr, u32 bld,
			   struct nvkm_acr_lsfw *lsfw)
{
	const struct flcn_bl_dmem_desc_v2 hdr = {
		.ctx_dma = FALCON_SEC2_DMAIDX_UCODE,
		.code_dma_base = lsfw->offset.img + lsfw->app_start_offset,
		.non_sec_code_off = lsfw->app_resident_code_offset,
		.non_sec_code_size = lsfw->app_resident_code_size,
		.code_entry_point = lsfw->app_imem_entry,
		.data_dma_base = lsfw->offset.img + lsfw->app_start_offset +
				 lsfw->app_resident_data_offset,
		.data_size = lsfw->app_resident_data_size,
		.argc = 1,
		.argv = lsfw->falcon->func->emem_addr,
	};

	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}

const struct nvkm_acr_lsf_func
gp102_sec2_acr_1 = {
	.bld_size = sizeof(struct flcn_bl_dmem_desc_v2),
	.bld_write = gp102_sec2_acr_bld_write_1,
	.bld_patch = gp102_sec2_acr_bld_patch_1,
	.boot = gp102_sec2_acr_boot,
	.bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_FECS) |
			     BIT_ULL(NVKM_ACR_LSF_GPCCS) |
			     BIT_ULL(NVKM_ACR_LSF_SEC2),
	.bootstrap_falcon = gp102_sec2_acr_bootstrap_falcon,
};

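/* Load the signed LS firmware (image, descriptor and signature) for the SEC2
 * falcon from the "sec2/" firmware directory.
 */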
int
gp102_sec2_load(struct nvkm_sec2 *sec2, int ver,
		const struct nvkm_sec2_fwif *fwif)
{
	return nvkm_acr_lsfw_load_sig_image_desc_v1(&sec2->engine.subdev,
						    &sec2->falcon,
						    NVKM_ACR_LSF_SEC2, "sec2/",
						    ver, fwif->acr);
}

MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");

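/* Firmware interface table: prefer the "-1" firmware with the v2 descriptor,
 * fall back to the original firmware, and run without firmware as a last
 * resort.
 */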
static const struct nvkm_sec2_fwif
gp102_sec2_fwif[] = {
	{  1, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_1 },
	{  0, gp102_sec2_load, &gp102_sec2, &gp102_sec2_acr_0 },
	{ -1, gp102_sec2_nofw, &gp102_sec2 },
	{}
};

int
gp102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_sec2 **psec2)
{
	return nvkm_sec2_new_(gp102_sec2_fwif, device, type, inst, 0, psec2);
}