cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gm20b.c (7349B)


/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/memory.h>
#include <subdev/acr.h>

#include <nvfw/flcn.h>
#include <nvfw/pmu.h>

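/*
 * Completion callback for the ACR BOOTSTRAP_FALCON command: unpack the
 * reply and hand the echoed falcon ID back to the command-queue core,
 * which returns it from nvkm_falcon_cmdq_send().
 */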
static int
gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nvfw_falcon_msg *hdr)
{
	struct nv_pmu_acr_bootstrap_falcon_msg *msg =
		container_of(hdr, typeof(*msg), msg.hdr);
	return msg->falcon_id;
}

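/*
 * Ask the PMU's ACR unit to reset and bootstrap another LS falcon.
 * The command goes out on the high-priority queue and we wait up to a
 * second for the reply; the PMU echoes the falcon ID back, so anything
 * other than the ID we asked for is treated as an I/O error.
 */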
int
gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
			       enum nvkm_acr_lsf_id id)
{
	struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
	struct nv_pmu_acr_bootstrap_falcon_cmd cmd = {
		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
		.cmd.hdr.size = sizeof(cmd),
		.cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON,
		.flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
		.falcon_id = id,
	};
	int ret;

	ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
				    gm20b_pmu_acr_bootstrap_falcon_cb,
				    &pmu->subdev, msecs_to_jiffies(1000));
	if (ret >= 0) {
		if (ret != cmd.falcon_id)
			ret = -EIO;
		else
			ret = 0;
	}

	return ret;
}

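/*
 * Boot the PMU falcon itself: write the boot arguments (secure mode
 * enabled) to the top of DMEM, where the bootloader descriptor's .argv
 * field points, then start the falcon.
 */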
int
gm20b_pmu_acr_boot(struct nvkm_falcon *falcon)
{
	struct nv_pmu_args args = { .secure_mode = true };
	const u32 addr_args = falcon->data.limit - sizeof(struct nv_pmu_args);
	nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
	nvkm_falcon_start(falcon);
	return 0;
}

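/*
 * Relocate the DMA addresses in the PMU's bootloader descriptor after
 * the WPR image has moved by 'adjust' bytes.  The descriptor stores
 * each address right-shifted by 8, split into a low 32-bit field
 * (_dma_base) and a high field (_dma_base1), so each address is
 * reassembled, adjusted, and split back up.
 *
 * For example, addr 0x123_4567_8900 is stored as dma_base 0x23456789
 * and dma_base1 0x1, and reassembles as (0x1 << 40) | (0x23456789 << 8).
 */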
void
gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
{
	struct loader_config hdr;
	u64 addr;

	nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
	addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8);
	hdr.code_dma_base  = lower_32_bits((addr + adjust) >> 8);
	hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8);
	addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8);
	hdr.data_dma_base  = lower_32_bits((addr + adjust) >> 8);
	hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8);
	addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8);
	hdr.overlay_dma_base  = lower_32_bits((addr + adjust) >> 8);
	hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) >> 8);
	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));

	loader_config_dump(&acr->subdev, &hdr);
}

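/*
 * Fill in the PMU's bootloader descriptor in the WPR image.  Code,
 * data and overlay addresses are stored right-shifted by 8 as above,
 * and .argv points at the DMEM address where gm20b_pmu_acr_boot()
 * later deposits the nv_pmu_args structure.
 */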
void
gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld,
			struct nvkm_acr_lsfw *lsfw)
{
	const u64 base = lsfw->offset.img + lsfw->app_start_offset;
	const u64 code = (base + lsfw->app_resident_code_offset) >> 8;
	const u64 data = (base + lsfw->app_resident_data_offset) >> 8;
	const struct loader_config hdr = {
		.dma_idx = FALCON_DMAIDX_UCODE,
		.code_dma_base = lower_32_bits(code),
		.code_size_total = lsfw->app_size,
		.code_size_to_load = lsfw->app_resident_code_size,
		.code_entry_point = lsfw->app_imem_entry,
		.data_dma_base = lower_32_bits(data),
		.data_size = lsfw->app_resident_data_size,
		.overlay_dma_base = lower_32_bits(code),
		.argc = 1,
		.argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args),
		.code_dma_base1 = upper_32_bits(code),
		.data_dma_base1 = upper_32_bits(data),
		.overlay_dma_base1 = upper_32_bits(code),
	};

	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}

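/*
 * LS falcon hooks for the PMU.  Besides booting itself, this PMU can
 * bootstrap the FECS and GPCCS falcons on behalf of the ACR.
 */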
static const struct nvkm_acr_lsf_func
gm20b_pmu_acr = {
	.flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
	.bld_size = sizeof(struct loader_config),
	.bld_write = gm20b_pmu_acr_bld_write,
	.bld_patch = gm20b_pmu_acr_bld_patch,
	.boot = gm20b_pmu_acr_boot,
	.bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_PMU) |
			     BIT_ULL(NVKM_ACR_LSF_FECS) |
			     BIT_ULL(NVKM_ACR_LSF_GPCCS),
	.bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
};

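/*
 * Completion callback for the INIT_WPR_REGION command: on success,
 * signal pmu->wpr_ready so waiters know LS falcons can now be
 * bootstrapped.
 */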
static int
gm20b_pmu_acr_init_wpr_callback(void *priv, struct nvfw_falcon_msg *hdr)
{
	struct nv_pmu_acr_init_wpr_region_msg *msg =
		container_of(hdr, typeof(*msg), msg.hdr);
	struct nvkm_pmu *pmu = priv;
	struct nvkm_subdev *subdev = &pmu->subdev;

	if (msg->error_code) {
		nvkm_error(subdev, "ACR WPR init failure: %d\n",
			   msg->error_code);
		return -EINVAL;
	}

	nvkm_debug(subdev, "ACR WPR init complete\n");
	complete_all(&pmu->wpr_ready);
	return 0;
}

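/*
 * Point the PMU's ACR unit at WPR region 1.  No timeout is passed, so
 * the send does not block for a reply; completion is reported
 * asynchronously through the callback above.
 */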
static int
gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu)
{
	struct nv_pmu_acr_init_wpr_region_cmd cmd = {
		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
		.cmd.hdr.size = sizeof(cmd),
		.cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION,
		.region_id = 1,
		.wpr_offset = 0,
	};

	return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
				     gm20b_pmu_acr_init_wpr_callback, pmu, 0);
}

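/*
 * Parse the INIT message the PMU firmware posts once it is running.
 * It describes where each queue lives in DMEM: entries 0 and 1 are the
 * high- and low-priority command queues, entry 4 the message queue.
 * With the queues up, kick off WPR initialisation.
 */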
int
gm20b_pmu_initmsg(struct nvkm_pmu *pmu)
{
	struct nv_pmu_init_msg msg;
	int ret;

	ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg));
	if (ret)
		return ret;

	if (msg.hdr.unit_id != NV_PMU_UNIT_INIT ||
	    msg.msg_type != NV_PMU_INIT_MSG_INIT)
		return -EINVAL;

	nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index,
					msg.queue_info[0].offset,
					msg.queue_info[0].size);
	nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index,
					msg.queue_info[1].offset,
					msg.queue_info[1].size);
	nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index,
					 msg.queue_info[4].offset,
					 msg.queue_info[4].size);
	return gm20b_pmu_acr_init_wpr(pmu);
}

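/*
 * Message-queue interrupt handler.  The first message to arrive must
 * be the INIT message; once it has been consumed, drain any pending
 * messages from the queue.
 */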
void
gm20b_pmu_recv(struct nvkm_pmu *pmu)
{
	if (!pmu->initmsg_received) {
		int ret = pmu->func->initmsg(pmu);
		if (ret) {
			nvkm_error(&pmu->subdev,
				   "error parsing init message: %d\n", ret);
			return;
		}

		pmu->initmsg_received = true;
	}

	nvkm_falcon_msgq_recv(pmu->msgq);
}

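/* GM20B PMU implementation, largely reusing GF100/GT215/GM200 hooks. */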
static const struct nvkm_pmu_func
gm20b_pmu = {
	.flcn = &gm200_pmu_flcn,
	.enabled = gf100_pmu_enabled,
	.intr = gt215_pmu_intr,
	.recv = gm20b_pmu_recv,
	.initmsg = gm20b_pmu_initmsg,
	.reset = gf100_pmu_reset,
};

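/* PMU firmware blobs for GM20B, built only for Tegra210 kernels. */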
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
#endif

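/*
 * Load the signed LS firmware triplet (image/desc/sig) for the PMU so
 * the ACR can include it in the WPR image.
 */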
int
gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
{
	return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon,
						 NVKM_ACR_LSF_PMU, "pmu/",
						 ver, fwif->acr);
}

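/*
 * Firmware interfaces: prefer the signed firmware path; fall back to
 * running without PMU firmware if it is unavailable.
 */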
static const struct nvkm_pmu_fwif
gm20b_pmu_fwif[] = {
	{  0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
	{ -1, gm200_pmu_nofw, &gm20b_pmu },
	{}
};

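/* Constructor: create the GM20B PMU subdev from the table above. */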
int
gm20b_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_pmu **ppmu)
{
	return nvkm_pmu_new_(gm20b_pmu_fwif, device, type, inst, ppmu);
}