cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

base.c (5723B)


/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <subdev/mc.h>
#include <subdev/top.h>

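/*
 * Copy a blob into the falcon's instruction memory.  Writes with a secure
 * tag are refused (with a warning) on falcons that have no secret level.
 */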
void
nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
		      u32 size, u16 tag, u8 port, bool secure)
{
	if (secure && !falcon->secret) {
		nvkm_warn(falcon->user,
			  "writing with secure tag on a non-secure falcon!\n");
		return;
	}

	falcon->func->load_imem(falcon, data, start, size, tag, port,
				secure);
}

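/* Copy a blob into the falcon's data memory, serialized by dmem_mutex. */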
void
nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
		      u32 size, u8 port)
{
	mutex_lock(&falcon->dmem_mutex);

	falcon->func->load_dmem(falcon, data, start, size, port);

	mutex_unlock(&falcon->dmem_mutex);
}

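/* Read back from the falcon's data memory, serialized by dmem_mutex. */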
void
nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
		      void *data)
{
	mutex_lock(&falcon->dmem_mutex);

	falcon->func->read_dmem(falcon, start, size, port, data);

	mutex_unlock(&falcon->dmem_mutex);
}

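/*
 * Bind a memory context (instance block) to the falcon.  Only some
 * implementations provide a bind_context hook.
 */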
void
nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *inst)
{
	if (!falcon->func->bind_context) {
		nvkm_error(falcon->user,
			   "Context binding not supported on this falcon!\n");
		return;
	}

	falcon->func->bind_context(falcon, inst);
}

void
nvkm_falcon_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
	falcon->func->set_start_addr(falcon, start_addr);
}

void
nvkm_falcon_start(struct nvkm_falcon *falcon)
{
	falcon->func->start(falcon);
}

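/*
 * Enable the falcon: ungate it via the MC subdev, then run the
 * implementation's enable hook; on failure the MC gate is dropped again.
 */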
int
nvkm_falcon_enable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst);
	ret = falcon->func->enable(falcon);
	if (ret) {
		nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
		return ret;
	}

	return 0;
}

void
nvkm_falcon_disable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;

	/* already disabled, return or wait_idle will timeout */
	if (!nvkm_mc_enabled(device, falcon->owner->type, falcon->owner->inst))
		return;

	falcon->func->disable(falcon);

	nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
}

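/*
 * Reset the falcon.  Implementations with a dedicated reset hook use it;
 * otherwise fall back to a full disable/enable cycle.
 */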
int
nvkm_falcon_reset(struct nvkm_falcon *falcon)
{
	if (!falcon->func->reset) {
		nvkm_falcon_disable(falcon);
		return nvkm_falcon_enable(falcon);
	}

	return falcon->func->reset(falcon);
}

int
nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
	return falcon->func->wait_for_halt(falcon, ms);
}

int
nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
	return falcon->func->clear_interrupt(falcon, mask);
}

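/*
 * One-time discovery of the falcon's layout: resolve its base address via
 * nvkm_top_addr() if none was given, then decode the hardware config
 * registers (0x12c, 0x108) for version, secret level, port counts and the
 * IMEM/DMEM limits, and sample the debug bit if the implementation exposes
 * a debug register.
 */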
static int
nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
{
	const struct nvkm_falcon_func *func = falcon->func;
	const struct nvkm_subdev *subdev = falcon->owner;
	u32 reg;

	if (!falcon->addr) {
		falcon->addr = nvkm_top_addr(subdev->device, subdev->type, subdev->inst);
		if (WARN_ON(!falcon->addr))
			return -ENODEV;
	}

	reg = nvkm_falcon_rd32(falcon, 0x12c);
	falcon->version = reg & 0xf;
	falcon->secret = (reg >> 4) & 0x3;
	falcon->code.ports = (reg >> 8) & 0xf;
	falcon->data.ports = (reg >> 12) & 0xf;

	reg = nvkm_falcon_rd32(falcon, 0x108);
	falcon->code.limit = (reg & 0x1ff) << 8;
	falcon->data.limit = (reg & 0x3fe00) >> 1;

	if (func->debug) {
		u32 val = nvkm_falcon_rd32(falcon, func->debug);
		falcon->debug = (val >> 20) & 0x1;
	}

	return 0;
}

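/* Release the falcon if it is currently held by @user. */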
void
nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
	if (unlikely(!falcon))
		return;

	mutex_lock(&falcon->mutex);
	if (falcon->user == user) {
		nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
		falcon->user = NULL;
	}
	mutex_unlock(&falcon->mutex);
}

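/*
 * Acquire exclusive use of the falcon for @user, running one-time
 * initialisation if it has not happened yet.  Returns -EBUSY if another
 * subdev already holds the falcon.
 */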
int
nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
	int ret = 0;

	mutex_lock(&falcon->mutex);
	if (falcon->user) {
		nvkm_error(user, "%s falcon already acquired by %s!\n",
			   falcon->name, falcon->user->name);
		mutex_unlock(&falcon->mutex);
		return -EBUSY;
	}

	nvkm_debug(user, "acquired %s falcon\n", falcon->name);
	if (!falcon->oneinit)
		ret = nvkm_falcon_oneinit(falcon);
	falcon->user = user;
	mutex_unlock(&falcon->mutex);
	return ret;
}

void
nvkm_falcon_dtor(struct nvkm_falcon *falcon)
{
}

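/* Basic constructor: record the hooks, owner, name and base address,
 * and initialise the acquisition and DMEM mutexes. */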
int
nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
		 struct nvkm_subdev *subdev, const char *name, u32 addr,
		 struct nvkm_falcon *falcon)
{
	falcon->func = func;
	falcon->owner = subdev;
	falcon->name = name;
	falcon->addr = addr;
	mutex_init(&falcon->mutex);
	mutex_init(&falcon->dmem_mutex);
	return 0;
}

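/* Tear down and free *pfalcon, clearing the caller's pointer. */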
void
nvkm_falcon_del(struct nvkm_falcon **pfalcon)
{
	if (*pfalcon) {
		nvkm_falcon_dtor(*pfalcon);
		kfree(*pfalcon);
		*pfalcon = NULL;
	}
}
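
The helpers above compose into a simple acquire / reset / load / start / wait / release sequence. The sketch below is not part of base.c: it shows how a caller might drive a falcon using only the functions defined in this file. example_run_firmware, its buffers and sizes, and the 100ms timeout are hypothetical placeholders, and the same headers as base.c are assumed.

/* Hypothetical caller: run a firmware blob on a falcon and wait for it
 * to halt.  Buffers, sizes and the timeout are placeholders. */
static int
example_run_firmware(struct nvkm_falcon *falcon, struct nvkm_subdev *user,
		     void *code, u32 code_size, void *data, u32 data_size)
{
	int ret;

	/* Take exclusive ownership; fails with -EBUSY if already held. */
	ret = nvkm_falcon_get(falcon, user);
	if (ret)
		return ret;

	/* Bring the engine to a clean state before loading anything. */
	ret = nvkm_falcon_reset(falcon);
	if (ret)
		goto out;

	/* Upload code (non-secure, tag 0, port 0) and data, then start
	 * execution at IMEM offset 0. */
	nvkm_falcon_load_imem(falcon, code, 0, code_size, 0, 0, false);
	nvkm_falcon_load_dmem(falcon, data, 0, data_size, 0);
	nvkm_falcon_set_start_addr(falcon, 0);
	nvkm_falcon_start(falcon);

	/* Wait up to 100ms for the falcon to halt on its own. */
	ret = nvkm_falcon_wait_for_halt(falcon, 100);
out:
	nvkm_falcon_put(falcon, user);
	return ret;
}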