cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

engine.c (5545B)


/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/engine.h>
#include <core/device.h>
#include <core/option.h>

#include <subdev/fb.h>

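/* Report whether the engine currently has channel state loaded; engines
 * without a chsw_load hook report false.
 */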
bool
nvkm_engine_chsw_load(struct nvkm_engine *engine)
{
	if (engine->func->chsw_load)
		return engine->func->chsw_load(engine);
	return false;
}

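/* Drop a reference on an engine; the last user finalises the underlying
 * subdev and marks the engine unused, all under use.mutex.  The caller's
 * pointer is cleared either way.
 */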
void
nvkm_engine_unref(struct nvkm_engine **pengine)
{
	struct nvkm_engine *engine = *pengine;
	if (engine) {
		if (refcount_dec_and_mutex_lock(&engine->use.refcount, &engine->use.mutex)) {
			nvkm_subdev_fini(&engine->subdev, false);
			engine->use.enabled = false;
			mutex_unlock(&engine->use.mutex);
		}
		*pengine = NULL;
	}
}

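/* Take a reference on an engine.  The first user initialises the subdev
 * under use.mutex; if that fails the engine stays unused and an ERR_PTR
 * is returned instead of the engine.
 */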
struct nvkm_engine *
nvkm_engine_ref(struct nvkm_engine *engine)
{
	int ret;
	if (engine) {
		if (!refcount_inc_not_zero(&engine->use.refcount)) {
			mutex_lock(&engine->use.mutex);
			if (!refcount_inc_not_zero(&engine->use.refcount)) {
				engine->use.enabled = true;
				if ((ret = nvkm_subdev_init(&engine->subdev))) {
					engine->use.enabled = false;
					mutex_unlock(&engine->use.mutex);
					return ERR_PTR(ret);
				}
				refcount_set(&engine->use.refcount, 1);
			}
			mutex_unlock(&engine->use.mutex);
		}
	}
	return engine;
}

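/* Push the settings for one fb tile region to the engine, if it has a
 * tile hook.
 */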
void
nvkm_engine_tile(struct nvkm_engine *engine, int region)
{
	struct nvkm_fb *fb = engine->subdev.device->fb;
	if (engine->func->tile)
		engine->func->tile(engine, region, &fb->tile.region[region]);
}

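/* The nvkm_subdev hooks below forward to the engine's own function table. */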
static void
nvkm_engine_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_engine *engine = nvkm_engine(subdev);
	if (engine->func->intr)
		engine->func->intr(engine);
}

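/* Query engine information; a temporary reference keeps the engine
 * initialised while the backend's info hook runs.
 */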
static int
nvkm_engine_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data)
{
	struct nvkm_engine *engine = nvkm_engine(subdev);
	if (engine->func->info) {
		if (!IS_ERR((engine = nvkm_engine_ref(engine)))) {
			int ret = engine->func->info(engine, mthd, data);
			nvkm_engine_unref(&engine);
			return ret;
		}
		return PTR_ERR(engine);
	}
	return -ENOSYS;
}

static int
nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_engine *engine = nvkm_engine(subdev);
	if (engine->func->fini)
		return engine->func->fini(engine, suspend);
	return 0;
}

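/* Bring the engine up: skipped while nobody holds a reference, runs the
 * (timed) one-time init hook on first use, then the regular init hook,
 * and finally reprograms all fb tile regions.
 */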
static int
nvkm_engine_init(struct nvkm_subdev *subdev)
{
	struct nvkm_engine *engine = nvkm_engine(subdev);
	struct nvkm_fb *fb = subdev->device->fb;
	int ret = 0, i;
	s64 time;

	if (!engine->use.enabled) {
		nvkm_trace(subdev, "init skipped, engine has no users\n");
		return ret;
	}

	if (engine->func->oneinit && !engine->subdev.oneinit) {
		nvkm_trace(subdev, "one-time init running...\n");
		time = ktime_to_us(ktime_get());
		ret = engine->func->oneinit(engine);
		if (ret) {
			nvkm_trace(subdev, "one-time init failed, %d\n", ret);
			return ret;
		}

		engine->subdev.oneinit = true;
		time = ktime_to_us(ktime_get()) - time;
		nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
	}

	if (engine->func->init)
		ret = engine->func->init(engine);

	for (i = 0; fb && i < fb->tile.regions; i++)
		nvkm_engine_tile(engine, i);
	return ret;
}

static int
nvkm_engine_preinit(struct nvkm_subdev *subdev)
{
	struct nvkm_engine *engine = nvkm_engine(subdev);
	if (engine->func->preinit)
		engine->func->preinit(engine);
	return 0;
}

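/* Destructor: an engine with its own dtor takes over teardown entirely;
 * otherwise the use.mutex is destroyed and the base structure is returned
 * for the caller to free.
 */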
static void *
nvkm_engine_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_engine *engine = nvkm_engine(subdev);
	if (engine->func->dtor)
		return engine->func->dtor(engine);
	mutex_destroy(&engine->use.mutex);
	return engine;
}

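/* Subdev interface shared by every engine. */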
const struct nvkm_subdev_func
nvkm_engine = {
	.dtor = nvkm_engine_dtor,
	.preinit = nvkm_engine_preinit,
	.init = nvkm_engine_init,
	.fini = nvkm_engine_fini,
	.info = nvkm_engine_info,
	.intr = nvkm_engine_intr,
};

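/* Common engine constructor: registers the subdev, sets up refcounting,
 * and honours the per-engine boolean in the device config options; a
 * disabled engine reports -ENODEV.
 */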
int
nvkm_engine_ctor(const struct nvkm_engine_func *func, struct nvkm_device *device,
		 enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_engine *engine)
{
	nvkm_subdev_ctor(&nvkm_engine, device, type, inst, &engine->subdev);
	engine->func = func;
	refcount_set(&engine->use.refcount, 0);
	mutex_init(&engine->use.mutex);

	if (!nvkm_boolopt(device->cfgopt, engine->subdev.name, enable)) {
		nvkm_debug(&engine->subdev, "disabled\n");
		return -ENODEV;
	}

	spin_lock_init(&engine->lock);
	return 0;
}

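/* Allocate and construct an engine in one step. */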
int
nvkm_engine_new_(const struct nvkm_engine_func *func, struct nvkm_device *device,
		 enum nvkm_subdev_type type, int inst, bool enable,
		 struct nvkm_engine **pengine)
{
	if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_engine_ctor(func, device, type, inst, enable, *pengine);
}