cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dmanv40.c (7603B)


/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "channv04.h"
#include "regsnv04.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/instmem.h>

#include <nvif/class.h>
#include <nvif/cl006b.h>
#include <nvif/unpack.h>

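/* Map an engine to the PFIFO register holding its current context and to
 * the offset of the saved context pointer within a channel's RAMFC entry.
 * Returns false for engines without a per-channel context; MPEG is only
 * handled on chipset 0x44 and later. */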
static bool
nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
{
	switch (engine->subdev.type) {
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW:
		return false;
	case NVKM_ENGINE_GR:
		*reg = 0x0032e0;
		*ctx = 0x38;
		return true;
	case NVKM_ENGINE_MPEG:
		if (engine->subdev.device->chipset < 0x44)
			return false;
		*reg = 0x00330c;
		*ctx = 0x54;
		return true;
	default:
		WARN_ON(1);
		return false;
	}
}

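/* Return the slot holding this channel's context object for the given
 * engine, or NULL if the parent fifo has no engine id for it. */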
static struct nvkm_gpuobj **
nv40_fifo_dma_engn(struct nv04_fifo_chan *chan, struct nvkm_engine *engine)
{
	int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
	if (engi >= 0)
		return &chan->engn[engi];
	return NULL;
}

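/* Engine context teardown.  0x002500 appears to gate PFIFO while the
 * context registers are touched: if this channel is the one currently
 * active on PFIFO (0x003204), clear the engine's context register, and
 * always clear the saved context pointer in the channel's RAMFC entry. */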
static int
nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine, bool suspend)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	unsigned long flags;
	u32 reg, ctx;
	int chid;

	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
		return 0;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
	if (chid == chan->base.chid)
		nvkm_wr32(device, reg, 0x00000000);
	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
	nvkm_done(imem->ramfc);

	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}

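/* Engine context setup, mirroring the fini path above: point the engine's
 * context register at the context object (if this channel is active) and
 * record the instance address in the channel's RAMFC entry. */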
static int
nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	unsigned long flags;
	u32 inst, reg, ctx;
	int chid;

	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
		return 0;
	inst = (*nv40_fifo_dma_engn(chan, engine))->addr >> 4;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);

	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
	if (chid == chan->base.chid)
		nvkm_wr32(device, reg, inst);
	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst);
	nvkm_done(imem->ramfc);

	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}

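/* Free the GPU object backing this channel's context for the engine. */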
static void
nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	nvkm_gpuobj_del(nv40_fifo_dma_engn(chan, engine));
}

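/* Bind the engine context object into the channel's engine slot; a no-op
 * for engines without a per-channel context. */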
static int
nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
			  struct nvkm_engine *engine,
			  struct nvkm_object *object)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	u32 reg, ctx;

	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
		return 0;

	return nvkm_object_bind(object, NULL, 0, nv40_fifo_dma_engn(chan, engine));
}

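/* Hash the object's handle into RAMHT for this channel.  The context word
 * packs the channel id (bits 23 and up) with an engine selector
 * (0x00100000 for GR, 0x00200000 for MPEG, 0 for SW/DMAOBJ). */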
static int
nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
			  struct nvkm_object *object)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
	u32 context = chan->base.chid << 23;
	u32 handle  = object->handle;
	int hash;

	switch (object->engine->subdev.type) {
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW    : context |= 0x00000000; break;
	case NVKM_ENGINE_GR    : context |= 0x00100000; break;
	case NVKM_ENGINE_MPEG  : context |= 0x00200000; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	mutex_lock(&chan->fifo->base.mutex);
	hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
				 handle, context);
	mutex_unlock(&chan->fifo->base.mutex);
	return hash;
}

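/* NV40 reuses the NV04 channel dtor/init/fini and object_dtor paths and
 * only overrides the per-engine context handling and RAMHT insertion. */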
static const struct nvkm_fifo_chan_func
nv40_fifo_dma_func = {
	.dtor = nv04_fifo_dma_dtor,
	.init = nv04_fifo_dma_init,
	.fini = nv04_fifo_dma_fini,
	.engine_ctor = nv40_fifo_dma_engine_ctor,
	.engine_dtor = nv40_fifo_dma_engine_dtor,
	.engine_init = nv40_fifo_dma_engine_init,
	.engine_fini = nv40_fifo_dma_engine_fini,
	.object_ctor = nv40_fifo_dma_object_ctor,
	.object_dtor = nv04_fifo_dma_object_dtor,
};

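/* Channel constructor: unpack and validate the nv03_channel_dma_v0 args,
 * create the base channel, then fill the channel's 128-byte RAMFC slot
 * with the initial DMA offsets, the pushbuf instance address and the
 * DMA fetch configuration. */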
static int
nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		  void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_object *parent = oclass->parent;
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = nv04_fifo(base);
	struct nv04_fifo_chan *chan = NULL;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	int ret = -ENOSYS;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
				   "offset %08x\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
		if (!args->v0.pushbuf)
			return -EINVAL;
	} else
		return ret;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;

	ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
				  BIT(NV04_FIFO_ENGN_SW) |
				  BIT(NV04_FIFO_ENGN_GR) |
				  BIT(NV04_FIFO_ENGN_MPEG) |
				  BIT(NV04_FIFO_ENGN_DMA),
				  0, 0xc00000, 0x1000, oclass, &chan->base);
	chan->fifo = fifo;
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;
	chan->ramfc = chan->base.chid * 128;

	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			       NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
	nvkm_done(imem->ramfc);
	return 0;
}

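/* Channel class exposed for NV40: v0-only NV40_CHANNEL_DMA, constructed
 * via nv40_fifo_dma_new(). */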
const struct nvkm_fifo_chan_oclass
nv40_fifo_dma_oclass = {
	.base.oclass = NV40_CHANNEL_DMA,
	.base.minver = 0,
	.base.maxver = 0,
	.ctor = nv40_fifo_dma_new,
};