cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

gpfifogk104.c (10761B)


      1/*
      2 * Copyright 2012 Red Hat Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 * Authors: Ben Skeggs
     23 */
     24#include "changk104.h"
     25#include "cgrp.h"
     26
     27#include <core/client.h>
     28#include <core/gpuobj.h>
     29#include <subdev/fb.h>
     30#include <subdev/mmu.h>
     31#include <subdev/timer.h>
     32
     33#include <nvif/class.h>
     34#include <nvif/cla06f.h>
     35#include <nvif/unpack.h>
     36
/* Preempt ("kick") a channel -- or its whole channel group (TSG) -- off
 * the hardware via the PFIFO preempt register, then poll for completion.
 * On timeout, channel recovery is triggered and -ETIMEDOUT returned.
 *
 * Caller must hold the fifo mutex (see gk104_fifo_gpfifo_kick()).
 */
int
gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *chan)
{
	struct gk104_fifo *fifo = chan->fifo;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_client *client = chan->base.object.client;
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	int ret = 0;

	/* Bit 24 is set when preempting a channel group by cgrp id
	 * rather than a single channel by chid. */
	if (cgrp)
		nvkm_wr32(device, 0x002634, cgrp->id | 0x01000000);
	else
		nvkm_wr32(device, 0x002634, chan->base.chid);
	/* Wait up to 2s for the hardware to clear the pending bit (20). */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
			break;
	) < 0) {
		nvkm_error(subdev, "%s %d [%s] kick timeout\n",
			   cgrp ? "tsg" : "channel",
			   cgrp ? cgrp->id : chan->base.chid, client->name);
		/* Preempt never completed; schedule recovery of the channel. */
		nvkm_fifo_recover_chan(&fifo->base, chan->base.chid);
		ret = -ETIMEDOUT;
	}
	return ret;
}
     63
     64int
     65gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
     66{
     67	int ret;
     68	mutex_lock(&chan->base.fifo->mutex);
     69	ret = gk104_fifo_gpfifo_kick_locked(chan);
     70	mutex_unlock(&chan->base.fifo->mutex);
     71	return ret;
     72}
     73
/* Return the instance-block offset of an engine's context pointer.
 *
 * Zero means the engine has no context pointer slot (SW/CE) or is
 * unrecognised.  Some engines pack TWO offsets into the return value:
 * the low 16 bits are the primary slot and the high 16 bits a second
 * slot; callers split them with "& 0xffff" / ">>= 16" (see
 * gk104_fifo_gpfifo_engine_init()/_fini()).
 */
static u32
gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
	switch (engine->subdev.type) {
	case NVKM_ENGINE_SW    :
	case NVKM_ENGINE_CE    : return 0;
	case NVKM_ENGINE_GR    : return 0x0210;
	case NVKM_ENGINE_SEC   : return 0x0220;
	case NVKM_ENGINE_MSPDEC: return 0x0250;
	case NVKM_ENGINE_MSPPP : return 0x0260;
	case NVKM_ENGINE_MSVLD : return 0x0270;
	case NVKM_ENGINE_VIC   : return 0x0280;
	case NVKM_ENGINE_MSENC : return 0x0290;
	case NVKM_ENGINE_NVDEC : return 0x02100270; /* 0x0210 and 0x0270 */
	case NVKM_ENGINE_NVENC :
		if (engine->subdev.inst)
			return 0x0210;
		return 0x02100290; /* 0x0210 and 0x0290 */
	default:
		WARN_ON(1);
		return 0;
	}
}
     97
     98struct gk104_fifo_engn *
     99gk104_fifo_gpfifo_engine(struct gk104_fifo_chan *chan, struct nvkm_engine *engine)
    100{
    101	int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
    102	if (engi >= 0)
    103		return &chan->engn[engi];
    104	return NULL;
    105}
    106
/* Unbind an engine's context from the channel's instance block.
 *
 * The channel is kicked off the hardware first; if the kick fails
 * during suspend we bail out so the suspend can be aborted.  Otherwise
 * the context pointer slot(s) are zeroed (offset may encode two slots,
 * see gk104_fifo_gpfifo_engine_addr()), and the kick result is still
 * propagated to the caller.
 */
static int
gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine, bool suspend)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;
	u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
	int ret;

	ret = gk104_fifo_gpfifo_kick(chan);
	if (ret && suspend)
		return ret;

	if (offset) {
		nvkm_kmap(inst);
		/* Primary slot (low 16 bits of offset). */
		nvkm_wo32(inst, (offset & 0xffff) + 0x00, 0x00000000);
		nvkm_wo32(inst, (offset & 0xffff) + 0x04, 0x00000000);
		/* Secondary slot, if the high 16 bits are non-zero. */
		if ((offset >>= 16)) {
			nvkm_wo32(inst, offset + 0x00, 0x00000000);
			nvkm_wo32(inst, offset + 0x04, 0x00000000);
		}
		nvkm_done(inst);
	}

	return ret;
}
    133
/* Bind an engine's context into the channel's instance block by
 * writing the GPU virtual address of the context mapping into the
 * engine's context pointer slot(s).
 */
static int
gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
	struct nvkm_gpuobj *inst = chan->base.inst;
	u32 offset = gk104_fifo_gpfifo_engine_addr(engine);

	if (offset) {
		/* NOTE(review): bit 2 is OR'd into the low word alongside
		 * the address -- presumably a valid/target flag; confirm
		 * against hardware documentation. */
		u32 datalo = lower_32_bits(engn->vma->addr) | 0x00000004;
		u32 datahi = upper_32_bits(engn->vma->addr);
		nvkm_kmap(inst);
		/* Primary slot (low 16 bits of offset). */
		nvkm_wo32(inst, (offset & 0xffff) + 0x00, datalo);
		nvkm_wo32(inst, (offset & 0xffff) + 0x04, datahi);
		/* Secondary slot, if the high 16 bits are non-zero. */
		if ((offset >>= 16)) {
			nvkm_wo32(inst, offset + 0x00, datalo);
			nvkm_wo32(inst, offset + 0x04, datahi);
		}
		nvkm_done(inst);
	}

	return 0;
}
    158
    159void
    160gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
    161			      struct nvkm_engine *engine)
    162{
    163	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
    164	struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
    165	nvkm_vmm_put(chan->base.vmm, &engn->vma);
    166	nvkm_gpuobj_del(&engn->inst);
    167}
    168
/* Construct the per-engine context backing for this channel: bind
 * @object into an instance object, allocate a VMA in the channel's
 * address space sized to it, and map the object there.  Engines with
 * no context pointer slot (addr == 0) need no backing.
 *
 * NOTE(review): on a mid-sequence failure, partially-constructed state
 * (engn->inst / engn->vma) is assumed to be released via
 * gk104_fifo_gpfifo_engine_dtor() -- confirm framework behaviour.
 */
int
gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine,
			      struct nvkm_object *object)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
	int ret;

	if (!gk104_fifo_gpfifo_engine_addr(engine))
		return 0;

	ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
	if (ret)
		return ret;

	/* 12 = page shift: allocate the VMA with 4KiB granularity. */
	ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
	if (ret)
		return ret;

	return nvkm_memory_map(engn->inst, 0, chan->base.vmm, engn->vma, NULL, 0);
}
    191
/* Channel fini: take the channel off its runlist and disable it.
 *
 * The remove/kick/update sequence only applies to channels currently
 * on a runlist; the channel control register is cleared either way.
 */
void
gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 coff = chan->base.chid * 8;	/* 8 bytes of control regs per chid */

	if (!list_empty(&chan->head)) {
		gk104_fifo_runlist_remove(fifo, chan);
		/* NOTE(review): bit 0x800 is set before preempting --
		 * looks like a scheduling-disable flag; confirm. */
		nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
		gk104_fifo_gpfifo_kick(chan);
		gk104_fifo_runlist_update(fifo, chan->runl);
	}

	/* Clear the channel's control register entirely. */
	nvkm_wr32(device, 0x800000 + coff, 0x00000000);
}
    209
/* Channel init: point the channel control registers at the channel's
 * instance block and (re)insert the channel into its runlist.
 */
void
gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 addr = chan->base.inst->addr >> 12;	/* inst block, 4KiB units */
	u32 coff = chan->base.chid * 8;		/* 8 bytes of control regs per chid */

	/* Select the runlist, then enable the channel with its instance
	 * block address (bit 31 appears to be the enable/valid bit). */
	nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->runl << 16);
	nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);

	/* Never reschedule a channel that has been killed. */
	if (list_empty(&chan->head) && !chan->killed) {
		gk104_fifo_runlist_insert(fifo, chan);
		/* NOTE(review): bit 0x400 is deliberately set both before
		 * and after the runlist update (matches upstream nouveau);
		 * presumably re-arms the channel in case the update
		 * cleared it -- confirm. */
		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
		gk104_fifo_runlist_update(fifo, chan->runl);
		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
	}
}
    229
    230void *
    231gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
    232{
    233	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
    234	nvkm_memory_unref(&chan->mthd);
    235	kfree(chan->cgrp);
    236	return chan;
    237}
    238
/* Channel method table wiring the GK104 GPFIFO implementation into the
 * common nvkm_fifo_chan framework (ntfy is shared with GF100). */
const struct nvkm_fifo_chan_func
gk104_fifo_gpfifo_func = {
	.dtor = gk104_fifo_gpfifo_dtor,
	.init = gk104_fifo_gpfifo_init,
	.fini = gk104_fifo_gpfifo_fini,
	.ntfy = gf100_fifo_chan_ntfy,
	.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
	.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
	.engine_init = gk104_fifo_gpfifo_engine_init,
	.engine_fini = gk104_fifo_gpfifo_engine_fini,
};
    250
/* Common GPFIFO channel constructor.
 *
 * @runlists: in: bitmask of acceptable runlists; out: the single
 *            runlist chosen (lowest set bit).
 * @chid:     out: the allocated channel id.
 * @vmm:      handle of the address space the channel runs in.
 * @ioffset:  GPU virtual address of the GPFIFO ring.
 * @ilength:  byte size of the GPFIFO ring.
 * @inst:     out: address of the channel's instance block.
 * @priv:     whether the channel is privileged.
 *
 * Allocates the channel object, optionally wraps it in a one-channel
 * group (for GPUs with cgrp_force), clears the channel's slice of the
 * user control area, and fills in the RAMFC in the instance block.
 * On ctor failure after nvkm_fifo_chan_ctor() succeeds, cleanup runs
 * through the object dtor path.
 */
static int
gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
		       u64 vmm, u64 ioffset, u64 ilength, u64 *inst, bool priv,
		       const struct nvkm_oclass *oclass,
		       struct nvkm_object **pobject)
{
	struct gk104_fifo_chan *chan;
	int runlist = ffs(*runlists) -1, ret, i;
	u64 usermem;

	/* Must have an address space and a valid runlist selection. */
	if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
		return -EINVAL;
	*runlists = BIT_ULL(runlist);

	/* Allocate the channel. */
	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;
	chan->fifo = fifo;
	chan->runl = runlist;
	INIT_LIST_HEAD(&chan->head);

	ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
				  0x1000, 0x1000, true, vmm, 0, fifo->runlist[runlist].engm_sw,
				  1, fifo->user.bar->addr, 0x200,
				  oclass, &chan->base);
	if (ret)
		return ret;

	*chid = chan->base.chid;
	*inst = chan->base.inst->addr;

	/* Hack to support GPUs where even individual channels should be
	 * part of a channel group.
	 */
	if (fifo->func->cgrp_force) {
		if (!(chan->cgrp = kmalloc(sizeof(*chan->cgrp), GFP_KERNEL)))
			return -ENOMEM;
		chan->cgrp->id = chan->base.chid;
		INIT_LIST_HEAD(&chan->cgrp->head);
		INIT_LIST_HEAD(&chan->cgrp->chan);
		chan->cgrp->chan_nr = 0;
	}

	/* Clear channel control registers. */
	usermem = chan->base.chid * 0x200;	/* 0x200 bytes per channel */
	ilength = order_base_2(ilength / 8);	/* ring size as log2 of entries */

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x200; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);
	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;

	/* RAMFC */
	nvkm_kmap(chan->base.inst);
	/* Address of this channel's slice of fifo->user.mem. */
	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
	/* GPFIFO ring base address; log2 entry count in bits 16+ of 0x4c. */
	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
					 (ilength << 16));
	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
	/* Privileged-channel flag. */
	nvkm_wo32(chan->base.inst, 0xe4, priv ? 0x00000020 : 0x00000000);
	nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
	nvkm_done(chan->base.inst);
	return 0;
}
    326
/* KEPLER_CHANNEL_GPFIFO_A (v0) class constructor: unpack and log the
 * userspace ioctl arguments, then defer to gk104_fifo_gpfifo_new_().
 * Returns -ENOSYS if the arguments fail to unpack.
 */
int
gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
		      void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_object *parent = oclass->parent;
	union {
		struct kepler_channel_gpfifo_a_v0 v0;
	} *args = data;
	int ret = -ENOSYS;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
				   "ioffset %016llx ilength %08x "
				   "runlist %016llx priv %d\n",
			   args->v0.version, args->v0.vmm, args->v0.ioffset,
			   args->v0.ilength, args->v0.runlist, args->v0.priv);
		/* runlist/chid/inst are in-out/out parameters. */
		return gk104_fifo_gpfifo_new_(fifo,
					      &args->v0.runlist,
					      &args->v0.chid,
					       args->v0.vmm,
					       args->v0.ioffset,
					       args->v0.ilength,
					      &args->v0.inst,
					       args->v0.priv,
					      oclass, pobject);
	}

	return ret;
}