cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gk104.c (34516B)


      1/*
      2 * Copyright 2012 Red Hat Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 * Authors: Ben Skeggs
     23 */
     24#include "gk104.h"
     25#include "cgrp.h"
     26#include "changk104.h"
     27
     28#include <core/client.h>
     29#include <core/gpuobj.h>
     30#include <subdev/bar.h>
     31#include <subdev/fault.h>
     32#include <subdev/timer.h>
     33#include <subdev/top.h>
     34#include <engine/sw.h>
     35
     36#include <nvif/class.h>
     37#include <nvif/cl0080.h>
     38
     39void
     40gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
     41			 struct gk104_fifo_engine_status *status)
     42{
     43	struct nvkm_engine *engine = fifo->engine[engn].engine;
     44	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
     45	struct nvkm_device *device = subdev->device;
     46	u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
     47
     48	status->busy     = !!(stat & 0x80000000);
     49	status->faulted  = !!(stat & 0x40000000);
     50	status->next.tsg = !!(stat & 0x10000000);
     51	status->next.id  =   (stat & 0x0fff0000) >> 16;
     52	status->chsw     = !!(stat & 0x00008000);
     53	status->save     = !!(stat & 0x00004000);
     54	status->load     = !!(stat & 0x00002000);
     55	status->prev.tsg = !!(stat & 0x00001000);
     56	status->prev.id  =   (stat & 0x00000fff);
     57	status->chan     = NULL;
     58
     59	if (status->busy && status->chsw) {
     60		if (status->load && status->save) {
     61			if (engine && nvkm_engine_chsw_load(engine))
     62				status->chan = &status->next;
     63			else
     64				status->chan = &status->prev;
     65		} else
     66		if (status->load) {
     67			status->chan = &status->next;
     68		} else {
     69			status->chan = &status->prev;
     70		}
     71	} else
     72	if (status->load) {
     73		status->chan = &status->prev;
     74	}
     75
     76	nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
     77			   "save %d load %d %sid %d%s-> %sid %d%s\n",
     78		   engn, status->busy, status->faulted,
     79		   status->chsw, status->save, status->load,
     80		   status->prev.tsg ? "tsg" : "ch", status->prev.id,
     81		   status->chan == &status->prev ? "*" : " ",
     82		   status->next.tsg ? "tsg" : "ch", status->next.id,
     83		   status->chan == &status->next ? "*" : " ");
     84}
     85
     86int
     87gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
     88		     void *argv, u32 argc, struct nvkm_object **pobject)
     89{
     90	struct gk104_fifo *fifo = gk104_fifo(base);
     91	if (oclass->engn == &fifo->func->chan) {
     92		const struct gk104_fifo_chan_user *user = oclass->engn;
     93		return user->ctor(fifo, oclass, argv, argc, pobject);
     94	} else
     95	if (oclass->engn == &fifo->func->user) {
     96		const struct gk104_fifo_user_user *user = oclass->engn;
     97		return user->ctor(oclass, argv, argc, pobject);
     98	}
     99	WARN_ON(1);
    100	return -EINVAL;
    101}
    102
    103int
    104gk104_fifo_class_get(struct nvkm_fifo *base, int index,
    105		     struct nvkm_oclass *oclass)
    106{
    107	struct gk104_fifo *fifo = gk104_fifo(base);
    108	int c = 0;
    109
    110	if (fifo->func->user.ctor && c++ == index) {
    111		oclass->base =  fifo->func->user.user;
    112		oclass->engn = &fifo->func->user;
    113		return 0;
    114	}
    115
    116	if (fifo->func->chan.ctor && c++ == index) {
    117		oclass->base =  fifo->func->chan.user;
    118		oclass->engn = &fifo->func->chan;
    119		return 0;
    120	}
    121
    122	return c;
    123}
    124
    125void
    126gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
    127{
    128	struct nvkm_device *device = fifo->engine.subdev.device;
    129	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
    130}
    131
    132void
    133gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
    134{
    135	struct nvkm_device *device = fifo->engine.subdev.device;
    136	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
    137}
    138
    139void
    140gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
    141			  struct nvkm_memory *mem, int nr)
    142{
    143	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
    144	struct nvkm_device *device = subdev->device;
    145	int target;
    146
    147	switch (nvkm_memory_target(mem)) {
    148	case NVKM_MEM_TARGET_VRAM: target = 0; break;
    149	case NVKM_MEM_TARGET_NCOH: target = 3; break;
    150	default:
    151		WARN_ON(1);
    152		return;
    153	}
    154
    155	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
    156				    (target << 28));
    157	nvkm_wr32(device, 0x002274, (runl << 20) | nr);
    158
    159	if (nvkm_msec(device, 2000,
    160		if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
    161			break;
    162	) < 0)
    163		nvkm_error(subdev, "runlist %d update timeout\n", runl);
    164}
    165
    166void
    167gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
    168{
    169	const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
    170	struct gk104_fifo_chan *chan;
    171	struct nvkm_memory *mem;
    172	struct nvkm_fifo_cgrp *cgrp;
    173	int nr = 0;
    174
    175	mutex_lock(&fifo->base.mutex);
    176	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
    177	fifo->runlist[runl].next = !fifo->runlist[runl].next;
    178
    179	nvkm_kmap(mem);
    180	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
    181		func->chan(chan, mem, nr++ * func->size);
    182	}
    183
    184	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
    185		func->cgrp(cgrp, mem, nr++ * func->size);
    186		list_for_each_entry(chan, &cgrp->chan, head) {
    187			func->chan(chan, mem, nr++ * func->size);
    188		}
    189	}
    190	nvkm_done(mem);
    191
    192	func->commit(fifo, runl, mem, nr);
    193	mutex_unlock(&fifo->base.mutex);
    194}
    195
    196void
    197gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
    198{
    199	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
    200	mutex_lock(&fifo->base.mutex);
    201	if (!list_empty(&chan->head)) {
    202		list_del_init(&chan->head);
    203		if (cgrp && !--cgrp->chan_nr)
    204			list_del_init(&cgrp->head);
    205	}
    206	mutex_unlock(&fifo->base.mutex);
    207}
    208
    209void
    210gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
    211{
    212	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
    213	mutex_lock(&fifo->base.mutex);
    214	if (cgrp) {
    215		if (!cgrp->chan_nr++)
    216			list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
    217		list_add_tail(&chan->head, &cgrp->chan);
    218	} else {
    219		list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
    220	}
    221	mutex_unlock(&fifo->base.mutex);
    222}
    223
    224void
    225gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan,
    226			struct nvkm_memory *memory, u32 offset)
    227{
    228	nvkm_wo32(memory, offset + 0, chan->base.chid);
    229	nvkm_wo32(memory, offset + 4, 0x00000000);
    230}
    231
    232const struct gk104_fifo_runlist_func
    233gk104_fifo_runlist = {
    234	.size = 8,
    235	.chan = gk104_fifo_runlist_chan,
    236	.commit = gk104_fifo_runlist_commit,
    237};
    238
    239void
    240gk104_fifo_pbdma_init(struct gk104_fifo *fifo)
    241{
    242	struct nvkm_device *device = fifo->base.engine.subdev.device;
    243	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
    244}
    245
    246int
    247gk104_fifo_pbdma_nr(struct gk104_fifo *fifo)
    248{
    249	struct nvkm_device *device = fifo->base.engine.subdev.device;
    250	/* Determine number of PBDMAs by checking valid enable bits. */
    251	nvkm_wr32(device, 0x000204, 0xffffffff);
    252	return hweight32(nvkm_rd32(device, 0x000204));
    253}
    254
    255const struct gk104_fifo_pbdma_func
    256gk104_fifo_pbdma = {
    257	.nr = gk104_fifo_pbdma_nr,
    258	.init = gk104_fifo_pbdma_init,
    259};
    260
    261struct nvkm_engine *
    262gk104_fifo_id_engine(struct nvkm_fifo *base, int engi)
    263{
    264	if (engi == GK104_FIFO_ENGN_SW)
    265		return nvkm_device_engine(base->engine.subdev.device, NVKM_ENGINE_SW, 0);
    266
    267	return gk104_fifo(base)->engine[engi].engine;
    268}
    269
    270int
    271gk104_fifo_engine_id(struct nvkm_fifo *base, struct nvkm_engine *engine)
    272{
    273	struct gk104_fifo *fifo = gk104_fifo(base);
    274	int engn;
    275
    276	if (engine->subdev.type == NVKM_ENGINE_SW)
    277		return GK104_FIFO_ENGN_SW;
    278
    279	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
    280		if (fifo->engine[engn].engine == engine)
    281			return engn;
    282	}
    283
    284	WARN_ON(1);
    285	return -1;
    286}
    287
    288static void
    289gk104_fifo_recover_work(struct work_struct *w)
    290{
    291	struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
    292	struct nvkm_device *device = fifo->base.engine.subdev.device;
    293	struct nvkm_engine *engine;
    294	unsigned long flags;
    295	u32 engm, runm, todo;
    296	int engn, runl;
    297
    298	spin_lock_irqsave(&fifo->base.lock, flags);
    299	runm = fifo->recover.runm;
    300	engm = fifo->recover.engm;
    301	fifo->recover.engm = 0;
    302	fifo->recover.runm = 0;
    303	spin_unlock_irqrestore(&fifo->base.lock, flags);
    304
    305	nvkm_mask(device, 0x002630, runm, runm);
    306
    307	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
    308		if ((engine = fifo->engine[engn].engine)) {
    309			nvkm_subdev_fini(&engine->subdev, false);
    310			WARN_ON(nvkm_subdev_init(&engine->subdev));
    311		}
    312	}
    313
    314	for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
    315		gk104_fifo_runlist_update(fifo, runl);
    316
    317	nvkm_wr32(device, 0x00262c, runm);
    318	nvkm_mask(device, 0x002630, runm, 0x00000000);
    319}
    320
    321static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);
    322
    323static void
    324gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
    325{
    326	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
    327	struct nvkm_device *device = subdev->device;
    328	const u32 runm = BIT(runl);
    329
    330	assert_spin_locked(&fifo->base.lock);
    331	if (fifo->recover.runm & runm)
    332		return;
    333	fifo->recover.runm |= runm;
    334
    335	/* Block runlist to prevent channel assignment(s) from changing. */
    336	nvkm_mask(device, 0x002630, runm, runm);
    337
    338	/* Schedule recovery. */
    339	nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
    340	schedule_work(&fifo->recover.work);
    341}
    342
    343static struct gk104_fifo_chan *
    344gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
    345{
    346	struct gk104_fifo_chan *chan;
    347	struct nvkm_fifo_cgrp *cgrp;
    348
    349	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
    350		if (chan->base.chid == chid) {
    351			list_del_init(&chan->head);
    352			return chan;
    353		}
    354	}
    355
    356	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
    357		if (cgrp->id == chid) {
    358			chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
    359			list_del_init(&chan->head);
    360			if (!--cgrp->chan_nr)
    361				list_del_init(&cgrp->head);
    362			return chan;
    363		}
    364	}
    365
    366	return NULL;
    367}
    368
    369static void
    370gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
    371{
    372	struct gk104_fifo *fifo = gk104_fifo(base);
    373	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
    374	struct nvkm_device *device = subdev->device;
    375	const u32  stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
    376	const u32  runl = (stat & 0x000f0000) >> 16;
    377	const bool used = (stat & 0x00000001);
    378	unsigned long engn, engm = fifo->runlist[runl].engm;
    379	struct gk104_fifo_chan *chan;
    380
    381	assert_spin_locked(&fifo->base.lock);
    382	if (!used)
    383		return;
    384
    385	/* Lookup SW state for channel, and mark it as dead. */
    386	chan = gk104_fifo_recover_chid(fifo, runl, chid);
    387	if (chan) {
    388		chan->killed = true;
    389		nvkm_fifo_kevent(&fifo->base, chid);
    390	}
    391
    392	/* Disable channel. */
    393	nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
    394	nvkm_warn(subdev, "channel %d: killed\n", chid);
    395
    396	/* Block channel assignments from changing during recovery. */
    397	gk104_fifo_recover_runl(fifo, runl);
    398
    399	/* Schedule recovery for any engines the channel is on. */
    400	for_each_set_bit(engn, &engm, fifo->engine_nr) {
    401		struct gk104_fifo_engine_status status;
    402		gk104_fifo_engine_status(fifo, engn, &status);
    403		if (!status.chan || status.chan->id != chid)
    404			continue;
    405		gk104_fifo_recover_engn(fifo, engn);
    406	}
    407}
    408
    409static void
    410gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
    411{
    412	struct nvkm_engine *engine = fifo->engine[engn].engine;
    413	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
    414	struct nvkm_device *device = subdev->device;
    415	const u32 runl = fifo->engine[engn].runl;
    416	const u32 engm = BIT(engn);
    417	struct gk104_fifo_engine_status status;
    418	int mmui = -1;
    419
    420	assert_spin_locked(&fifo->base.lock);
    421	if (fifo->recover.engm & engm)
    422		return;
    423	fifo->recover.engm |= engm;
    424
    425	/* Block channel assignments from changing during recovery. */
    426	gk104_fifo_recover_runl(fifo, runl);
    427
    428	/* Determine which channel (if any) is currently on the engine. */
    429	gk104_fifo_engine_status(fifo, engn, &status);
    430	if (status.chan) {
     431		/* The channel is no longer viable, kill it. */
    432		gk104_fifo_recover_chan(&fifo->base, status.chan->id);
    433	}
    434
    435	/* Determine MMU fault ID for the engine, if we're not being
    436	 * called from the fault handler already.
    437	 */
    438	if (!status.faulted && engine) {
    439		mmui = nvkm_top_fault_id(device, engine->subdev.type, engine->subdev.inst);
    440		if (mmui < 0) {
    441			const struct nvkm_enum *en = fifo->func->fault.engine;
    442			for (; en && en->name; en++) {
    443				if (en->data2 == engine->subdev.type &&
    444				    en->inst  == engine->subdev.inst) {
    445					mmui = en->value;
    446					break;
    447				}
    448			}
    449		}
    450		WARN_ON(mmui < 0);
    451	}
    452
     453	/* Trigger an MMU fault for the engine.
     454	 *
     455	 * It's not clear why this is needed, but nvgpu does something similar,
    456	 * and it makes recovery from CTXSW_TIMEOUT a lot more reliable.
    457	 */
    458	if (mmui >= 0) {
    459		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);
    460
    461		/* Wait for fault to trigger. */
    462		nvkm_msec(device, 2000,
    463			gk104_fifo_engine_status(fifo, engn, &status);
    464			if (status.faulted)
    465				break;
    466		);
    467
    468		/* Release MMU fault trigger, and ACK the fault. */
    469		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
    470		nvkm_wr32(device, 0x00259c, BIT(mmui));
    471		nvkm_wr32(device, 0x002100, 0x10000000);
    472	}
    473
    474	/* Schedule recovery. */
    475	nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
    476	schedule_work(&fifo->recover.work);
    477}
    478
    479static void
    480gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
    481{
    482	struct gk104_fifo *fifo = gk104_fifo(base);
    483	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
    484	struct nvkm_device *device = subdev->device;
    485	const struct nvkm_enum *er, *ee, *ec, *ea;
    486	struct nvkm_engine *engine = NULL;
    487	struct nvkm_fifo_chan *chan;
    488	unsigned long flags;
    489	const char *en = "";
    490	char ct[8] = "HUB/";
    491
    492	er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
    493	ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
    494	if (info->hub) {
    495		ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
    496	} else {
    497		ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
    498		snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
    499	}
    500	ea = nvkm_enum_find(fifo->func->fault.access, info->access);
    501
    502	if (ee && ee->data2) {
    503		switch (ee->data2) {
    504		case NVKM_SUBDEV_BAR:
    505			nvkm_bar_bar1_reset(device);
    506			break;
    507		case NVKM_SUBDEV_INSTMEM:
    508			nvkm_bar_bar2_reset(device);
    509			break;
    510		case NVKM_ENGINE_IFB:
    511			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
    512			break;
    513		default:
    514			engine = nvkm_device_engine(device, ee->data2, 0);
    515			break;
    516		}
    517	}
    518
    519	if (ee == NULL) {
    520		struct nvkm_subdev *subdev = nvkm_top_fault(device, info->engine);
    521		if (subdev) {
    522			if (subdev->func == &nvkm_engine)
    523				engine = container_of(subdev, typeof(*engine), subdev);
     524			en = subdev->name;
    525		}
    526	} else {
    527		en = ee->name;
    528	}
    529
    530	spin_lock_irqsave(&fifo->base.lock, flags);
    531	chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);
    532
    533	nvkm_error(subdev,
    534		   "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
    535		   "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
    536		   info->access, ea ? ea->name : "", info->addr,
    537		   info->engine, ee ? ee->name : en,
    538		   info->client, ct, ec ? ec->name : "",
    539		   info->reason, er ? er->name : "", chan ? chan->chid : -1,
    540		   info->inst, chan ? chan->object.client->name : "unknown");
    541
    542	/* Kill the channel that caused the fault. */
    543	if (chan)
    544		gk104_fifo_recover_chan(&fifo->base, chan->chid);
    545
    546	/* Channel recovery will probably have already done this for the
    547	 * correct engine(s), but just in case we can't find the channel
    548	 * information...
    549	 */
    550	if (engine) {
    551		int engn = fifo->base.func->engine_id(&fifo->base, engine);
    552		if (engn >= 0 && engn != GK104_FIFO_ENGN_SW)
    553			gk104_fifo_recover_engn(fifo, engn);
    554	}
    555
    556	spin_unlock_irqrestore(&fifo->base.lock, flags);
    557}
    558
    559static const struct nvkm_enum
    560gk104_fifo_bind_reason[] = {
    561	{ 0x01, "BIND_NOT_UNBOUND" },
    562	{ 0x02, "SNOOP_WITHOUT_BAR1" },
    563	{ 0x03, "UNBIND_WHILE_RUNNING" },
    564	{ 0x05, "INVALID_RUNLIST" },
    565	{ 0x06, "INVALID_CTX_TGT" },
    566	{ 0x0b, "UNBIND_WHILE_PARKED" },
    567	{}
    568};
    569
    570void
    571gk104_fifo_intr_bind(struct gk104_fifo *fifo)
    572{
    573	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
    574	struct nvkm_device *device = subdev->device;
    575	u32 intr = nvkm_rd32(device, 0x00252c);
    576	u32 code = intr & 0x000000ff;
    577	const struct nvkm_enum *en =
    578		nvkm_enum_find(gk104_fifo_bind_reason, code);
    579
    580	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
    581}
    582
    583static const struct nvkm_enum
    584gk104_fifo_sched_reason[] = {
    585	{ 0x0a, "CTXSW_TIMEOUT" },
    586	{}
    587};
    588
    589static void
    590gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
    591{
    592	struct nvkm_device *device = fifo->base.engine.subdev.device;
    593	unsigned long flags, engm = 0;
    594	u32 engn;
    595
    596	/* We need to ACK the SCHED_ERROR here, and prevent it reasserting,
    597	 * as MMU_FAULT cannot be triggered while it's pending.
    598	 */
    599	spin_lock_irqsave(&fifo->base.lock, flags);
    600	nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
    601	nvkm_wr32(device, 0x002100, 0x00000100);
    602
    603	for (engn = 0; engn < fifo->engine_nr; engn++) {
    604		struct gk104_fifo_engine_status status;
    605
    606		gk104_fifo_engine_status(fifo, engn, &status);
    607		if (!status.busy || !status.chsw)
    608			continue;
    609
    610		engm |= BIT(engn);
    611	}
    612
    613	for_each_set_bit(engn, &engm, fifo->engine_nr)
    614		gk104_fifo_recover_engn(fifo, engn);
    615
    616	nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
    617	spin_unlock_irqrestore(&fifo->base.lock, flags);
    618}
    619
    620static void
    621gk104_fifo_intr_sched(struct gk104_fifo *fifo)
    622{
    623	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
    624	struct nvkm_device *device = subdev->device;
    625	u32 intr = nvkm_rd32(device, 0x00254c);
    626	u32 code = intr & 0x000000ff;
    627	const struct nvkm_enum *en =
    628		nvkm_enum_find(gk104_fifo_sched_reason, code);
    629
    630	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
    631
    632	switch (code) {
    633	case 0x0a:
    634		gk104_fifo_intr_sched_ctxsw(fifo);
    635		break;
    636	default:
    637		break;
    638	}
    639}
    640
    641void
    642gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
    643{
    644	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
    645	struct nvkm_device *device = subdev->device;
    646	u32 stat = nvkm_rd32(device, 0x00256c);
    647	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
    648	nvkm_wr32(device, 0x00256c, stat);
    649}
    650
    651void
    652gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
    653{
    654	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
    655	struct nvkm_device *device = subdev->device;
    656	u32 stat = nvkm_rd32(device, 0x00259c);
    657	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
    658}
    659
    660static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
    661	{ 0x00000001, "MEMREQ" },
    662	{ 0x00000002, "MEMACK_TIMEOUT" },
    663	{ 0x00000004, "MEMACK_EXTRA" },
    664	{ 0x00000008, "MEMDAT_TIMEOUT" },
    665	{ 0x00000010, "MEMDAT_EXTRA" },
    666	{ 0x00000020, "MEMFLUSH" },
    667	{ 0x00000040, "MEMOP" },
    668	{ 0x00000080, "LBCONNECT" },
    669	{ 0x00000100, "LBREQ" },
    670	{ 0x00000200, "LBACK_TIMEOUT" },
    671	{ 0x00000400, "LBACK_EXTRA" },
    672	{ 0x00000800, "LBDAT_TIMEOUT" },
    673	{ 0x00001000, "LBDAT_EXTRA" },
    674	{ 0x00002000, "GPFIFO" },
    675	{ 0x00004000, "GPPTR" },
    676	{ 0x00008000, "GPENTRY" },
    677	{ 0x00010000, "GPCRC" },
    678	{ 0x00020000, "PBPTR" },
    679	{ 0x00040000, "PBENTRY" },
    680	{ 0x00080000, "PBCRC" },
    681	{ 0x00100000, "XBARCONNECT" },
    682	{ 0x00200000, "METHOD" },
    683	{ 0x00400000, "METHODCRC" },
    684	{ 0x00800000, "DEVICE" },
    685	{ 0x02000000, "SEMAPHORE" },
    686	{ 0x04000000, "ACQUIRE" },
    687	{ 0x08000000, "PRI" },
    688	{ 0x20000000, "NO_CTXSW_SEG" },
    689	{ 0x40000000, "PBSEG" },
    690	{ 0x80000000, "SIGNATURE" },
    691	{}
    692};
    693
    694void
    695gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
    696{
    697	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
    698	struct nvkm_device *device = subdev->device;
    699	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
    700	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
    701	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
    702	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
    703	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
    704	u32 subc = (addr & 0x00070000) >> 16;
    705	u32 mthd = (addr & 0x00003ffc);
    706	u32 show = stat;
    707	struct nvkm_fifo_chan *chan;
    708	unsigned long flags;
    709	char msg[128];
    710
    711	if (stat & 0x00800000) {
    712		if (device->sw) {
    713			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
    714				show &= ~0x00800000;
    715		}
    716	}
    717
    718	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
    719
    720	if (show) {
    721		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
    722		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
    723		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
    724				   "subc %d mthd %04x data %08x\n",
    725			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
    726			   chan ? chan->object.client->name : "unknown",
    727			   subc, mthd, data);
    728		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
    729	}
    730
    731	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
    732}
    733
    734static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
    735	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
    736	{ 0x00000002, "HCE_RE_ALIGNB" },
    737	{ 0x00000004, "HCE_PRIV" },
    738	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
    739	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
    740	{}
    741};
    742
    743void
    744gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
    745{
    746	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
    747	struct nvkm_device *device = subdev->device;
    748	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
    749	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
    750	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
    751	char msg[128];
    752
    753	if (stat) {
    754		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
    755		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
    756			   unit, stat, msg, chid,
    757			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
    758			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
    759	}
    760
    761	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
    762}
    763
    764void
    765gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
    766{
    767	struct nvkm_device *device = fifo->base.engine.subdev.device;
    768	u32 mask = nvkm_rd32(device, 0x002a00);
    769	while (mask) {
    770		int runl = __ffs(mask);
    771		wake_up(&fifo->runlist[runl].wait);
    772		nvkm_wr32(device, 0x002a00, 1 << runl);
    773		mask &= ~(1 << runl);
    774	}
    775}
    776
    777void
    778gk104_fifo_intr_engine(struct gk104_fifo *fifo)
    779{
    780	nvkm_fifo_uevent(&fifo->base);
    781}
    782
    783static void
    784gk104_fifo_intr(struct nvkm_fifo *base)
    785{
    786	struct gk104_fifo *fifo = gk104_fifo(base);
    787	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
    788	struct nvkm_device *device = subdev->device;
    789	u32 mask = nvkm_rd32(device, 0x002140);
    790	u32 stat = nvkm_rd32(device, 0x002100) & mask;
    791
    792	if (stat & 0x00000001) {
    793		gk104_fifo_intr_bind(fifo);
    794		nvkm_wr32(device, 0x002100, 0x00000001);
    795		stat &= ~0x00000001;
    796	}
    797
    798	if (stat & 0x00000010) {
    799		nvkm_error(subdev, "PIO_ERROR\n");
    800		nvkm_wr32(device, 0x002100, 0x00000010);
    801		stat &= ~0x00000010;
    802	}
    803
    804	if (stat & 0x00000100) {
    805		gk104_fifo_intr_sched(fifo);
    806		nvkm_wr32(device, 0x002100, 0x00000100);
    807		stat &= ~0x00000100;
    808	}
    809
    810	if (stat & 0x00010000) {
    811		gk104_fifo_intr_chsw(fifo);
    812		nvkm_wr32(device, 0x002100, 0x00010000);
    813		stat &= ~0x00010000;
    814	}
    815
    816	if (stat & 0x00800000) {
    817		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
    818		nvkm_wr32(device, 0x002100, 0x00800000);
    819		stat &= ~0x00800000;
    820	}
    821
    822	if (stat & 0x01000000) {
    823		nvkm_error(subdev, "LB_ERROR\n");
    824		nvkm_wr32(device, 0x002100, 0x01000000);
    825		stat &= ~0x01000000;
    826	}
    827
    828	if (stat & 0x08000000) {
    829		gk104_fifo_intr_dropped_fault(fifo);
    830		nvkm_wr32(device, 0x002100, 0x08000000);
    831		stat &= ~0x08000000;
    832	}
    833
    834	if (stat & 0x10000000) {
    835		u32 mask = nvkm_rd32(device, 0x00259c);
    836		while (mask) {
    837			u32 unit = __ffs(mask);
    838			fifo->func->intr.fault(&fifo->base, unit);
    839			nvkm_wr32(device, 0x00259c, (1 << unit));
    840			mask &= ~(1 << unit);
    841		}
    842		stat &= ~0x10000000;
    843	}
    844
    845	if (stat & 0x20000000) {
    846		u32 mask = nvkm_rd32(device, 0x0025a0);
    847		while (mask) {
    848			u32 unit = __ffs(mask);
    849			gk104_fifo_intr_pbdma_0(fifo, unit);
    850			gk104_fifo_intr_pbdma_1(fifo, unit);
    851			nvkm_wr32(device, 0x0025a0, (1 << unit));
    852			mask &= ~(1 << unit);
    853		}
    854		stat &= ~0x20000000;
    855	}
    856
    857	if (stat & 0x40000000) {
    858		gk104_fifo_intr_runlist(fifo);
    859		stat &= ~0x40000000;
    860	}
    861
    862	if (stat & 0x80000000) {
    863		nvkm_wr32(device, 0x002100, 0x80000000);
    864		gk104_fifo_intr_engine(fifo);
    865		stat &= ~0x80000000;
    866	}
    867
    868	if (stat) {
    869		nvkm_error(subdev, "INTR %08x\n", stat);
    870		nvkm_mask(device, 0x002140, stat, 0x00000000);
    871		nvkm_wr32(device, 0x002100, stat);
    872	}
    873}
    874
    875void
    876gk104_fifo_fini(struct nvkm_fifo *base)
    877{
    878	struct gk104_fifo *fifo = gk104_fifo(base);
    879	struct nvkm_device *device = fifo->base.engine.subdev.device;
    880	flush_work(&fifo->recover.work);
     881	/* Allow MMU fault interrupts, even when we're not using the FIFO. */
    882	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
    883}
    884
    885int
    886gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
    887{
    888	struct gk104_fifo *fifo = gk104_fifo(base);
    889	switch (mthd) {
    890	case NV_DEVICE_HOST_RUNLISTS:
    891		*data = (1ULL << fifo->runlist_nr) - 1;
    892		return 0;
    893	case NV_DEVICE_HOST_RUNLIST_ENGINES: {
    894		if (*data < fifo->runlist_nr) {
    895			unsigned long engm = fifo->runlist[*data].engm;
    896			struct nvkm_engine *engine;
    897			int engn;
    898			*data = 0;
    899			for_each_set_bit(engn, &engm, fifo->engine_nr) {
    900				if ((engine = fifo->engine[engn].engine)) {
    901#define CASE(n) case NVKM_ENGINE_##n: *data |= NV_DEVICE_HOST_RUNLIST_ENGINES_##n; break
    902					switch (engine->subdev.type) {
    903					CASE(SW    );
    904					CASE(GR    );
    905					CASE(MPEG  );
    906					CASE(ME    );
    907					CASE(CIPHER);
    908					CASE(BSP   );
    909					CASE(VP    );
    910					CASE(CE    );
    911					CASE(SEC   );
    912					CASE(MSVLD );
    913					CASE(MSPDEC);
    914					CASE(MSPPP );
    915					CASE(MSENC );
    916					CASE(VIC   );
    917					CASE(SEC2  );
    918					CASE(NVDEC );
    919					CASE(NVENC );
    920					default:
    921						WARN_ON(1);
    922						break;
    923					}
    924				}
    925			}
    926			return 0;
    927		}
    928	}
    929		return -EINVAL;
    930	default:
    931		return -EINVAL;
    932	}
    933}
    934
    935int
    936gk104_fifo_oneinit(struct nvkm_fifo *base)
    937{
    938	struct gk104_fifo *fifo = gk104_fifo(base);
    939	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
    940	struct nvkm_device *device = subdev->device;
    941	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
    942	struct nvkm_top_device *tdev;
    943	int pbid, ret, i, j;
    944	u32 *map;
    945
    946	fifo->pbdma_nr = fifo->func->pbdma->nr(fifo);
    947	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
    948
    949	/* Read PBDMA->runlist(s) mapping from HW. */
    950	if (!(map = kcalloc(fifo->pbdma_nr, sizeof(*map), GFP_KERNEL)))
    951		return -ENOMEM;
    952
    953	for (i = 0; i < fifo->pbdma_nr; i++)
    954		map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));
    955
    956	/* Determine runlist configuration from topology device info. */
    957	list_for_each_entry(tdev, &device->top->device, head) {
    958		const int engn = tdev->engine;
    959		char _en[16], *en;
    960
    961		if (engn < 0)
    962			continue;
    963
    964		/* Determine which PBDMA handles requests for this engine. */
    965		for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
    966			if (map[j] & BIT(tdev->runlist)) {
    967				pbid = j;
    968				break;
    969			}
    970		}
    971
    972		fifo->engine[engn].engine = nvkm_device_engine(device, tdev->type, tdev->inst);
    973		if (!fifo->engine[engn].engine) {
    974			snprintf(_en, sizeof(_en), "%s, %d",
    975				 nvkm_subdev_type[tdev->type], tdev->inst);
    976			en = _en;
    977		} else {
    978			en = fifo->engine[engn].engine->subdev.name;
    979		}
    980
    981		nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
    982			   tdev->engine, tdev->runlist, pbid, en);
    983
    984		fifo->engine[engn].runl = tdev->runlist;
    985		fifo->engine[engn].pbid = pbid;
    986		fifo->engine_nr = max(fifo->engine_nr, engn + 1);
    987		fifo->runlist[tdev->runlist].engm |= BIT(engn);
    988		fifo->runlist[tdev->runlist].engm_sw |= BIT(engn);
    989		if (tdev->type == NVKM_ENGINE_GR)
    990			fifo->runlist[tdev->runlist].engm_sw |= BIT(GK104_FIFO_ENGN_SW);
    991		fifo->runlist_nr = max(fifo->runlist_nr, tdev->runlist + 1);
    992	}
    993
    994	kfree(map);
    995
    996	for (i = 0; i < fifo->runlist_nr; i++) {
    997		for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) {
    998			ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
    999					      fifo->base.nr * 2/* TSG+chan */ *
   1000					      fifo->func->runlist->size,
   1001					      0x1000, false,
   1002					      &fifo->runlist[i].mem[j]);
   1003			if (ret)
   1004				return ret;
   1005		}
   1006
   1007		init_waitqueue_head(&fifo->runlist[i].wait);
   1008		INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
   1009		INIT_LIST_HEAD(&fifo->runlist[i].chan);
   1010	}
   1011
   1012	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
   1013			      fifo->base.nr * 0x200, 0x1000, true,
   1014			      &fifo->user.mem);
   1015	if (ret)
   1016		return ret;
   1017
   1018	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
   1019			   &fifo->user.bar);
   1020	if (ret)
   1021		return ret;
   1022
   1023	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
   1024}
   1025
   1026void
   1027gk104_fifo_init(struct nvkm_fifo *base)
   1028{
   1029	struct gk104_fifo *fifo = gk104_fifo(base);
   1030	struct nvkm_device *device = fifo->base.engine.subdev.device;
   1031	int i;
   1032
   1033	/* Enable PBDMAs. */
   1034	fifo->func->pbdma->init(fifo);
   1035
   1036	/* PBDMA[n] */
   1037	for (i = 0; i < fifo->pbdma_nr; i++) {
   1038		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
   1039		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
   1040		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
   1041	}
   1042
   1043	/* PBDMA[n].HCE */
   1044	for (i = 0; i < fifo->pbdma_nr; i++) {
   1045		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
   1046		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
   1047	}
   1048
   1049	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
   1050
   1051	if (fifo->func->pbdma->init_timeout)
   1052		fifo->func->pbdma->init_timeout(fifo);
   1053
   1054	nvkm_wr32(device, 0x002100, 0xffffffff);
   1055	nvkm_wr32(device, 0x002140, 0x7fffffff);
   1056}
   1057
   1058void *
   1059gk104_fifo_dtor(struct nvkm_fifo *base)
   1060{
   1061	struct gk104_fifo *fifo = gk104_fifo(base);
   1062	struct nvkm_device *device = fifo->base.engine.subdev.device;
   1063	int i;
   1064
   1065	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
   1066	nvkm_memory_unref(&fifo->user.mem);
   1067
   1068	for (i = 0; i < fifo->runlist_nr; i++) {
   1069		nvkm_memory_unref(&fifo->runlist[i].mem[1]);
   1070		nvkm_memory_unref(&fifo->runlist[i].mem[0]);
   1071	}
   1072
   1073	return fifo;
   1074}
   1075
   1076static const struct nvkm_fifo_func
   1077gk104_fifo_ = {
   1078	.dtor = gk104_fifo_dtor,
   1079	.oneinit = gk104_fifo_oneinit,
   1080	.info = gk104_fifo_info,
   1081	.init = gk104_fifo_init,
   1082	.fini = gk104_fifo_fini,
   1083	.intr = gk104_fifo_intr,
   1084	.fault = gk104_fifo_fault,
   1085	.engine_id = gk104_fifo_engine_id,
   1086	.id_engine = gk104_fifo_id_engine,
   1087	.uevent_init = gk104_fifo_uevent_init,
   1088	.uevent_fini = gk104_fifo_uevent_fini,
   1089	.recover_chan = gk104_fifo_recover_chan,
   1090	.class_get = gk104_fifo_class_get,
   1091	.class_new = gk104_fifo_class_new,
   1092};
   1093
   1094int
   1095gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
   1096		enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo **pfifo)
   1097{
   1098	struct gk104_fifo *fifo;
   1099
   1100	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
   1101		return -ENOMEM;
   1102	fifo->func = func;
   1103	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
   1104	*pfifo = &fifo->base;
   1105
   1106	return nvkm_fifo_ctor(&gk104_fifo_, device, type, inst, nr, &fifo->base);
   1107}
   1108
   1109const struct nvkm_enum
   1110gk104_fifo_fault_access[] = {
   1111	{ 0x0, "READ" },
   1112	{ 0x1, "WRITE" },
   1113	{}
   1114};
   1115
   1116const struct nvkm_enum
   1117gk104_fifo_fault_engine[] = {
   1118	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
   1119	{ 0x01, "DISPLAY" },
   1120	{ 0x02, "CAPTURE" },
   1121	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
   1122	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
   1123	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
   1124	{ 0x06, "SCHED" },
   1125	{ 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
   1126	{ 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
   1127	{ 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
   1128	{ 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
   1129	{ 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
   1130	{ 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
   1131	{ 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
   1132	{ 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
   1133	{ 0x0f, "HOSTSR" },
   1134	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
   1135	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
   1136	{ 0x13, "PERF" },
   1137	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
   1138	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 },
   1139	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 },
   1140	{ 0x17, "PMU" },
   1141	{ 0x18, "PTP" },
   1142	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
   1143	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 },
   1144	{}
   1145};
   1146
   1147const struct nvkm_enum
   1148gk104_fifo_fault_reason[] = {
   1149	{ 0x00, "PDE" },
   1150	{ 0x01, "PDE_SIZE" },
   1151	{ 0x02, "PTE" },
   1152	{ 0x03, "VA_LIMIT_VIOLATION" },
   1153	{ 0x04, "UNBOUND_INST_BLOCK" },
   1154	{ 0x05, "PRIV_VIOLATION" },
   1155	{ 0x06, "RO_VIOLATION" },
   1156	{ 0x07, "WO_VIOLATION" },
   1157	{ 0x08, "PITCH_MASK_VIOLATION" },
   1158	{ 0x09, "WORK_CREATION" },
   1159	{ 0x0a, "UNSUPPORTED_APERTURE" },
   1160	{ 0x0b, "COMPRESSION_FAILURE" },
   1161	{ 0x0c, "UNSUPPORTED_KIND" },
   1162	{ 0x0d, "REGION_VIOLATION" },
   1163	{ 0x0e, "BOTH_PTES_VALID" },
   1164	{ 0x0f, "INFO_TYPE_POISONED" },
   1165	{}
   1166};
   1167
   1168const struct nvkm_enum
   1169gk104_fifo_fault_hubclient[] = {
   1170	{ 0x00, "VIP" },
   1171	{ 0x01, "CE0" },
   1172	{ 0x02, "CE1" },
   1173	{ 0x03, "DNISO" },
   1174	{ 0x04, "FE" },
   1175	{ 0x05, "FECS" },
   1176	{ 0x06, "HOST" },
   1177	{ 0x07, "HOST_CPU" },
   1178	{ 0x08, "HOST_CPU_NB" },
   1179	{ 0x09, "ISO" },
   1180	{ 0x0a, "MMU" },
   1181	{ 0x0b, "MSPDEC" },
   1182	{ 0x0c, "MSPPP" },
   1183	{ 0x0d, "MSVLD" },
   1184	{ 0x0e, "NISO" },
   1185	{ 0x0f, "P2P" },
   1186	{ 0x10, "PD" },
   1187	{ 0x11, "PERF" },
   1188	{ 0x12, "PMU" },
   1189	{ 0x13, "RASTERTWOD" },
   1190	{ 0x14, "SCC" },
   1191	{ 0x15, "SCC_NB" },
   1192	{ 0x16, "SEC" },
   1193	{ 0x17, "SSYNC" },
   1194	{ 0x18, "GR_CE" },
   1195	{ 0x19, "CE2" },
   1196	{ 0x1a, "XV" },
   1197	{ 0x1b, "MMU_NB" },
   1198	{ 0x1c, "MSENC" },
   1199	{ 0x1d, "DFALCON" },
   1200	{ 0x1e, "SKED" },
   1201	{ 0x1f, "AFALCON" },
   1202	{}
   1203};
   1204
   1205const struct nvkm_enum
   1206gk104_fifo_fault_gpcclient[] = {
   1207	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
   1208	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
   1209	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
   1210	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
   1211	{ 0x0c, "RAST" },
   1212	{ 0x0d, "GCC" },
   1213	{ 0x0e, "GPCCS" },
   1214	{ 0x0f, "PROP_0" },
   1215	{ 0x10, "PROP_1" },
   1216	{ 0x11, "PROP_2" },
   1217	{ 0x12, "PROP_3" },
   1218	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
   1219	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
   1220	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
   1221	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
   1222	{ 0x1f, "GPM" },
   1223	{ 0x20, "LTP_UTLB_0" },
   1224	{ 0x21, "LTP_UTLB_1" },
   1225	{ 0x22, "LTP_UTLB_2" },
   1226	{ 0x23, "LTP_UTLB_3" },
   1227	{ 0x24, "GPC_RGG_UTLB" },
   1228	{}
   1229};
   1230
   1231static const struct gk104_fifo_func
   1232gk104_fifo = {
   1233	.intr.fault = gf100_fifo_intr_fault,
   1234	.pbdma = &gk104_fifo_pbdma,
   1235	.fault.access = gk104_fifo_fault_access,
   1236	.fault.engine = gk104_fifo_fault_engine,
   1237	.fault.reason = gk104_fifo_fault_reason,
   1238	.fault.hubclient = gk104_fifo_fault_hubclient,
   1239	.fault.gpcclient = gk104_fifo_fault_gpcclient,
   1240	.runlist = &gk104_fifo_runlist,
   1241	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
   1242};
   1243
   1244int
   1245gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
   1246	       struct nvkm_fifo **pfifo)
   1247{
   1248	return gk104_fifo_new_(&gk104_fifo, device, type, inst, 4096, pfifo);
   1249}