channv50.c (8094B)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "channv50.h"

#include <core/client.h>
#include <core/ramht.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

static int
nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
{
	switch (engine->subdev.type) {
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW    : return -1;
	case NVKM_ENGINE_GR    : return 0x0000;
	case NVKM_ENGINE_MPEG  : return 0x0060;
	default:
		WARN_ON(1);
		return -1;
	}
}

struct nvkm_gpuobj **
nv50_fifo_chan_engine(struct nv50_fifo_chan *chan, struct nvkm_engine *engine)
{
	int engi = chan->base.fifo->func->engine_id(chan->base.fifo, engine);
	if (engi >= 0)
		return &chan->engn[engi];
	return NULL;
}

static int
nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine, bool suspend)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int offset, ret = 0;
	u32 me;

	offset = nv50_fifo_chan_engine_addr(engine);
	if (offset < 0)
		return 0;

	/* HW bug workaround:
	 *
	 * PFIFO will hang forever if the connected engines don't report
	 * that they've processed the context switch request.
	 *
	 * In order for the kickoff to work, we need to ensure all the
	 * connected engines are in a state where they can answer.
	 *
	 * Newer chipsets don't seem to suffer from this issue, and well,
	 * there's also a "ignore these engines" bitmask reg we can use
	 * if we hit the issue there..
	 */
	me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);

	/* do the kickoff... */
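	/* (as written below: the channel's instance address is handed to
	 *  PFIFO via 0x0032fc, and the poll waits up to 2ms for the
	 *  register to read back anything other than 0xffffffff before
	 *  declaring the context unload timed out)
	 */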
	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
			   chan->base.chid, chan->base.object.client->name);
		if (suspend)
			ret = -EBUSY;
	}
	nvkm_wr32(device, 0x00b860, me);

	if (ret == 0) {
		nvkm_kmap(chan->eng);
		nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
		nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
		nvkm_done(chan->eng);
	}

	return ret;
}

static int
nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nvkm_gpuobj *engn = *nv50_fifo_chan_engine(chan, engine);
	u64 limit, start;
	int offset;

	offset = nv50_fifo_chan_engine_addr(engine);
	if (offset < 0)
		return 0;
	limit = engn->addr + engn->size - 1;
	start = engn->addr;

	nvkm_kmap(chan->eng);
	nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
	nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
	nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
	nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
					    upper_32_bits(start));
	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
	nvkm_done(chan->eng);
	return 0;
}

void
nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	nvkm_gpuobj_del(nv50_fifo_chan_engine(chan, engine));
}

static int
nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
			   struct nvkm_engine *engine,
			   struct nvkm_object *object)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);

	if (nv50_fifo_chan_engine_addr(engine) < 0)
		return 0;

	return nvkm_object_bind(object, NULL, 0, nv50_fifo_chan_engine(chan, engine));
}

void
nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *base, int cookie)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	nvkm_ramht_remove(chan->ramht, cookie);
}

static int
nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
			   struct nvkm_object *object)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	u32 handle = object->handle;
	u32 context;

	switch (object->engine->subdev.type) {
	case NVKM_ENGINE_DMAOBJ:
	case NVKM_ENGINE_SW    : context = 0x00000000; break;
	case NVKM_ENGINE_GR    : context = 0x00100000; break;
	case NVKM_ENGINE_MPEG  : context = 0x00200000; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
}

void
nv50_fifo_chan_fini(struct nvkm_fifo_chan *base)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	/* remove channel from runlist, fifo will unload context */
	nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
	nv50_fifo_runlist_update(fifo);
	nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);
}

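/* NOTE: judging from the init/fini paths here, 0x002600 + (chid * 4)
 * holds the channel's RAMFC address in 0x1000-byte units, with bit 31
 * acting as a valid/enabled flag: init below sets it, fini above
 * clears it before rewriting the runlist.
 */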
static void
nv50_fifo_chan_init(struct nvkm_fifo_chan *base)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	struct nv50_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u64 addr = chan->ramfc->addr >> 12;
	u32 chid = chan->base.chid;

	nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
	nv50_fifo_runlist_update(fifo);
}

void *
nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
{
	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
	nvkm_ramht_del(&chan->ramht);
	nvkm_gpuobj_del(&chan->pgd);
	nvkm_gpuobj_del(&chan->eng);
	nvkm_gpuobj_del(&chan->cache);
	nvkm_gpuobj_del(&chan->ramfc);
	return chan;
}

static const struct nvkm_fifo_chan_func
nv50_fifo_chan_func = {
	.dtor = nv50_fifo_chan_dtor,
	.init = nv50_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.engine_ctor = nv50_fifo_chan_engine_ctor,
	.engine_dtor = nv50_fifo_chan_engine_dtor,
	.engine_init = nv50_fifo_chan_engine_init,
	.engine_fini = nv50_fifo_chan_engine_fini,
	.object_ctor = nv50_fifo_chan_object_ctor,
	.object_dtor = nv50_fifo_chan_object_dtor,
};

int
nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
		    const struct nvkm_oclass *oclass,
		    struct nv50_fifo_chan *chan)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	if (!vmm)
		return -EINVAL;

	ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
				  0x10000, 0x1000, false, vmm, push,
				  BIT(NV50_FIFO_ENGN_SW) |
				  BIT(NV50_FIFO_ENGN_GR) |
				  BIT(NV50_FIFO_ENGN_MPEG) |
				  BIT(NV50_FIFO_ENGN_DMA),
				  0, 0xc00000, 0x2000, oclass, &chan->base);
	chan->fifo = fifo;
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->base.inst,
			      &chan->ramfc);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->base.inst,
			      &chan->eng);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
			      &chan->pgd);
	if (ret)
		return ret;

	return nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
}