base.c (9176B)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "chan.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/notify.h>
#include <subdev/mc.h>

#include <nvif/event.h>
#include <nvif/cl0080.h>
#include <nvif/unpack.h>

void
nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid)
{
	unsigned long flags;
	if (WARN_ON(!fifo->func->recover_chan))
		return;
	spin_lock_irqsave(&fifo->lock, flags);
	fifo->func->recover_chan(fifo, chid);
	spin_unlock_irqrestore(&fifo->lock, flags);
}

void
nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
{
	return fifo->func->pause(fifo, flags);
}

void
nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
{
	return fifo->func->start(fifo, flags);
}

void
nvkm_fifo_fault(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
{
	return fifo->func->fault(fifo, info);
}

/* Drop a channel reference returned by nvkm_fifo_chan_inst()/_chid(),
 * releasing the fifo lock those lookups left held.
 */
void
nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
		   struct nvkm_fifo_chan **pchan)
{
	struct nvkm_fifo_chan *chan = *pchan;
	if (likely(chan)) {
		*pchan = NULL;
		spin_unlock_irqrestore(&fifo->lock, flags);
	}
}

struct nvkm_fifo_chan *
nvkm_fifo_chan_inst_locked(struct nvkm_fifo *fifo, u64 inst)
{
	struct nvkm_fifo_chan *chan;
	list_for_each_entry(chan, &fifo->chan, head) {
		if (chan->inst->addr == inst) {
			/* Move the match to the head of the list so that
			 * lookups of recently-active channels stay cheap.
			 */
			list_del(&chan->head);
			list_add(&chan->head, &fifo->chan);
			return chan;
		}
	}
	return NULL;
}

struct nvkm_fifo_chan *
nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
{
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	spin_lock_irqsave(&fifo->lock, flags);
	if ((chan = nvkm_fifo_chan_inst_locked(fifo, inst))) {
		*rflags = flags;
		return chan;
	}
	spin_unlock_irqrestore(&fifo->lock, flags);
	return NULL;
}

struct nvkm_fifo_chan *
nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
{
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	spin_lock_irqsave(&fifo->lock, flags);
	list_for_each_entry(chan, &fifo->chan, head) {
		if (chan->chid == chid) {
			list_del(&chan->head);
			list_add(&chan->head, &fifo->chan);
			*rflags = flags;
			return chan;
		}
	}
	spin_unlock_irqrestore(&fifo->lock, flags);
	return NULL;
}

void
nvkm_fifo_kevent(struct nvkm_fifo *fifo, int chid)
{
	nvkm_event_send(&fifo->kevent, 1, chid, NULL, 0);
}

static int
nvkm_fifo_kevent_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (size == 0) {
		notify->size  = 0;
		notify->types = 1;
		notify->index = chan->chid;
		return 0;
	}
	return -ENOSYS;
}

static const struct nvkm_event_func
nvkm_fifo_kevent_func = {
	.ctor = nvkm_fifo_kevent_ctor,
};

static int
nvkm_fifo_cevent_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	if (size == 0) {
		notify->size  = 0;
		notify->types = 1;
		notify->index = 0;
		return 0;
	}
	return -ENOSYS;
}

static const struct nvkm_event_func
nvkm_fifo_cevent_func = {
	.ctor = nvkm_fifo_cevent_ctor,
};

void
nvkm_fifo_cevent(struct nvkm_fifo *fifo)
{
	nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
}

static void
nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	fifo->func->uevent_fini(fifo);
}

static void
nvkm_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	fifo->func->uevent_init(fifo);
}

static int
nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	union {
		struct nvif_notify_uevent_req none;
	} *req = data;
	int ret = -ENOSYS;

	if (!(ret = nvif_unvers(ret, &data, &size, req->none))) {
		notify->size  = sizeof(struct nvif_notify_uevent_rep);
		notify->types = 1;
		notify->index = 0;
	}

	return ret;
}

static const struct nvkm_event_func
nvkm_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = nvkm_fifo_uevent_init,
	.fini = nvkm_fifo_uevent_fini,
};

void
nvkm_fifo_uevent(struct nvkm_fifo *fifo)
{
	struct nvif_notify_uevent_rep rep = {
	};
	nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
}

static int
nvkm_fifo_class_new_(struct nvkm_device *device,
		     const struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
	return fifo->func->class_new(fifo, oclass, data, size, pobject);
}

static const struct nvkm_device_oclass
nvkm_fifo_class_ = {
	.ctor = nvkm_fifo_class_new_,
};

static int
nvkm_fifo_class_new(struct nvkm_device *device,
		    const struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	const struct nvkm_fifo_chan_oclass *sclass = oclass->engn;
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
	return sclass->ctor(fifo, oclass, data, size, pobject);
}

static const struct nvkm_device_oclass
nvkm_fifo_class = {
	.ctor = nvkm_fifo_class_new,
};

/* Enumerate the channel classes a chipset exposes, either through its
 * class_get hook or the NULL-terminated func->chan[] list.
 */
static int
nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
		    const struct nvkm_device_oclass **class)
{
	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
	const struct nvkm_fifo_chan_oclass *sclass;
	int c = 0;

	if (fifo->func->class_get) {
		int ret = fifo->func->class_get(fifo, index, oclass);
		if (ret == 0)
			*class = &nvkm_fifo_class_;
		return ret;
	}

	while ((sclass = fifo->func->chan[c])) {
		if (c++ == index) {
			oclass->base = sclass->base;
			oclass->engn = sclass;
			*class = &nvkm_fifo_class;
			return 0;
		}
	}

	return c;
}

static void
nvkm_fifo_intr(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	fifo->func->intr(fifo);
}

static int
nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	if (fifo->func->fini)
		fifo->func->fini(fifo);
	return 0;
}

static int
nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	switch (mthd) {
	case NV_DEVICE_HOST_CHANNELS: *data = fifo->nr; return 0;
	default:
		if (fifo->func->info)
			return fifo->func->info(fifo, mthd, data);
		break;
	}
	return -ENOSYS;
}

static int
nvkm_fifo_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	if (fifo->func->oneinit)
		return fifo->func->oneinit(fifo);
	return 0;
}

static void
nvkm_fifo_preinit(struct nvkm_engine *engine)
{
	nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO, 0);
}

static int
nvkm_fifo_init(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	fifo->func->init(fifo);
	return 0;
}

static void *
nvkm_fifo_dtor(struct nvkm_engine *engine)
{
	struct nvkm_fifo *fifo = nvkm_fifo(engine);
	void *data = fifo;
	if (fifo->func->dtor)
		data = fifo->func->dtor(fifo);
	nvkm_event_fini(&fifo->kevent);
	nvkm_event_fini(&fifo->cevent);
	nvkm_event_fini(&fifo->uevent);
	mutex_destroy(&fifo->mutex);
	return data;
}

static const struct nvkm_engine_func
nvkm_fifo = {
	.dtor = nvkm_fifo_dtor,
	.preinit = nvkm_fifo_preinit,
	.oneinit = nvkm_fifo_oneinit,
	.info = nvkm_fifo_info,
	.init = nvkm_fifo_init,
	.fini = nvkm_fifo_fini,
	.intr = nvkm_fifo_intr,
	.base.sclass = nvkm_fifo_class_get,
};

int
nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
	       enum nvkm_subdev_type type, int inst, int nr, struct nvkm_fifo *fifo)
{
	int ret;

	fifo->func = func;
	INIT_LIST_HEAD(&fifo->chan);
	spin_lock_init(&fifo->lock);
	mutex_init(&fifo->mutex);

	if (WARN_ON(fifo->nr > NVKM_FIFO_CHID_NR))
		fifo->nr = NVKM_FIFO_CHID_NR;
	else
		fifo->nr = nr;
	bitmap_clear(fifo->mask, 0, fifo->nr);

	ret = nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
	if (ret)
		return ret;

	if (func->uevent_init) {
		ret = nvkm_event_init(&nvkm_fifo_uevent_func, 1, 1,
				      &fifo->uevent);
		if (ret)
			return ret;
	}

	ret = nvkm_event_init(&nvkm_fifo_cevent_func, 1, 1, &fifo->cevent);
	if (ret)
		return ret;

	return nvkm_event_init(&nvkm_fifo_kevent_func, 1, nr, &fifo->kevent);
}
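
As context for how this base class is consumed: a chipset-specific FIFO backend fills in an nvkm_fifo_func table and hands it to nvkm_fifo_ctor() from its *_new() entry point. The following is a minimal sketch only; the "foo" naming (foo_fifo, foo_fifo_new, foo_fifo_init, foo_fifo_intr), the channel count of 128, and the commented-out channel class are assumptions for illustration, not code from the driver, and only mirror the hooks this file actually dereferences (init, intr, chan[], and the optional uevent/class callbacks).

/* Hypothetical chipset backend "foo", sketched against the hooks used above. */
#include "priv.h"
#include "chan.h"

static void
foo_fifo_init(struct nvkm_fifo *fifo)
{
	/* Program channel RAM, enable PFIFO interrupts, etc. */
}

static void
foo_fifo_intr(struct nvkm_fifo *fifo)
{
	/* Decode the interrupt; non-stall interrupts would typically be
	 * forwarded to waiters with nvkm_fifo_uevent(fifo). */
}

static const struct nvkm_fifo_func
foo_fifo = {
	.init = foo_fifo_init,
	.intr = foo_fifo_intr,
	.chan = {
		/* &foo_fifo_gpfifo_oclass, -- hypothetical channel class */
		NULL,
	},
};

int
foo_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	     struct nvkm_fifo **pfifo)
{
	struct nvkm_fifo *fifo;

	if (!(fifo = *pfifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;

	/* 128 channels is an arbitrary example value. */
	return nvkm_fifo_ctor(&foo_fifo, device, type, inst, 128, fifo);
}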