vmmnv41.c (3230B)
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "vmm.h"

#include <subdev/timer.h>

static void
nv41_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u32 data = (addr >> 7) | 0x00000001; /* VALID. */
	while (ptes--) {
		VMM_WO032(pt, vmm, ptei++ * 4, data);
		data += 0x00000020;
	}
}

static void
nv41_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
}

static void
nv41_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
#if PAGE_SHIFT == 12
	nvkm_kmap(pt->memory);
	while (ptes--) {
		const u32 data = (*map->dma++ >> 7) | 0x00000001;
		VMM_WO032(pt, vmm, ptei++ * 4, data);
	}
	nvkm_done(pt->memory);
#else
	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
#endif
}

static void
nv41_vmm_pgt_unmap(struct nvkm_vmm *vmm,
		   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	VMM_FO032(pt, vmm, ptei * 4, 0, ptes);
}

static const struct nvkm_vmm_desc_func
nv41_vmm_desc_pgt = {
	.unmap = nv41_vmm_pgt_unmap,
	.dma = nv41_vmm_pgt_dma,
	.sgl = nv41_vmm_pgt_sgl,
};

static const struct nvkm_vmm_desc
nv41_vmm_desc_12[] = {
	{ PGT, 17, 4, 0x1000, &nv41_vmm_desc_pgt },
	{}
};

static void
nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
{
	struct nvkm_device *device = vmm->mmu->subdev.device;

	mutex_lock(&vmm->mmu->mutex);
	nvkm_wr32(device, 0x100810, 0x00000022);
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100810) & 0x00000020)
			break;
	);
	nvkm_wr32(device, 0x100810, 0x00000000);
	mutex_unlock(&vmm->mmu->mutex);
}

static const struct nvkm_vmm_func
nv41_vmm = {
	.valid = nv04_vmm_valid,
	.flush = nv41_vmm_flush,
	.page = {
		{ 12, &nv41_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
		{}
	}
};

int
nv41_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	     void *argv, u32 argc, struct lock_class_key *key, const char *name,
	     struct nvkm_vmm **pvmm)
{
	return nv04_vmm_new_(&nv41_vmm, mmu, 0, managed, addr, size,
			     argv, argc, key, name, pvmm);
}