vmm.h
#ifndef __NVKM_VMM_H__
#define __NVKM_VMM_H__
#include "priv.h"
#include <core/memory.h>
enum nvkm_memory_target;

struct nvkm_vmm_pt {
	/* Some GPUs have a mapping level with dual page tables to
	 * support large and small pages in the same address-range.
	 *
	 * We track the state of both page tables in one place, which
	 * is why there are multiple PT pointers/refcounts here.
	 */
	struct nvkm_mmu_pt *pt[2];
	u32 refs[2];

	/* Page size handled by this PT.
	 *
	 * Tesla backend needs to know this when writing PDEs,
	 * otherwise unnecessary.
	 */
	u8 page;

	/* Entire page table sparse.
	 *
	 * Used to propagate sparseness to child page tables.
	 */
	bool sparse:1;

	/* Tracking for page directories.
	 *
	 * The array is indexed by PDE, and will either point to the
	 * child page table, or indicate the PDE is marked as sparse.
	 */
#define NVKM_VMM_PDE_INVALID(pde) IS_ERR_OR_NULL(pde)
#define NVKM_VMM_PDE_SPARSED(pde) IS_ERR(pde)
#define NVKM_VMM_PDE_SPARSE ERR_PTR(-EBUSY)
	struct nvkm_vmm_pt **pde;

	/* Tracking for dual page tables.
	 *
	 * There's one entry for each LPTE, keeping track of whether
	 * there are valid SPTEs in the same address-range.
	 *
	 * This information is used to manage LPTE state transitions.
	 */
#define NVKM_VMM_PTE_SPARSE 0x80
#define NVKM_VMM_PTE_VALID 0x40
#define NVKM_VMM_PTE_SPTES 0x3f
	u8 pte[];
};

typedef void (*nvkm_vmm_pxe_func)(struct nvkm_vmm *,
				  struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
typedef void (*nvkm_vmm_pde_func)(struct nvkm_vmm *,
				  struct nvkm_vmm_pt *, u32 pdei);
typedef void (*nvkm_vmm_pte_func)(struct nvkm_vmm *, struct nvkm_mmu_pt *,
				  u32 ptei, u32 ptes, struct nvkm_vmm_map *);

struct nvkm_vmm_desc_func {
	nvkm_vmm_pxe_func invalid;
	nvkm_vmm_pxe_func unmap;
	nvkm_vmm_pxe_func sparse;

	nvkm_vmm_pde_func pde;

	nvkm_vmm_pte_func mem;
	nvkm_vmm_pte_func dma;
	nvkm_vmm_pte_func sgl;

	nvkm_vmm_pte_func pfn;
	bool (*pfn_clear)(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
	nvkm_vmm_pxe_func pfn_unmap;
};

extern const struct nvkm_vmm_desc_func gf100_vmm_pgd;
void gf100_vmm_pgd_pde(struct nvkm_vmm *, struct nvkm_vmm_pt *, u32);
extern const struct nvkm_vmm_desc_func gf100_vmm_pgt;
void gf100_vmm_pgt_unmap(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);
void gf100_vmm_pgt_mem(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
		       struct nvkm_vmm_map *);
void gf100_vmm_pgt_dma(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
		       struct nvkm_vmm_map *);
void gf100_vmm_pgt_sgl(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
		       struct nvkm_vmm_map *);

void gk104_vmm_lpt_invalid(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);

struct nvkm_vmm_desc {
	enum {
		PGD,
		PGT,
		SPT,
		LPT,
	} type;
	u8 bits;	/* VMA bits covered by PT. */
	u8 size;	/* Bytes-per-PTE. */
	u32 align;	/* PT address alignment. */
	const struct nvkm_vmm_desc_func *func;
};
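
/* An illustrative decode of the dual-PT state byte in struct nvkm_vmm_pt
 * above; the helper names here are hypothetical, only the NVKM_VMM_PTE_*
 * layout comes from this header.  Each byte tracks one LPTE: the low six
 * bits count valid SPTEs covering the same address-range, and the top two
 * bits hold the LPTE's own sparse/valid state, which is what drives the
 * LPTE state transitions mentioned above.
 */
static inline u8
nvkm_vmm_pte_sptes(u8 pte)
{
	return pte & NVKM_VMM_PTE_SPTES;	/* 0..63 valid SPTEs */
}

static inline bool
nvkm_vmm_pte_sparse(u8 pte)
{
	return (pte & NVKM_VMM_PTE_SPARSE) != 0;
}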

extern const struct nvkm_vmm_desc nv50_vmm_desc_12[];
extern const struct nvkm_vmm_desc nv50_vmm_desc_16[];

extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_17_17[];

extern const struct nvkm_vmm_desc gm200_vmm_desc_16_12[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_16_16[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_17_12[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_17_17[];

extern const struct nvkm_vmm_desc gp100_vmm_desc_12[];
extern const struct nvkm_vmm_desc gp100_vmm_desc_16[];

struct nvkm_vmm_page {
	u8 shift;
	const struct nvkm_vmm_desc *desc;
#define NVKM_VMM_PAGE_SPARSE 0x01
#define NVKM_VMM_PAGE_VRAM 0x02
#define NVKM_VMM_PAGE_HOST 0x04
#define NVKM_VMM_PAGE_COMP 0x08
#define NVKM_VMM_PAGE_Sxxx (NVKM_VMM_PAGE_SPARSE)
#define NVKM_VMM_PAGE_xVxx (NVKM_VMM_PAGE_VRAM)
#define NVKM_VMM_PAGE_SVxx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_VRAM)
#define NVKM_VMM_PAGE_xxHx (NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_SxHx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_xVHx (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_SVHx (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_xVxC (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_SVxC (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_xxHC (NVKM_VMM_PAGE_xxHx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_SxHC (NVKM_VMM_PAGE_SxHx | NVKM_VMM_PAGE_COMP)
	u8 type;
};

struct nvkm_vmm_func {
	int (*join)(struct nvkm_vmm *, struct nvkm_memory *inst);
	void (*part)(struct nvkm_vmm *, struct nvkm_memory *inst);

	int (*aper)(enum nvkm_memory_target);
	int (*valid)(struct nvkm_vmm *, void *argv, u32 argc,
		     struct nvkm_vmm_map *);
	void (*flush)(struct nvkm_vmm *, int depth);

	int (*mthd)(struct nvkm_vmm *, struct nvkm_client *,
		    u32 mthd, void *argv, u32 argc);

	void (*invalidate_pdb)(struct nvkm_vmm *, u64 addr);

	u64 page_block;
	const struct nvkm_vmm_page page[];
};

struct nvkm_vmm_join {
	struct nvkm_memory *inst;
	struct list_head head;
};

int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
		  u32 pd_header, bool managed, u64 addr, u64 size,
		  struct lock_class_key *, const char *name,
		  struct nvkm_vmm **);
struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
				     u64 addr, u64 size);
int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
			bool sparse, u8 page, u8 align, u64 size,
			struct nvkm_vma **pvma);
void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn);
void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *);

#define NVKM_VMM_PFN_ADDR 0xfffffffffffff000ULL
#define NVKM_VMM_PFN_ADDR_SHIFT 12
#define NVKM_VMM_PFN_APER 0x00000000000000f0ULL
#define NVKM_VMM_PFN_HOST 0x0000000000000000ULL
#define NVKM_VMM_PFN_VRAM 0x0000000000000010ULL
#define NVKM_VMM_PFN_A 0x0000000000000004ULL
#define NVKM_VMM_PFN_W 0x0000000000000002ULL
#define NVKM_VMM_PFN_V 0x0000000000000001ULL
#define NVKM_VMM_PFN_NONE 0x0000000000000000ULL
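
/* A sketch of composing one entry of the pfn[] array consumed by
 * nvkm_vmm_pfn_map() below, assuming the layout given by the
 * NVKM_VMM_PFN_* masks above; the helper name is hypothetical.
 * Bits 63:12 carry the page address, the aperture field selects
 * HOST or VRAM, and V/W/A mark the page valid/writable/atomic.
 */
static inline u64
nvkm_vmm_pfn_encode(u64 addr, bool vram, bool write)
{
	u64 pfn = (addr & NVKM_VMM_PFN_ADDR) | NVKM_VMM_PFN_V;
	pfn |= vram ? NVKM_VMM_PFN_VRAM : NVKM_VMM_PFN_HOST;
	if (write)
		pfn |= NVKM_VMM_PFN_W;
	return pfn;
}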

int nvkm_vmm_pfn_map(struct nvkm_vmm *, u8 page, u64 addr, u64 size, u64 *pfn);
int nvkm_vmm_pfn_unmap(struct nvkm_vmm *, u64 addr, u64 size);

struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);

int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
		  bool, u64, u64, void *, u32, struct lock_class_key *,
		  const char *, struct nvkm_vmm **);
int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);

int nv50_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
void nv50_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void nv50_vmm_flush(struct nvkm_vmm *, int);

int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
		   struct nvkm_mmu *, bool, u64, u64, void *, u32,
		   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
int gf100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
void gf100_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
int gf100_vmm_aper(enum nvkm_memory_target);
int gf100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void gf100_vmm_flush(struct nvkm_vmm *, int);
void gf100_vmm_invalidate(struct nvkm_vmm *, u32 type);
void gf100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);

int gk20a_vmm_aper(enum nvkm_memory_target);

int gm200_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
		   struct nvkm_mmu *, bool, u64, u64, void *, u32,
		   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gm200_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
int gm200_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);

int gp100_vmm_new_(const struct nvkm_vmm_func *,
		   struct nvkm_mmu *, bool, u64, u64, void *, u32,
		   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void gp100_vmm_flush(struct nvkm_vmm *, int);
int gp100_vmm_mthd(struct nvkm_vmm *, struct nvkm_client *, u32, void *, u32);
void gp100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);

int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);

int nv04_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv41_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv44_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv50_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int mcp77_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int g84_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gk104_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gk20a_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gm200_vmm_new_fixed(struct nvkm_mmu *, bool, u64, u64, void *, u32,
			struct lock_class_key *, const char *,
			struct nvkm_vmm **);
int gm200_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gm20b_vmm_new_fixed(struct nvkm_mmu *, bool, u64, u64, void *, u32,
			struct lock_class_key *, const char *,
			struct nvkm_vmm **);
int gm20b_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gp100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gp10b_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gv100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
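
/* An illustrative (hypothetical) backend description showing how the
 * constructors above tie the pieces together: a struct nvkm_vmm_func
 * wires the per-generation hooks to a list of supported page sizes and
 * is handed to nvkm_vmm_new_().  This initializer is loosely modeled on
 * the Fermi/Kepler backends, using only declarations from this header;
 * the real tables live in the per-generation .c files.
 */
static const struct nvkm_vmm_func
example_vmm_func = {
	.join = gf100_vmm_join,
	.part = gf100_vmm_part,
	.aper = gf100_vmm_aper,
	.valid = gf100_vmm_valid,
	.flush = gf100_vmm_flush,
	.invalidate_pdb = gf100_vmm_invalidate_pdb,
	.page = {
		{ 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
		{ 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
		{}
	}
};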

#define VMM_PRINT(l,v,p,f,a...) do {                                          \
	struct nvkm_vmm *_vmm = (v);                                          \
	if (CONFIG_NOUVEAU_DEBUG >= (l) && _vmm->debug >= (l)) {              \
		nvkm_printk_(&_vmm->mmu->subdev, 0, p, "%s: "f"\n",           \
			     _vmm->name, ##a);                                \
	}                                                                     \
} while(0)
#define VMM_DEBUG(v,f,a...) VMM_PRINT(NV_DBG_DEBUG, (v), info, f, ##a)
#define VMM_TRACE(v,f,a...) VMM_PRINT(NV_DBG_TRACE, (v), info, f, ##a)
#define VMM_SPAM(v,f,a...) VMM_PRINT(NV_DBG_SPAM , (v), dbg, f, ##a)

#define VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,BASE,SIZE,NEXT) do {           \
	nvkm_kmap((PT)->memory);                                              \
	while (PTEN) {                                                        \
		u64 _ptes = ((SIZE) - MAP->off) >> MAP->page->shift;          \
		u64 _addr = ((BASE) + MAP->off);                              \
		                                                              \
		if (_ptes > PTEN) {                                           \
			MAP->off += PTEN << MAP->page->shift;                 \
			_ptes = PTEN;                                         \
		} else {                                                      \
			MAP->off = 0;                                         \
			NEXT;                                                 \
		}                                                             \
		                                                              \
		VMM_SPAM(VMM, "ITER %08x %08x PTE(s)", PTEI, (u32)_ptes);     \
		                                                              \
		FILL(VMM, PT, PTEI, _ptes, MAP, _addr);                       \
		PTEI += _ptes;                                                \
		PTEN -= _ptes;                                                \
	}                                                                     \
	nvkm_done((PT)->memory);                                              \
} while(0)

#define VMM_MAP_ITER_MEM(VMM,PT,PTEI,PTEN,MAP,FILL)                           \
	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,                               \
		     ((u64)MAP->mem->offset << NVKM_RAM_MM_SHIFT),            \
		     ((u64)MAP->mem->length << NVKM_RAM_MM_SHIFT),            \
		     (MAP->mem = MAP->mem->next))
#define VMM_MAP_ITER_DMA(VMM,PT,PTEI,PTEN,MAP,FILL)                           \
	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,                               \
		     *MAP->dma, PAGE_SIZE, MAP->dma++)
#define VMM_MAP_ITER_SGL(VMM,PT,PTEI,PTEN,MAP,FILL)                           \
	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,                               \
		     sg_dma_address(MAP->sgl), sg_dma_len(MAP->sgl),          \
		     (MAP->sgl = sg_next(MAP->sgl)))
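
/* A sketch of how a backend's nvkm_vmm_pte_func pairs with the iterators
 * above: VMM_MAP_ITER_DMA() chops the DMA address list into runs that fit
 * the current PT and invokes the FILL callback once per run, between
 * nvkm_kmap()/nvkm_done() on the PT's backing memory.  The function names
 * and the 64-bit PTE format here are hypothetical; real backends usually
 * write PTEs through the VMM_WO*()/VMM_FO*() helpers defined next.
 */
static void
example_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		    u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u64 data = (addr >> 8) | map->type;	/* hypothetical PTE format */
	while (ptes--) {
		nvkm_wo64(pt->memory, pt->base + ptei++ * 8, data);
		data += map->next;
	}
}

static void
example_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		    u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, example_vmm_pgt_pte);
}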

#define VMM_FO(m,o,d,c,b) nvkm_fo##b((m)->memory, (o), (d), (c))
#define VMM_WO(m,o,d,c,b) nvkm_wo##b((m)->memory, (o), (d))
#define VMM_XO(m,v,o,d,c,b,fn,f,a...) do {                                    \
	const u32 _pteo = (o); u##b _data = (d);                              \
	VMM_SPAM((v), " %010llx "f, (m)->addr + _pteo, _data, ##a);           \
	VMM_##fn((m), (m)->base + _pteo, _data, (c), b);                      \
} while(0)

#define VMM_WO032(m,v,o,d) VMM_XO((m),(v),(o),(d), 1, 32, WO, "%08x")
#define VMM_FO032(m,v,o,d,c)                                                  \
	VMM_XO((m),(v),(o),(d),(c), 32, FO, "%08x %08x", (c))

#define VMM_WO064(m,v,o,d) VMM_XO((m),(v),(o),(d), 1, 64, WO, "%016llx")
#define VMM_FO064(m,v,o,d,c)                                                  \
	VMM_XO((m),(v),(o),(d),(c), 64, FO, "%016llx %08x", (c))

#define VMM_XO128(m,v,o,lo,hi,c,f,a...) do {                                  \
	u32 _pteo = (o), _ptes = (c);                                         \
	const u64 _addr = (m)->addr + _pteo;                                  \
	VMM_SPAM((v), " %010llx %016llx%016llx"f, _addr, (hi), (lo), ##a);    \
	while (_ptes--) {                                                     \
		nvkm_wo64((m)->memory, (m)->base + _pteo + 0, (lo));          \
		nvkm_wo64((m)->memory, (m)->base + _pteo + 8, (hi));          \
		_pteo += 0x10;                                                \
	}                                                                     \
} while(0)

#define VMM_WO128(m,v,o,lo,hi) VMM_XO128((m),(v),(o),(lo),(hi), 1, "")
#define VMM_FO128(m,v,o,lo,hi,c) do {                                         \
	nvkm_kmap((m)->memory);                                               \
	VMM_XO128((m),(v),(o),(lo),(hi),(c), " %08x", (c));                   \
	nvkm_done((m)->memory);                                               \
} while(0)
#endif