cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

vmmgv100.c (2860B)


      1/*
      2 * Copyright 2018 Red Hat Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 */
     22#include "vmm.h"
     23
     24#include <subdev/fb.h>
     25#include <subdev/ltc.h>
     26
     27#include <nvif/ifc00d.h>
     28#include <nvif/unpack.h>
     29
/* Attach a VMM to a channel instance block on GV100.
 *
 * Performs the common GP100-era join first, then programs a
 * GV100-specific 64-entry table in the instance block.
 * NOTE(review): this looks like the per-subcontext PDB table
 * (entry 0 = this VMM's page directory) — confirm against the
 * Volta instance-block layout docs.
 *
 * Returns 0 on success, or the error propagated from
 * gp100_vmm_join().
 */
int
gv100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
	u64 data[2], mask;
	int ret = gp100_vmm_join(vmm, inst), i;
	if (ret)
		return ret;

	nvkm_kmap(inst);
	/* Read back the two words gp100_vmm_join() just wrote at
	 * 0x200/0x204 (presumably the page-directory pointer — TODO
	 * confirm against gp100_vmm_join). */
	data[0] = nvkm_ro32(inst, 0x200);
	data[1] = nvkm_ro32(inst, 0x204);
	/* Only entry 0 of the table below is marked valid. */
	mask = BIT_ULL(0);

	nvkm_wo32(inst, 0x21c, 0x00000000);

	/* Fill the 64-entry, 16-byte-stride table at 0x2a0: entries
	 * set in mask receive the data read above; all other entries
	 * get 0x00000001 in both words.  The third word of every
	 * entry is cleared. */
	for (i = 0; i < 64; i++) {
		if (mask & BIT_ULL(i)) {
			nvkm_wo32(inst, 0x2a4 + (i * 0x10), data[1]);
			nvkm_wo32(inst, 0x2a0 + (i * 0x10), data[0]);
		} else {
			nvkm_wo32(inst, 0x2a4 + (i * 0x10), 0x00000001);
			nvkm_wo32(inst, 0x2a0 + (i * 0x10), 0x00000001);
		}
		nvkm_wo32(inst, 0x2a8 + (i * 0x10), 0x00000000);
	}

	/* Publish the valid-entry mask as two 32-bit halves. */
	nvkm_wo32(inst, 0x298, lower_32_bits(mask));
	nvkm_wo32(inst, 0x29c, upper_32_bits(mask));
	nvkm_done(inst);
	return 0;
}
     61
/* GV100 VMM function table: join is GV100-specific, part/aper are
 * inherited from GF100, and valid/flush/mthd/invalidate_pdb from
 * GP100. */
static const struct nvkm_vmm_func
gv100_vmm = {
	.join = gv100_vmm_join,
	.part = gf100_vmm_part,
	.aper = gf100_vmm_aper,
	.valid = gp100_vmm_valid,
	.flush = gp100_vmm_flush,
	.mthd = gp100_vmm_mthd,
	.invalidate_pdb = gp100_vmm_invalidate_pdb,
	/* Page-table levels by address-bit shift, largest first,
	 * reusing the GP100 descriptors.  The trailing flags are
	 * presumably per-level capability bits (see the
	 * NVKM_VMM_PAGE_* definitions) — TODO confirm. */
	.page = {
		{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
		{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
		{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
		{ 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
		{ 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
		{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
		{} /* terminator */
	}
};
     81
     82int
     83gv100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
     84	      void *argv, u32 argc, struct lock_class_key *key,
     85	      const char *name, struct nvkm_vmm **pvmm)
     86{
     87	return gp100_vmm_new_(&gv100_vmm, mmu, managed, addr, size,
     88			      argv, argc, key, name, pvmm);
     89}