cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

nv40.c (7480B)


/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
#include "priv.h"

#include <core/ramht.h>
#include <engine/gr/nv40.h>

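/* Instmem state for NV40-family boards: the common nvkm_instmem base, an
 * allocator covering the reserved PRAMIN region, and the kernel mapping of
 * the PRAMIN BAR.
 */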
struct nv40_instmem {
	struct nvkm_instmem base;
	struct nvkm_mm heap;
	void __iomem *iomem;
};

/******************************************************************************
 * instmem object implementation
 *****************************************************************************/
#define nv40_instobj(p) container_of((p), struct nv40_instobj, base.memory)

struct nv40_instobj {
	struct nvkm_instobj base;
	struct nv40_instmem *imem;
	struct nvkm_mm_node *node;
};

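/* Per-object accessors: offsets are relative to the object's allocation,
 * so each access is translated by the node offset before going through the
 * PRAMIN BAR mapping.
 */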
static void
nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct nv40_instobj *iobj = nv40_instobj(memory);
	iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
}

static u32
nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
	struct nv40_instobj *iobj = nv40_instobj(memory);
	return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
}

static const struct nvkm_memory_ptrs
nv40_instobj_ptrs = {
	.rd32 = nv40_instobj_rd32,
	.wr32 = nv40_instobj_wr32,
};

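/* The PRAMIN mapping is created once at subdev construction and never torn
 * down while objects exist, so acquire just returns a pointer into it and
 * release only needs a write barrier to order the CPU writes.
 */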
static void
nv40_instobj_release(struct nvkm_memory *memory)
{
	wmb();
}

static void __iomem *
nv40_instobj_acquire(struct nvkm_memory *memory)
{
	struct nv40_instobj *iobj = nv40_instobj(memory);
	return iobj->imem->iomem + iobj->node->offset;
}

static u64
nv40_instobj_size(struct nvkm_memory *memory)
{
	return nv40_instobj(memory)->node->length;
}

static u64
nv40_instobj_addr(struct nvkm_memory *memory)
{
	return nv40_instobj(memory)->node->offset;
}

static enum nvkm_memory_target
nv40_instobj_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_INST;
}

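/* Objects carve their space out of a heap shared with every other instance
 * object, so freeing the node is serialised against allocation in
 * nv40_instobj_new() via the instmem mutex.
 */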
static void *
nv40_instobj_dtor(struct nvkm_memory *memory)
{
	struct nv40_instobj *iobj = nv40_instobj(memory);
	mutex_lock(&iobj->imem->base.mutex);
	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
	mutex_unlock(&iobj->imem->base.mutex);
	nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
	return iobj;
}

static const struct nvkm_memory_func
nv40_instobj_func = {
	.dtor = nv40_instobj_dtor,
	.target = nv40_instobj_target,
	.size = nv40_instobj_size,
	.addr = nv40_instobj_addr,
	.acquire = nv40_instobj_acquire,
	.release = nv40_instobj_release,
};

static int
nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		 struct nvkm_memory **pmemory)
{
	struct nv40_instmem *imem = nv40_instmem(base);
	struct nv40_instobj *iobj;
	int ret;

	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
		return -ENOMEM;
	*pmemory = &iobj->base.memory;

	nvkm_instobj_ctor(&nv40_instobj_func, &imem->base, &iobj->base);
	iobj->base.memory.ptrs = &nv40_instobj_ptrs;
	iobj->imem = imem;

	mutex_lock(&imem->base.mutex);
	/* Exact-size allocation from the front of the PRAMIN heap, with at
	 * least 1-byte alignment when none was requested.
	 */
	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
			   align ? align : 1, &iobj->node);
	mutex_unlock(&imem->base.mutex);
	return ret;
}

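/*
 * Illustrative sketch, not part of the original file: how a caller might
 * allocate an instance object through this backend and write to it.  It
 * assumes the nvkm_kmap()/nvkm_wo32()/nvkm_done() accessor macros from
 * <core/memory.h>, which on nv40 resolve to the acquire/wr32/release
 * implementations above.
 */
static int __maybe_unused
nv40_instobj_example(struct nvkm_device *device)
{
	struct nvkm_memory *memory;
	int ret;

	/* 0x1000 bytes, 16-byte aligned, not zeroed; NVKM_MEM_TARGET_INST
	 * routes the request to nv40_instobj_new() via the instmem subdev.
	 */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x10,
			      false, &memory);
	if (ret)
		return ret;

	nvkm_kmap(memory);			/* nv40_instobj_acquire() */
	nvkm_wo32(memory, 0x0, 0x12345678);	/* nv40_instobj_wr32() */
	nvkm_done(memory);			/* nv40_instobj_release() */

	nvkm_memory_unref(&memory);		/* nv40_instobj_dtor() */
	return 0;
}
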
/******************************************************************************
 * instmem subdev implementation
 *****************************************************************************/

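/* Subdev-level accessors take absolute offsets within the PRAMIN aperture
 * rather than object-relative ones.
 */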
static u32
nv40_instmem_rd32(struct nvkm_instmem *base, u32 addr)
{
	return ioread32_native(nv40_instmem(base)->iomem + addr);
}

static void
nv40_instmem_wr32(struct nvkm_instmem *base, u32 addr, u32 data)
{
	iowrite32_native(data, nv40_instmem(base)->iomem + addr);
}

static int
nv40_instmem_oneinit(struct nvkm_instmem *base)
{
	struct nv40_instmem *imem = nv40_instmem(base);
	struct nvkm_device *device = imem->base.subdev.device;
	int ret, vs;

	/* The PRAMIN aperture maps over the end of VRAM; reserve enough
	 * space to fit graphics contexts for every channel.  The magic
	 * numbers come from engine/gr/nv40.c.
	 */
	vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
	if      (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
	else if (device->chipset  < 0x43) imem->base.reserved = 0x4f00 * vs;
	else if (nv44_gr_class(device))   imem->base.reserved = 0x4980 * vs;
	else				  imem->base.reserved = 0x4a40 * vs;
	imem->base.reserved += 16 * 1024;
	imem->base.reserved *= 32;		/* per-channel */
	imem->base.reserved += 512 * 1024;	/* pci(e)gart table */
	imem->base.reserved += 512 * 1024;	/* object storage */
	imem->base.reserved = round_up(imem->base.reserved, 4096);
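	/*
	 * Worked example (illustrative): on chipset 0x40 with vs == 1 this
	 * is (0x6aa0 + 16KiB) = 43680 bytes per channel, * 32 channels =
	 * 1397760 bytes, + 2 * 512KiB = 2446336 bytes, rounded up to
	 * 0x256000 (~2.3MiB) reserved at the end of VRAM.
	 */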

	ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
	if (ret)
		return ret;

	/* 0x00000-0x10000: reserve for probable vbios image */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
			      &imem->base.vbios);
	if (ret)
		return ret;

	/* 0x10000-0x18000: reserve for RAMHT */
	ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
	if (ret)
		return ret;

	/* 0x18000-0x18200: reserve for RAMRO
	 * 0x18200-0x20000: padding
	 */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x08000, 0, false,
			      &imem->base.ramro);
	if (ret)
		return ret;

	/* 0x20000-0x21000: reserve for RAMFC
	 * 0x21000-0x40000: padding and some unknown crap
	 */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x20000, 0, true,
			      &imem->base.ramfc);
	if (ret)
		return ret;

	return 0;
}

static void *
nv40_instmem_dtor(struct nvkm_instmem *base)
{
	struct nv40_instmem *imem = nv40_instmem(base);
	nvkm_memory_unref(&imem->base.ramfc);
	nvkm_memory_unref(&imem->base.ramro);
	nvkm_ramht_del(&imem->base.ramht);
	nvkm_memory_unref(&imem->base.vbios);
	nvkm_mm_fini(&imem->heap);
	if (imem->iomem)
		iounmap(imem->iomem);
	return imem;
}

static const struct nvkm_instmem_func
nv40_instmem = {
	.dtor = nv40_instmem_dtor,
	.oneinit = nv40_instmem_oneinit,
	.rd32 = nv40_instmem_rd32,
	.wr32 = nv40_instmem_wr32,
	.memory_new = nv40_instobj_new,
	.zero = false,
};

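/* Constructor: map the PRAMIN aperture through whichever BAR exposes it
 * (BAR2 where present, otherwise BAR3) so the accessors above can use it.
 */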
int
nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		 struct nvkm_instmem **pimem)
{
	struct nv40_instmem *imem;
	int bar;

	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_instmem_ctor(&nv40_instmem, device, type, inst, &imem->base);
	*pimem = &imem->base;

	/* Map the PRAMIN aperture: BAR2 where it exists, BAR3 otherwise. */
	if (device->func->resource_size(device, 2))
		bar = 2;
	else
		bar = 3;

	imem->iomem = ioremap_wc(device->func->resource_addr(device, bar),
				 device->func->resource_size(device, bar));
	if (!imem->iomem) {
		nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
		return -EFAULT;
	}

	return 0;
}