cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amdgpu_ttm.c (63810B)


      1/*
      2 * Copyright 2009 Jerome Glisse.
      3 * All Rights Reserved.
      4 *
      5 * Permission is hereby granted, free of charge, to any person obtaining a
      6 * copy of this software and associated documentation files (the
      7 * "Software"), to deal in the Software without restriction, including
      8 * without limitation the rights to use, copy, modify, merge, publish,
      9 * distribute, sub license, and/or sell copies of the Software, and to
     10 * permit persons to whom the Software is furnished to do so, subject to
     11 * the following conditions:
     12 *
     13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
     20 *
     21 * The above copyright notice and this permission notice (including the
     22 * next paragraph) shall be included in all copies or substantial portions
     23 * of the Software.
     24 *
     25 */
     26/*
     27 * Authors:
     28 *    Jerome Glisse <glisse@freedesktop.org>
     29 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
     30 *    Dave Airlie
     31 */
     32
     33#include <linux/dma-mapping.h>
     34#include <linux/iommu.h>
     35#include <linux/pagemap.h>
     36#include <linux/sched/task.h>
     37#include <linux/sched/mm.h>
     38#include <linux/seq_file.h>
     39#include <linux/slab.h>
     40#include <linux/swap.h>
     41#include <linux/swiotlb.h>
     42#include <linux/dma-buf.h>
     43#include <linux/sizes.h>
     44#include <linux/module.h>
     45
     46#include <drm/drm_drv.h>
     47#include <drm/ttm/ttm_bo_api.h>
     48#include <drm/ttm/ttm_bo_driver.h>
     49#include <drm/ttm/ttm_placement.h>
     50#include <drm/ttm/ttm_range_manager.h>
     51
     52#include <drm/amdgpu_drm.h>
     54
     55#include "amdgpu.h"
     56#include "amdgpu_object.h"
     57#include "amdgpu_trace.h"
     58#include "amdgpu_amdkfd.h"
     59#include "amdgpu_sdma.h"
     60#include "amdgpu_ras.h"
     61#include "amdgpu_atomfirmware.h"
     62#include "amdgpu_res_cursor.h"
     63#include "bif/bif_4_1_d.h"
     64
     65MODULE_IMPORT_NS(DMA_BUF);
     66
     67#define AMDGPU_TTM_VRAM_MAX_DW_READ	(size_t)128
     68
     69static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
     70				   struct ttm_tt *ttm,
     71				   struct ttm_resource *bo_mem);
     72static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
     73				      struct ttm_tt *ttm);
     74
     75static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
     76				    unsigned int type,
     77				    uint64_t size_in_page)
     78{
     79	return ttm_range_man_init(&adev->mman.bdev, type,
     80				  false, size_in_page);
     81}
     82
     83/**
     84 * amdgpu_evict_flags - Compute placement flags
     85 *
     86 * @bo: The buffer object to evict
     87 * @placement: Possible destination(s) for evicted BO
     88 *
     89 * Fill in placement data when ttm_bo_evict() is called
     90 */
     91static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
     92				struct ttm_placement *placement)
     93{
     94	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
     95	struct amdgpu_bo *abo;
     96	static const struct ttm_place placements = {
     97		.fpfn = 0,
     98		.lpfn = 0,
     99		.mem_type = TTM_PL_SYSTEM,
    100		.flags = 0
    101	};
    102
    103	/* Don't handle scatter gather BOs */
    104	if (bo->type == ttm_bo_type_sg) {
    105		placement->num_placement = 0;
    106		placement->num_busy_placement = 0;
    107		return;
    108	}
    109
    110	/* Object isn't an AMDGPU object so ignore */
    111	if (!amdgpu_bo_is_amdgpu_bo(bo)) {
    112		placement->placement = &placements;
    113		placement->busy_placement = &placements;
    114		placement->num_placement = 1;
    115		placement->num_busy_placement = 1;
    116		return;
    117	}
    118
    119	abo = ttm_to_amdgpu_bo(bo);
    120	if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
    121		placement->num_placement = 0;
    122		placement->num_busy_placement = 0;
    123		return;
    124	}
    125
    126	switch (bo->resource->mem_type) {
    127	case AMDGPU_PL_GDS:
    128	case AMDGPU_PL_GWS:
    129	case AMDGPU_PL_OA:
    130		placement->num_placement = 0;
    131		placement->num_busy_placement = 0;
    132		return;
    133
    134	case TTM_PL_VRAM:
    135		if (!adev->mman.buffer_funcs_enabled) {
    136			/* Move to system memory */
    137			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
    138		} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
    139			   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
    140			   amdgpu_bo_in_cpu_visible_vram(abo)) {
    141
    142			/* Try evicting to the CPU inaccessible part of VRAM
    143			 * first, but only set GTT as busy placement, so this
    144			 * BO will be evicted to GTT rather than causing other
    145			 * BOs to be evicted from VRAM
    146			 */
    147			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
    148							AMDGPU_GEM_DOMAIN_GTT |
    149							AMDGPU_GEM_DOMAIN_CPU);
    150			abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
    151			abo->placements[0].lpfn = 0;
    152			abo->placement.busy_placement = &abo->placements[1];
    153			abo->placement.num_busy_placement = 1;
    154		} else {
    155			/* Move to GTT memory */
    156			amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
    157							AMDGPU_GEM_DOMAIN_CPU);
    158		}
    159		break;
    160	case TTM_PL_TT:
    161	case AMDGPU_PL_PREEMPT:
    162	default:
    163		amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
    164		break;
    165	}
    166	*placement = abo->placement;
    167}
    168
    169/**
    170 * amdgpu_ttm_map_buffer - Map memory into the GART windows
    171 * @bo: buffer object to map
    172 * @mem: memory object to map
    173 * @mm_cur: range to map
    174 * @window: which GART window to use
    175 * @ring: DMA ring to use for the copy
    176 * @tmz: if we should set up a TMZ-enabled mapping
    177 * @size: in number of bytes to map, out number of bytes mapped
    178 * @addr: resulting address inside the MC address space
    179 *
    180 * Setup one of the GART windows to access a specific piece of memory or return
    181 * the physical address for local memory.
    182 */
    183static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
    184				 struct ttm_resource *mem,
    185				 struct amdgpu_res_cursor *mm_cur,
    186				 unsigned window, struct amdgpu_ring *ring,
    187				 bool tmz, uint64_t *size, uint64_t *addr)
    188{
    189	struct amdgpu_device *adev = ring->adev;
    190	unsigned offset, num_pages, num_dw, num_bytes;
    191	uint64_t src_addr, dst_addr;
    192	struct dma_fence *fence;
    193	struct amdgpu_job *job;
    194	void *cpu_addr;
    195	uint64_t flags;
    196	unsigned int i;
    197	int r;
    198
    199	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
    200	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
    201
    202	if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
    203		return -EINVAL;
    204
    205	/* Map only what can't be accessed directly */
    206	if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
    207		*addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
    208			mm_cur->start;
    209		return 0;
    210	}
    211
    212
    213	/*
    214	 * If start begins at an offset inside the page, then adjust the size
    215	 * and addr accordingly
    216	 */
    217	offset = mm_cur->start & ~PAGE_MASK;
    218
    219	num_pages = PFN_UP(*size + offset);
    220	num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
    221
    222	*size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
    223
    224	*addr = adev->gmc.gart_start;
    225	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
    226		AMDGPU_GPU_PAGE_SIZE;
    227	*addr += offset;
    228
    229	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
    230	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
    231
    232	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
    233				     AMDGPU_IB_POOL_DELAYED, &job);
    234	if (r)
    235		return r;
    236
    237	src_addr = num_dw * 4;
    238	src_addr += job->ibs[0].gpu_addr;
    239
    240	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
    241	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
    242	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
    243				dst_addr, num_bytes, false);
    244
    245	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
    246	WARN_ON(job->ibs[0].length_dw > num_dw);
    247
    248	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
    249	if (tmz)
    250		flags |= AMDGPU_PTE_TMZ;
    251
    252	cpu_addr = &job->ibs[0].ptr[num_dw];
    253
    254	if (mem->mem_type == TTM_PL_TT) {
    255		dma_addr_t *dma_addr;
    256
    257		dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
    258		amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
    259	} else {
    260		dma_addr_t dma_address;
    261
    262		dma_address = mm_cur->start;
    263		dma_address += adev->vm_manager.vram_base_offset;
    264
    265		for (i = 0; i < num_pages; ++i) {
    266			amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
    267					flags, cpu_addr);
    268			dma_address += PAGE_SIZE;
    269		}
    270	}
    271
    272	r = amdgpu_job_submit(job, &adev->mman.entity,
    273			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
    274	if (r)
    275		goto error_free;
    276
    277	dma_fence_put(fence);
    278
    279	return r;
    280
    281error_free:
    282	amdgpu_job_free(job);
    283	return r;
    284}
    285
    286/**
    287 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
    288 * @adev: amdgpu device
    289 * @src: buffer/address where to read from
    290 * @dst: buffer/address where to write to
    291 * @size: number of bytes to copy
    292 * @tmz: if a secure copy should be used
    293 * @resv: resv object to sync to
    294 * @f: Returns the last fence if multiple jobs are submitted.
    295 *
    296 * The function copies @size bytes from {src->mem + src->offset} to
    297 * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
    298 * move and different for a BO to BO copy.
    299 *
    300 */
    301int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
    302			       const struct amdgpu_copy_mem *src,
    303			       const struct amdgpu_copy_mem *dst,
    304			       uint64_t size, bool tmz,
    305			       struct dma_resv *resv,
    306			       struct dma_fence **f)
    307{
    308	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
    309	struct amdgpu_res_cursor src_mm, dst_mm;
    310	struct dma_fence *fence = NULL;
    311	int r = 0;
    312
    313	if (!adev->mman.buffer_funcs_enabled) {
    314		DRM_ERROR("Trying to move memory with ring turned off.\n");
    315		return -EINVAL;
    316	}
    317
    318	amdgpu_res_first(src->mem, src->offset, size, &src_mm);
    319	amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
    320
    321	mutex_lock(&adev->mman.gtt_window_lock);
    322	while (src_mm.remaining) {
    323		uint64_t from, to, cur_size;
    324		struct dma_fence *next;
    325
    326		/* Never copy more than 256MiB at once to avoid a timeout */
    327		cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
    328
    329		/* Map src to window 0 and dst to window 1. */
    330		r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
    331					  0, ring, tmz, &cur_size, &from);
    332		if (r)
    333			goto error;
    334
    335		r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
    336					  1, ring, tmz, &cur_size, &to);
    337		if (r)
    338			goto error;
    339
    340		r = amdgpu_copy_buffer(ring, from, to, cur_size,
    341				       resv, &next, false, true, tmz);
    342		if (r)
    343			goto error;
    344
    345		dma_fence_put(fence);
    346		fence = next;
    347
    348		amdgpu_res_next(&src_mm, cur_size);
    349		amdgpu_res_next(&dst_mm, cur_size);
    350	}
    351error:
    352	mutex_unlock(&adev->mman.gtt_window_lock);
    353	if (f)
    354		*f = dma_fence_get(fence);
    355	dma_fence_put(fence);
    356	return r;
    357}
    358
    359/*
    360 * amdgpu_move_blit - Copy an entire buffer to another buffer
    361 *
    362 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
    363 * help move buffers to and from VRAM.
    364 */
    365static int amdgpu_move_blit(struct ttm_buffer_object *bo,
    366			    bool evict,
    367			    struct ttm_resource *new_mem,
    368			    struct ttm_resource *old_mem)
    369{
    370	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
    371	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
    372	struct amdgpu_copy_mem src, dst;
    373	struct dma_fence *fence = NULL;
    374	int r;
    375
    376	src.bo = bo;
    377	dst.bo = bo;
    378	src.mem = old_mem;
    379	dst.mem = new_mem;
    380	src.offset = 0;
    381	dst.offset = 0;
    382
    383	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
    384				       new_mem->num_pages << PAGE_SHIFT,
    385				       amdgpu_bo_encrypted(abo),
    386				       bo->base.resv, &fence);
    387	if (r)
    388		goto error;
    389
    390	/* clear the space being freed */
    391	if (old_mem->mem_type == TTM_PL_VRAM &&
    392	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
    393		struct dma_fence *wipe_fence = NULL;
    394
    395		r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence);
    396		if (r) {
    397			goto error;
    398		} else if (wipe_fence) {
    399			dma_fence_put(fence);
    400			fence = wipe_fence;
    401		}
    402	}
    403
    404	/* Always block for VM page tables before committing the new location */
    405	if (bo->type == ttm_bo_type_kernel)
    406		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
    407	else
    408		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
    409	dma_fence_put(fence);
    410	return r;
    411
    412error:
    413	if (fence)
    414		dma_fence_wait(fence, false);
    415	dma_fence_put(fence);
    416	return r;
    417}
    418
    419/*
    420 * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
    421 *
    422 * Called by amdgpu_bo_move()
    423 */
    424static bool amdgpu_mem_visible(struct amdgpu_device *adev,
    425			       struct ttm_resource *mem)
    426{
    427	uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
    428	struct amdgpu_res_cursor cursor;
    429
    430	if (mem->mem_type == TTM_PL_SYSTEM ||
    431	    mem->mem_type == TTM_PL_TT)
    432		return true;
    433	if (mem->mem_type != TTM_PL_VRAM)
    434		return false;
    435
    436	amdgpu_res_first(mem, 0, mem_size, &cursor);
    437
    438	/* ttm_resource_ioremap only supports contiguous memory */
    439	if (cursor.size != mem_size)
    440		return false;
    441
    442	return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
    443}
    444
    445/*
    446 * amdgpu_bo_move - Move a buffer object to a new memory location
    447 *
    448 * Called by ttm_bo_handle_move_mem()
    449 */
    450static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
    451			  struct ttm_operation_ctx *ctx,
    452			  struct ttm_resource *new_mem,
    453			  struct ttm_place *hop)
    454{
    455	struct amdgpu_device *adev;
    456	struct amdgpu_bo *abo;
    457	struct ttm_resource *old_mem = bo->resource;
    458	int r;
    459
    460	if (new_mem->mem_type == TTM_PL_TT ||
    461	    new_mem->mem_type == AMDGPU_PL_PREEMPT) {
    462		r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
    463		if (r)
    464			return r;
    465	}
    466
    467	/* Can't move a pinned BO */
    468	abo = ttm_to_amdgpu_bo(bo);
    469	if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
    470		return -EINVAL;
    471
    472	adev = amdgpu_ttm_adev(bo->bdev);
    473
    474	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
    475		ttm_bo_move_null(bo, new_mem);
    476		goto out;
    477	}
    478	if (old_mem->mem_type == TTM_PL_SYSTEM &&
    479	    (new_mem->mem_type == TTM_PL_TT ||
    480	     new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
    481		ttm_bo_move_null(bo, new_mem);
    482		goto out;
    483	}
    484	if ((old_mem->mem_type == TTM_PL_TT ||
    485	     old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
    486	    new_mem->mem_type == TTM_PL_SYSTEM) {
    487		r = ttm_bo_wait_ctx(bo, ctx);
    488		if (r)
    489			return r;
    490
    491		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
    492		ttm_resource_free(bo, &bo->resource);
    493		ttm_bo_assign_mem(bo, new_mem);
    494		goto out;
    495	}
    496
    497	if (old_mem->mem_type == AMDGPU_PL_GDS ||
    498	    old_mem->mem_type == AMDGPU_PL_GWS ||
    499	    old_mem->mem_type == AMDGPU_PL_OA ||
    500	    new_mem->mem_type == AMDGPU_PL_GDS ||
    501	    new_mem->mem_type == AMDGPU_PL_GWS ||
    502	    new_mem->mem_type == AMDGPU_PL_OA) {
    503		/* Nothing to save here */
    504		ttm_bo_move_null(bo, new_mem);
    505		goto out;
    506	}
    507
    508	if (bo->type == ttm_bo_type_device &&
    509	    new_mem->mem_type == TTM_PL_VRAM &&
    510	    old_mem->mem_type != TTM_PL_VRAM) {
    511		/* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
    512		 * accesses the BO after it's moved.
    513		 */
    514		abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
    515	}
    516
    517	if (adev->mman.buffer_funcs_enabled) {
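       		/* System pages are not GPU-accessible until they are bound
       		 * into the GART, so a SYSTEM <-> VRAM move has to bounce
       		 * through a temporary GTT placement; -EMULTIHOP asks TTM to
       		 * perform that extra hop.
       		 */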
    518		if (((old_mem->mem_type == TTM_PL_SYSTEM &&
    519		      new_mem->mem_type == TTM_PL_VRAM) ||
    520		     (old_mem->mem_type == TTM_PL_VRAM &&
    521		      new_mem->mem_type == TTM_PL_SYSTEM))) {
    522			hop->fpfn = 0;
    523			hop->lpfn = 0;
    524			hop->mem_type = TTM_PL_TT;
    525			hop->flags = TTM_PL_FLAG_TEMPORARY;
    526			return -EMULTIHOP;
    527		}
    528
    529		r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
    530	} else {
    531		r = -ENODEV;
    532	}
    533
    534	if (r) {
    535		/* Check that all memory is CPU accessible */
    536		if (!amdgpu_mem_visible(adev, old_mem) ||
    537		    !amdgpu_mem_visible(adev, new_mem)) {
    538			pr_err("Move buffer fallback to memcpy unavailable\n");
    539			return r;
    540		}
    541
    542		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
    543		if (r)
    544			return r;
    545	}
    546
    547out:
    548	/* update statistics */
    549	atomic64_add(bo->base.size, &adev->num_bytes_moved);
    550	amdgpu_bo_move_notify(bo, evict, new_mem);
    551	return 0;
    552}
    553
    554/*
    555 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
    556 *
    557 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
    558 */
    559static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
    560				     struct ttm_resource *mem)
    561{
    562	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
    563	size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
    564
    565	switch (mem->mem_type) {
    566	case TTM_PL_SYSTEM:
    567		/* system memory */
    568		return 0;
    569	case TTM_PL_TT:
    570	case AMDGPU_PL_PREEMPT:
    571		break;
    572	case TTM_PL_VRAM:
    573		mem->bus.offset = mem->start << PAGE_SHIFT;
    574		/* check if it's visible */
    575		if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
    576			return -EINVAL;
    577
    578		if (adev->mman.aper_base_kaddr &&
    579		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
    580			mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
    581					mem->bus.offset;
    582
    583		mem->bus.offset += adev->gmc.aper_base;
    584		mem->bus.is_iomem = true;
    585		break;
    586	default:
    587		return -EINVAL;
    588	}
    589	return 0;
    590}
    591
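       /*
        * amdgpu_ttm_io_mem_pfn - Translate a BO page offset into an aperture PFN
        *
        * Returns the page frame number inside the CPU-visible VRAM aperture that
        * backs the given page of the buffer object; used by the TTM CPU fault path.
        */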
    592static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
    593					   unsigned long page_offset)
    594{
    595	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
    596	struct amdgpu_res_cursor cursor;
    597
    598	amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
    599			 &cursor);
    600	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
    601}
    602
    603/**
    604 * amdgpu_ttm_domain_start - Returns GPU start address
    605 * @adev: amdgpu device object
    606 * @type: type of the memory
    607 *
    608 * Returns:
    609 * GPU start address of a memory domain
    610 */
    611
    612uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
    613{
    614	switch (type) {
    615	case TTM_PL_TT:
    616		return adev->gmc.gart_start;
    617	case TTM_PL_VRAM:
    618		return adev->gmc.vram_start;
    619	}
    620
    621	return 0;
    622}
    623
    624/*
    625 * TTM backend functions.
    626 */
    627struct amdgpu_ttm_tt {
    628	struct ttm_tt	ttm;
    629	struct drm_gem_object	*gobj;
    630	u64			offset;
    631	uint64_t		userptr;
    632	struct task_struct	*usertask;
    633	uint32_t		userflags;
    634	bool			bound;
    635#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
    636	struct hmm_range	*range;
    637#endif
    638};
    639
    640#ifdef CONFIG_DRM_AMDGPU_USERPTR
    641/*
    642 * amdgpu_ttm_tt_get_user_pages - get device-accessible pages that back user
    643 * memory and start HMM tracking of CPU page table updates
    644 *
    645 * The caller must call amdgpu_ttm_tt_get_user_pages_done() once and only
    646 * once afterwards to stop HMM tracking
    647 */
    648int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
    649{
    650	struct ttm_tt *ttm = bo->tbo.ttm;
    651	struct amdgpu_ttm_tt *gtt = (void *)ttm;
    652	unsigned long start = gtt->userptr;
    653	struct vm_area_struct *vma;
    654	struct mm_struct *mm;
    655	bool readonly;
    656	int r = 0;
    657
    658	mm = bo->notifier.mm;
    659	if (unlikely(!mm)) {
    660		DRM_DEBUG_DRIVER("BO is not registered?\n");
    661		return -EFAULT;
    662	}
    663
    664	/* Another get_user_pages is running at the same time?? */
    665	if (WARN_ON(gtt->range))
    666		return -EFAULT;
    667
    668	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
    669		return -ESRCH;
    670
    671	mmap_read_lock(mm);
    672	vma = vma_lookup(mm, start);
    673	if (unlikely(!vma)) {
    674		r = -EFAULT;
    675		goto out_unlock;
    676	}
    677	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
    678		vma->vm_file)) {
    679		r = -EPERM;
    680		goto out_unlock;
    681	}
    682
    683	readonly = amdgpu_ttm_tt_is_readonly(ttm);
    684	r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
    685				       ttm->num_pages, &gtt->range, readonly,
    686				       true, NULL);
    687out_unlock:
    688	mmap_read_unlock(mm);
    689	if (r)
    690		pr_debug("failed %d to get user pages 0x%lx\n", r, start);
    691
    692	mmput(mm);
    693
    694	return r;
    695}
    696
    697/*
    698 * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking of CPU page table changes
    699 * Check if the pages backing this ttm range have been invalidated
    700 *
    701 * Returns: true if pages are still valid
    702 */
    703bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
    704{
    705	struct amdgpu_ttm_tt *gtt = (void *)ttm;
    706	bool r = false;
    707
    708	if (!gtt || !gtt->userptr)
    709		return false;
    710
    711	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
    712		gtt->userptr, ttm->num_pages);
    713
    714	WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
    715		"No user pages to check\n");
    716
    717	if (gtt->range) {
    718		/*
    719		 * FIXME: Must always hold notifier_lock for this, and must
    720		 * not ignore the return code.
    721		 */
    722		r = amdgpu_hmm_range_get_pages_done(gtt->range);
    723		gtt->range = NULL;
    724	}
    725
    726	return !r;
    727}
    728#endif
    729
    730/*
    731 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
    732 *
    733 * Called by amdgpu_cs_list_validate(). This creates the page list
    734 * that backs user memory and will ultimately be mapped into the device
    735 * address space.
    736 */
    737void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
    738{
    739	unsigned long i;
    740
    741	for (i = 0; i < ttm->num_pages; ++i)
    742		ttm->pages[i] = pages ? pages[i] : NULL;
    743}
    744
    745/*
    746 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
    747 *
    748 * Called by amdgpu_ttm_backend_bind()
    749 */
    750static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
    751				     struct ttm_tt *ttm)
    752{
    753	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
    754	struct amdgpu_ttm_tt *gtt = (void *)ttm;
    755	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
    756	enum dma_data_direction direction = write ?
    757		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
    758	int r;
    759
    760	/* Allocate an SG array and squash pages into it */
    761	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
    762				      (u64)ttm->num_pages << PAGE_SHIFT,
    763				      GFP_KERNEL);
    764	if (r)
    765		goto release_sg;
    766
    767	/* Map SG to device */
    768	r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
    769	if (r)
    770		goto release_sg;
    771
    772	/* convert SG to linear array of pages and dma addresses */
    773	drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
    774				       ttm->num_pages);
    775
    776	return 0;
    777
    778release_sg:
    779	kfree(ttm->sg);
    780	ttm->sg = NULL;
    781	return r;
    782}
    783
    784/*
    785 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
    786 */
    787static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
    788					struct ttm_tt *ttm)
    789{
    790	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
    791	struct amdgpu_ttm_tt *gtt = (void *)ttm;
    792	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
    793	enum dma_data_direction direction = write ?
    794		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
    795
    796	/* double check that we don't free the table twice */
    797	if (!ttm->sg || !ttm->sg->sgl)
    798		return;
    799
    800	/* unmap the pages mapped to the device */
    801	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
    802	sg_free_table(ttm->sg);
    803
    804#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
    805	if (gtt->range) {
    806		unsigned long i;
    807
    808		for (i = 0; i < ttm->num_pages; i++) {
    809			if (ttm->pages[i] !=
    810			    hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
    811				break;
    812		}
    813
    814		WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
    815	}
    816#endif
    817}
    818
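       /*
        * amdgpu_ttm_gart_bind - Write a BO's DMA addresses into the GART
        *
        * Encrypted BOs additionally get the TMZ bit set in their PTEs. For
        * GFX9 CP MQD BOs the first page keeps the default UC memory type
        * while the remaining pages are remapped as NC.
        */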
    819static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
    820				 struct ttm_buffer_object *tbo,
    821				 uint64_t flags)
    822{
    823	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
    824	struct ttm_tt *ttm = tbo->ttm;
    825	struct amdgpu_ttm_tt *gtt = (void *)ttm;
    826
    827	if (amdgpu_bo_encrypted(abo))
    828		flags |= AMDGPU_PTE_TMZ;
    829
    830	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
    831		uint64_t page_idx = 1;
    832
    833		amdgpu_gart_bind(adev, gtt->offset, page_idx,
    834				 gtt->ttm.dma_address, flags);
    835
    836		/* The memory type of the first page defaults to UC. Now
    837		 * modify the memory type to NC from the second page of
    838		 * the BO onward.
    839		 */
    840		flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
    841		flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
    842
    843		amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT),
    844				 ttm->num_pages - page_idx,
    845				 &(gtt->ttm.dma_address[page_idx]), flags);
    846	} else {
    847		amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
    848				 gtt->ttm.dma_address, flags);
    849	}
    850}
    851
    852/*
    853 * amdgpu_ttm_backend_bind - Bind GTT memory
    854 *
    855 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
    856 * This handles binding GTT memory to the device address space.
    857 */
    858static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
    859				   struct ttm_tt *ttm,
    860				   struct ttm_resource *bo_mem)
    861{
    862	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
    863	struct amdgpu_ttm_tt *gtt = (void*)ttm;
    864	uint64_t flags;
    865	int r;
    866
    867	if (!bo_mem)
    868		return -EINVAL;
    869
    870	if (gtt->bound)
    871		return 0;
    872
    873	if (gtt->userptr) {
    874		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
    875		if (r) {
    876			DRM_ERROR("failed to pin userptr\n");
    877			return r;
    878		}
    879	} else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
    880		if (!ttm->sg) {
    881			struct dma_buf_attachment *attach;
    882			struct sg_table *sgt;
    883
    884			attach = gtt->gobj->import_attach;
    885			sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
    886			if (IS_ERR(sgt))
    887				return PTR_ERR(sgt);
    888
    889			ttm->sg = sgt;
    890		}
    891
    892		drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
    893					       ttm->num_pages);
    894	}
    895
    896	if (!ttm->num_pages) {
    897		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
    898		     ttm->num_pages, bo_mem, ttm);
    899	}
    900
    901	if (bo_mem->mem_type != TTM_PL_TT ||
    902	    !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
    903		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
    904		return 0;
    905	}
    906
    907	/* compute PTE flags relevant to this BO memory */
    908	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
    909
    910	/* bind pages into GART page tables */
    911	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
    912	amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
    913			 gtt->ttm.dma_address, flags);
    914	gtt->bound = true;
    915	return 0;
    916}
    917
    918/*
    919 * amdgpu_ttm_alloc_gart - Make sure the buffer object is accessible either
    920 * through the AGP or the GART aperture.
    921 *
    922 * If the bo is accessible through the AGP aperture, use the AGP aperture
    923 * to access it; otherwise allocate logical space in the GART aperture
    924 * and map the bo into it.
    925 */
    926int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
    927{
    928	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
    929	struct ttm_operation_ctx ctx = { false, false };
    930	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
    931	struct ttm_placement placement;
    932	struct ttm_place placements;
    933	struct ttm_resource *tmp;
    934	uint64_t addr, flags;
    935	int r;
    936
    937	if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
    938		return 0;
    939
    940	addr = amdgpu_gmc_agp_addr(bo);
    941	if (addr != AMDGPU_BO_INVALID_OFFSET) {
    942		bo->resource->start = addr >> PAGE_SHIFT;
    943		return 0;
    944	}
    945
    946	/* allocate GART space */
    947	placement.num_placement = 1;
    948	placement.placement = &placements;
    949	placement.num_busy_placement = 1;
    950	placement.busy_placement = &placements;
    951	placements.fpfn = 0;
    952	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
    953	placements.mem_type = TTM_PL_TT;
    954	placements.flags = bo->resource->placement;
    955
    956	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
    957	if (unlikely(r))
    958		return r;
    959
    960	/* compute PTE flags for this buffer object */
    961	flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
    962
    963	/* Bind pages */
    964	gtt->offset = (u64)tmp->start << PAGE_SHIFT;
    965	amdgpu_ttm_gart_bind(adev, bo, flags);
    966	amdgpu_gart_invalidate_tlb(adev);
    967	ttm_resource_free(bo, &bo->resource);
    968	ttm_bo_assign_mem(bo, tmp);
    969
    970	return 0;
    971}
    972
    973/*
    974 * amdgpu_ttm_recover_gart - Rebind GTT pages
    975 *
    976 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
    977 * rebind GTT pages during a GPU reset.
    978 */
    979void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
    980{
    981	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
    982	uint64_t flags;
    983
    984	if (!tbo->ttm)
    985		return;
    986
    987	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
    988	amdgpu_ttm_gart_bind(adev, tbo, flags);
    989}
    990
    991/*
    992 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
    993 *
    994 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
    995 * ttm_tt_destroy().
    996 */
    997static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
    998				      struct ttm_tt *ttm)
    999{
   1000	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
   1001	struct amdgpu_ttm_tt *gtt = (void *)ttm;
   1002
   1003	/* if the pages have userptr pinning then clear that first */
   1004	if (gtt->userptr) {
   1005		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
   1006	} else if (ttm->sg && gtt->gobj->import_attach) {
   1007		struct dma_buf_attachment *attach;
   1008
   1009		attach = gtt->gobj->import_attach;
   1010		dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
   1011		ttm->sg = NULL;
   1012	}
   1013
   1014	if (!gtt->bound)
   1015		return;
   1016
   1017	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
   1018		return;
   1019
   1020	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
   1021	amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
   1022	gtt->bound = false;
   1023}
   1024
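       /*
        * amdgpu_ttm_backend_destroy - Free the backing amdgpu_ttm_tt
        *
        * Drops the userptr task reference, if any, before tearing down the ttm_tt.
        */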
   1025static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
   1026				       struct ttm_tt *ttm)
   1027{
   1028	struct amdgpu_ttm_tt *gtt = (void *)ttm;
   1029
   1030	if (gtt->usertask)
   1031		put_task_struct(gtt->usertask);
   1032
   1033	ttm_tt_fini(&gtt->ttm);
   1034	kfree(gtt);
   1035}
   1036
   1037/**
   1038 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
   1039 *
   1040 * @bo: The buffer object to create a GTT ttm_tt object around
   1041 * @page_flags: Page flags to be added to the ttm_tt object
   1042 *
   1043 * Called by ttm_tt_create().
   1044 */
   1045static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
   1046					   uint32_t page_flags)
   1047{
   1048	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
   1049	struct amdgpu_ttm_tt *gtt;
   1050	enum ttm_caching caching;
   1051
   1052	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
   1053	if (gtt == NULL) {
   1054		return NULL;
   1055	}
   1056	gtt->gobj = &bo->base;
   1057
   1058	if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
   1059		caching = ttm_write_combined;
   1060	else
   1061		caching = ttm_cached;
   1062
   1063	/* allocate space for the uninitialized page entries */
   1064	if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
   1065		kfree(gtt);
   1066		return NULL;
   1067	}
   1068	return &gtt->ttm;
   1069}
   1070
   1071/*
   1072 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
   1073 *
   1074 * Map the pages of a ttm_tt object to an address space visible
   1075 * to the underlying device.
   1076 */
   1077static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
   1078				  struct ttm_tt *ttm,
   1079				  struct ttm_operation_ctx *ctx)
   1080{
   1081	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
   1082	struct amdgpu_ttm_tt *gtt = (void *)ttm;
   1083	pgoff_t i;
   1084	int ret;
   1085
   1086	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
   1087	if (gtt->userptr) {
   1088		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
   1089		if (!ttm->sg)
   1090			return -ENOMEM;
   1091		return 0;
   1092	}
   1093
   1094	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
   1095		return 0;
   1096
   1097	ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
   1098	if (ret)
   1099		return ret;
   1100
   1101	for (i = 0; i < ttm->num_pages; ++i)
   1102		ttm->pages[i]->mapping = bdev->dev_mapping;
   1103
   1104	return 0;
   1105}
   1106
   1107/*
   1108 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
   1109 *
   1110 * Unmaps pages of a ttm_tt object from the device address space and
   1111 * unpopulates the page array backing it.
   1112 */
   1113static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
   1114				     struct ttm_tt *ttm)
   1115{
   1116	struct amdgpu_ttm_tt *gtt = (void *)ttm;
   1117	struct amdgpu_device *adev;
   1118	pgoff_t i;
   1119
   1120	amdgpu_ttm_backend_unbind(bdev, ttm);
   1121
   1122	if (gtt->userptr) {
   1123		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
   1124		kfree(ttm->sg);
   1125		ttm->sg = NULL;
   1126		return;
   1127	}
   1128
   1129	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
   1130		return;
   1131
   1132	for (i = 0; i < ttm->num_pages; ++i)
   1133		ttm->pages[i]->mapping = NULL;
   1134
   1135	adev = amdgpu_ttm_adev(bdev);
   1136	return ttm_pool_free(&adev->mman.bdev.pool, ttm);
   1137}
   1138
   1139/**
   1140 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
   1141 * task
   1142 *
   1143 * @tbo: The ttm_buffer_object that contains the userptr
   1144 * @user_addr:  The returned value
   1145 */
   1146int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
   1147			      uint64_t *user_addr)
   1148{
   1149	struct amdgpu_ttm_tt *gtt;
   1150
   1151	if (!tbo->ttm)
   1152		return -EINVAL;
   1153
   1154	gtt = (void *)tbo->ttm;
   1155	*user_addr = gtt->userptr;
   1156	return 0;
   1157}
   1158
   1159/**
   1160 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
   1161 * task
   1162 *
   1163 * @bo: The ttm_buffer_object to bind this userptr to
   1164 * @addr:  The address in the current task's VM space to use
   1165 * @flags: Requirements of userptr object.
   1166 *
   1167 * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
   1168 * to current task
   1169 */
   1170int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
   1171			      uint64_t addr, uint32_t flags)
   1172{
   1173	struct amdgpu_ttm_tt *gtt;
   1174
   1175	if (!bo->ttm) {
   1176		/* TODO: We want a separate TTM object type for userptrs */
   1177		bo->ttm = amdgpu_ttm_tt_create(bo, 0);
   1178		if (bo->ttm == NULL)
   1179			return -ENOMEM;
   1180	}
   1181
   1182	/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
   1183	bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
   1184
   1185	gtt = (void *)bo->ttm;
   1186	gtt->userptr = addr;
   1187	gtt->userflags = flags;
   1188
   1189	if (gtt->usertask)
   1190		put_task_struct(gtt->usertask);
   1191	gtt->usertask = current->group_leader;
   1192	get_task_struct(gtt->usertask);
   1193
   1194	return 0;
   1195}
   1196
   1197/*
   1198 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
   1199 */
   1200struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
   1201{
   1202	struct amdgpu_ttm_tt *gtt = (void *)ttm;
   1203
   1204	if (gtt == NULL)
   1205		return NULL;
   1206
   1207	if (gtt->usertask == NULL)
   1208		return NULL;
   1209
   1210	return gtt->usertask->mm;
   1211}
   1212
   1213/*
   1214 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
   1215 * address range for the current task.
   1216 *
   1217 */
   1218bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
   1219				  unsigned long end, unsigned long *userptr)
   1220{
   1221	struct amdgpu_ttm_tt *gtt = (void *)ttm;
   1222	unsigned long size;
   1223
   1224	if (gtt == NULL || !gtt->userptr)
   1225		return false;
   1226
   1227	/* Return false if no part of the ttm_tt object lies within
   1228	 * the range
   1229	 */
   1230	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
   1231	if (gtt->userptr > end || gtt->userptr + size <= start)
   1232		return false;
   1233
   1234	if (userptr)
   1235		*userptr = gtt->userptr;
   1236	return true;
   1237}
   1238
   1239/*
   1240 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
   1241 */
   1242bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
   1243{
   1244	struct amdgpu_ttm_tt *gtt = (void *)ttm;
   1245
   1246	if (gtt == NULL || !gtt->userptr)
   1247		return false;
   1248
   1249	return true;
   1250}
   1251
   1252/*
   1253 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
   1254 */
   1255bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
   1256{
   1257	struct amdgpu_ttm_tt *gtt = (void *)ttm;
   1258
   1259	if (gtt == NULL)
   1260		return false;
   1261
   1262	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
   1263}
   1264
   1265/**
   1266 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
   1267 *
   1268 * @ttm: The ttm_tt object to compute the flags for
   1269 * @mem: The memory registry backing this ttm_tt object
   1270 *
   1271 * Figure out the flags to use for a VM PDE (Page Directory Entry).
   1272 */
   1273uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
   1274{
   1275	uint64_t flags = 0;
   1276
   1277	if (mem && mem->mem_type != TTM_PL_SYSTEM)
   1278		flags |= AMDGPU_PTE_VALID;
   1279
   1280	if (mem && (mem->mem_type == TTM_PL_TT ||
   1281		    mem->mem_type == AMDGPU_PL_PREEMPT)) {
   1282		flags |= AMDGPU_PTE_SYSTEM;
   1283
   1284		if (ttm->caching == ttm_cached)
   1285			flags |= AMDGPU_PTE_SNOOPED;
   1286	}
   1287
   1288	if (mem && mem->mem_type == TTM_PL_VRAM &&
   1289			mem->bus.caching == ttm_cached)
   1290		flags |= AMDGPU_PTE_SNOOPED;
   1291
   1292	return flags;
   1293}
   1294
   1295/**
   1296 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
   1297 *
   1298 * @adev: amdgpu_device pointer
   1299 * @ttm: The ttm_tt object to compute the flags for
   1300 * @mem: The memory registry backing this ttm_tt object
   1301 *
   1302 * Figure out the flags to use for a VM PTE (Page Table Entry).
   1303 */
   1304uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
   1305				 struct ttm_resource *mem)
   1306{
   1307	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
   1308
   1309	flags |= adev->gart.gart_pte_flags;
   1310	flags |= AMDGPU_PTE_READABLE;
   1311
   1312	if (!amdgpu_ttm_tt_is_readonly(ttm))
   1313		flags |= AMDGPU_PTE_WRITEABLE;
   1314
   1315	return flags;
   1316}
   1317
   1318/*
   1319 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
   1320 * object.
   1321 *
   1322 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
   1323 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
   1324 * it can find space for a new object and by ttm_bo_force_list_clean() which is
   1325 * used to clean out a memory space.
   1326 */
   1327static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
   1328					    const struct ttm_place *place)
   1329{
   1330	unsigned long num_pages = bo->resource->num_pages;
   1331	struct dma_resv_iter resv_cursor;
   1332	struct amdgpu_res_cursor cursor;
   1333	struct dma_fence *f;
   1334
   1335	/* Swapout? */
   1336	if (bo->resource->mem_type == TTM_PL_SYSTEM)
   1337		return true;
   1338
   1339	if (bo->type == ttm_bo_type_kernel &&
   1340	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
   1341		return false;
   1342
   1343	/* If bo is a KFD BO, check if the bo belongs to the current process.
   1344	 * If true, then return false as any KFD process needs all its BOs to
   1345	 * be resident to run successfully
   1346	 */
   1347	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
   1348				DMA_RESV_USAGE_BOOKKEEP, f) {
   1349		if (amdkfd_fence_check_mm(f, current->mm))
   1350			return false;
   1351	}
   1352
   1353	switch (bo->resource->mem_type) {
   1354	case AMDGPU_PL_PREEMPT:
   1355		/* Preemptible BOs don't own system resources managed by the
   1356		 * driver (pages, VRAM, GART space). They point to resources
   1357		 * owned by someone else (e.g. pageable memory in user mode
   1358		 * or a DMABuf). They are used in a preemptible context so we
   1359		 * can guarantee no deadlocks and good QoS in case of MMU
   1360		 * notifiers or DMABuf move notifiers from the resource owner.
   1361		 */
   1362		return false;
   1363	case TTM_PL_TT:
   1364		if (amdgpu_bo_is_amdgpu_bo(bo) &&
   1365		    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
   1366			return false;
   1367		return true;
   1368
   1369	case TTM_PL_VRAM:
   1370		/* Check each drm MM node individually */
   1371		amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
   1372				 &cursor);
   1373		while (cursor.remaining) {
   1374			if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
   1375			    && !(place->lpfn &&
   1376				 place->lpfn <= PFN_DOWN(cursor.start)))
   1377				return true;
   1378
   1379			amdgpu_res_next(&cursor, cursor.size);
   1380		}
   1381		return false;
   1382
   1383	default:
   1384		break;
   1385	}
   1386
   1387	return ttm_bo_eviction_valuable(bo, place);
   1388}
   1389
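       /*
        * amdgpu_ttm_vram_mm_access - Access VRAM through the MM register aperture
        *
        * Splits the request into aligned 32-bit accesses and performs a
        * read-modify-write for any unaligned head or tail bytes.
        */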
   1390static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
   1391				      void *buf, size_t size, bool write)
   1392{
   1393	while (size) {
   1394		uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
   1395		uint64_t bytes = 4 - (pos & 0x3);
   1396		uint32_t shift = (pos & 0x3) * 8;
   1397		uint32_t mask = 0xffffffff << shift;
   1398		uint32_t value = 0;
   1399
   1400		if (size < bytes) {
   1401			mask &= 0xffffffff >> (bytes - size) * 8;
   1402			bytes = size;
   1403		}
   1404
   1405		if (mask != 0xffffffff) {
   1406			amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
   1407			if (write) {
   1408				value &= ~mask;
   1409				value |= (*(uint32_t *)buf << shift) & mask;
   1410				amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
   1411			} else {
   1412				value = (value & mask) >> shift;
   1413				memcpy(buf, &value, bytes);
   1414			}
   1415		} else {
   1416			amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
   1417		}
   1418
   1419		pos += bytes;
   1420		buf += bytes;
   1421		size -= bytes;
   1422	}
   1423}
   1424
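       /*
        * amdgpu_ttm_access_memory_sdma - Access a page of VRAM via an SDMA copy
        *
        * Copies one page between the BO and the staging buffer behind
        * adev->mman.sdma_access_ptr and waits for the copy fence before
        * returning the data.
        */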
   1425static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
   1426					unsigned long offset, void *buf, int len, int write)
   1427{
   1428	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
   1429	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
   1430	struct amdgpu_res_cursor src_mm;
   1431	struct amdgpu_job *job;
   1432	struct dma_fence *fence;
   1433	uint64_t src_addr, dst_addr;
   1434	unsigned int num_dw;
   1435	int r, idx;
   1436
   1437	if (len != PAGE_SIZE)
   1438		return -EINVAL;
   1439
   1440	if (!adev->mman.sdma_access_ptr)
   1441		return -EACCES;
   1442
   1443	if (!drm_dev_enter(adev_to_drm(adev), &idx))
   1444		return -ENODEV;
   1445
   1446	if (write)
   1447		memcpy(adev->mman.sdma_access_ptr, buf, len);
   1448
   1449	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
   1450	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
   1451	if (r)
   1452		goto out;
   1453
   1454	amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
   1455	src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start;
   1456	dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
   1457	if (write)
   1458		swap(src_addr, dst_addr);
   1459
   1460	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
   1461
   1462	amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
   1463	WARN_ON(job->ibs[0].length_dw > num_dw);
   1464
   1465	r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
   1466	if (r) {
   1467		amdgpu_job_free(job);
   1468		goto out;
   1469	}
   1470
   1471	if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
   1472		r = -ETIMEDOUT;
   1473	dma_fence_put(fence);
   1474
   1475	if (!(r || write))
   1476		memcpy(buf, adev->mman.sdma_access_ptr, len);
   1477out:
   1478	drm_dev_exit(idx);
   1479	return r;
   1480}
   1481
   1482/**
   1483 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
   1484 *
   1485 * @bo:  The buffer object to read/write
   1486 * @offset:  Offset into buffer object
   1487 * @buf:  Secondary buffer to write/read from
   1488 * @len: Length in bytes of access
   1489 * @write:  true if writing
   1490 *
   1491 * This is used to access VRAM that backs a buffer object via MMIO
   1492 * access for debugging purposes.
   1493 */
   1494static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
   1495				    unsigned long offset, void *buf, int len,
   1496				    int write)
   1497{
   1498	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
   1499	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
   1500	struct amdgpu_res_cursor cursor;
   1501	int ret = 0;
   1502
   1503	if (bo->resource->mem_type != TTM_PL_VRAM)
   1504		return -EIO;
   1505
   1506	if (amdgpu_device_has_timeouts_enabled(adev) &&
   1507			!amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
   1508		return len;
   1509
   1510	amdgpu_res_first(bo->resource, offset, len, &cursor);
   1511	while (cursor.remaining) {
   1512		size_t count, size = cursor.size;
   1513		loff_t pos = cursor.start;
   1514
   1515		count = amdgpu_device_aper_access(adev, pos, buf, size, write);
   1516		size -= count;
   1517		if (size) {
   1518			/* use MM access for the rest of VRAM and for unaligned addresses */
   1519			pos += count;
   1520			buf += count;
   1521			amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
   1522		}
   1523
   1524		ret += cursor.size;
   1525		buf += cursor.size;
   1526		amdgpu_res_next(&cursor, cursor.size);
   1527	}
   1528
   1529	return ret;
   1530}
   1531
   1532static void
   1533amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
   1534{
   1535	amdgpu_bo_move_notify(bo, false, NULL);
   1536}
   1537
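       /* TTM device callbacks used for all amdgpu buffer objects */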
   1538static struct ttm_device_funcs amdgpu_bo_driver = {
   1539	.ttm_tt_create = &amdgpu_ttm_tt_create,
   1540	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
   1541	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
   1542	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
   1543	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
   1544	.evict_flags = &amdgpu_evict_flags,
   1545	.move = &amdgpu_bo_move,
   1546	.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
   1547	.release_notify = &amdgpu_bo_release_notify,
   1548	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
   1549	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
   1550	.access_memory = &amdgpu_ttm_access_memory,
   1551};
   1552
   1553/*
   1554 * Firmware Reservation functions
   1555 */
   1556/**
   1557 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
   1558 *
   1559 * @adev: amdgpu_device pointer
   1560 *
   1561 * free fw reserved vram if it has been reserved.
   1562 */
   1563static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
   1564{
   1565	amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
   1566		NULL, &adev->mman.fw_vram_usage_va);
   1567}
   1568
   1569/**
   1570 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
   1571 *
   1572 * @adev: amdgpu_device pointer
   1573 *
   1574 * create bo vram reservation from fw.
   1575 */
   1576static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
   1577{
   1578	uint64_t vram_size = adev->gmc.visible_vram_size;
   1579
   1580	adev->mman.fw_vram_usage_va = NULL;
   1581	adev->mman.fw_vram_usage_reserved_bo = NULL;
   1582
   1583	if (adev->mman.fw_vram_usage_size == 0 ||
   1584	    adev->mman.fw_vram_usage_size > vram_size)
   1585		return 0;
   1586
   1587	return amdgpu_bo_create_kernel_at(adev,
   1588					  adev->mman.fw_vram_usage_start_offset,
   1589					  adev->mman.fw_vram_usage_size,
   1590					  AMDGPU_GEM_DOMAIN_VRAM,
   1591					  &adev->mman.fw_vram_usage_reserved_bo,
   1592					  &adev->mman.fw_vram_usage_va);
   1593}
   1594
   1595/*
   1596 * Memory training reservation functions
   1597 */
   1598
   1599/**
   1600 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
   1601 *
   1602 * @adev: amdgpu_device pointer
   1603 *
   1604 * free memory training reserved vram if it has been reserved.
   1605 */
   1606static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
   1607{
   1608	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
   1609
   1610	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
   1611	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
   1612	ctx->c2p_bo = NULL;
   1613
   1614	return 0;
   1615}
   1616
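       /*
        * amdgpu_ttm_training_data_block_init - Compute the memory training layout
        *
        * Places the C2P training buffer just below the discovery TMR at the top
        * of VRAM and the P2C buffer at the fixed GDDR6 training offset.
        */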
   1617static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
   1618{
   1619	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
   1620
   1621	memset(ctx, 0, sizeof(*ctx));
   1622
   1623	ctx->c2p_train_data_offset =
   1624		ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
   1625	ctx->p2c_train_data_offset =
   1626		(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
   1627	ctx->train_data_size =
   1628		GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
   1629
   1630	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
   1631			ctx->train_data_size,
   1632			ctx->p2c_train_data_offset,
   1633			ctx->c2p_train_data_offset);
   1634}
   1635
   1636/*
   1637 * reserve TMR memory at the top of VRAM which holds
   1638 * IP Discovery data and is protected by PSP.
   1639 */
   1640static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
   1641{
   1642	int ret;
   1643	struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
   1644	bool mem_train_support = false;
   1645
   1646	if (!amdgpu_sriov_vf(adev)) {
   1647		if (amdgpu_atomfirmware_mem_training_supported(adev))
   1648			mem_train_support = true;
   1649		else
   1650			DRM_DEBUG("memory training is not supported!\n");
   1651	}
   1652
   1653	/*
   1654	 * Query the reserved TMR size through atom firmwareinfo for Sienna_Cichlid and onwards
   1655	 * for all the use cases (IP discovery, G6 memory training, profiling, diagnostic data, etc.)
   1656	 *
   1657	 * Otherwise, fall back to the legacy approach of checking and reserving a TMR block for IP
   1658	 * discovery data and G6 memory training data respectively
   1659	 */
   1660	adev->mman.discovery_tmr_size =
   1661		amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
   1662	if (!adev->mman.discovery_tmr_size)
   1663		adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
   1664
   1665	if (mem_train_support) {
   1666		/* reserve vram for mem train according to TMR location */
   1667		amdgpu_ttm_training_data_block_init(adev);
   1668		ret = amdgpu_bo_create_kernel_at(adev,
   1669					 ctx->c2p_train_data_offset,
   1670					 ctx->train_data_size,
   1671					 AMDGPU_GEM_DOMAIN_VRAM,
   1672					 &ctx->c2p_bo,
   1673					 NULL);
   1674		if (ret) {
   1675			DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
   1676			amdgpu_ttm_training_reserve_vram_fini(adev);
   1677			return ret;
   1678		}
   1679		ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
   1680	}
   1681
   1682	ret = amdgpu_bo_create_kernel_at(adev,
   1683				adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
   1684				adev->mman.discovery_tmr_size,
   1685				AMDGPU_GEM_DOMAIN_VRAM,
   1686				&adev->mman.discovery_memory,
   1687				NULL);
   1688	if (ret) {
   1689		DRM_ERROR("alloc tmr failed(%d)!\n", ret);
   1690		amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
   1691		return ret;
   1692	}
   1693
   1694	return 0;
   1695}
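
        /*
         * Rough sketch of the resulting top-of-VRAM layout (sizes and the
         * presence of the training buffer depend on the ASIC and on whether
         * memory training is supported; this only illustrates the
         * reservations made above):
         *
         *   +--------------------------------+  <- real_vram_size
         *   | discovery TMR                  |     (discovery_tmr_size)
         *   +--------------------------------+
         *   | c2p memory training buffer     |     (train_data_size, optional)
         *   +--------------------------------+
         *   | VRAM managed by the VRAM mgr   |
         *   |              ...               |
         */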
   1696
   1697/*
   1698 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
   1699 * gtt/vram related fields.
   1700 *
   1701 * This initializes all of the memory space pools that the TTM layer
   1702 * will need such as the GTT space (system memory mapped to the device),
   1703 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
   1704 * can be mapped per VMID.
   1705 */
   1706int amdgpu_ttm_init(struct amdgpu_device *adev)
   1707{
   1708	uint64_t gtt_size;
   1709	int r;
   1710	u64 vis_vram_limit;
   1711
   1712	mutex_init(&adev->mman.gtt_window_lock);
   1713
    1714	/* No other users of the address space, so set it to 0 */
   1715	r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
   1716			       adev_to_drm(adev)->anon_inode->i_mapping,
   1717			       adev_to_drm(adev)->vma_offset_manager,
   1718			       adev->need_swiotlb,
   1719			       dma_addressing_limited(adev->dev));
   1720	if (r) {
   1721		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
   1722		return r;
   1723	}
   1724	adev->mman.initialized = true;
   1725
   1726	/* Initialize VRAM pool with all of VRAM divided into pages */
   1727	r = amdgpu_vram_mgr_init(adev);
   1728	if (r) {
   1729		DRM_ERROR("Failed initializing VRAM heap.\n");
   1730		return r;
   1731	}
   1732
   1733	/* Reduce size of CPU-visible VRAM if requested */
   1734	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
   1735	if (amdgpu_vis_vram_limit > 0 &&
   1736	    vis_vram_limit <= adev->gmc.visible_vram_size)
   1737		adev->gmc.visible_vram_size = vis_vram_limit;
   1738
   1739	/* Change the size here instead of the init above so only lpfn is affected */
   1740	amdgpu_ttm_set_buffer_funcs_status(adev, false);
   1741#ifdef CONFIG_64BIT
   1742#ifdef CONFIG_X86
   1743	if (adev->gmc.xgmi.connected_to_cpu)
   1744		adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
   1745				adev->gmc.visible_vram_size);
   1746
   1747	else
   1748#endif
   1749		adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
   1750				adev->gmc.visible_vram_size);
   1751#endif
   1752
   1753	/*
    1754	 * The reserved VRAM for firmware must be pinned to the specified
    1755	 * place in VRAM, so reserve it early.
   1756	 */
   1757	r = amdgpu_ttm_fw_reserve_vram_init(adev);
    1758	if (r)
    1759		return r;
   1761
   1762	/*
    1763	 * Only NAVI10 and onwards ASICs support IP discovery.
    1764	 * If IP discovery is enabled, a block of memory should be
    1765	 * reserved for the IP discovery data.
   1766	 */
   1767	if (adev->mman.discovery_bin) {
   1768		r = amdgpu_ttm_reserve_tmr(adev);
   1769		if (r)
   1770			return r;
   1771	}
   1772
    1773	/* Allocate memory as required for VGA. This is used for VGA
    1774	 * emulation and pre-OS scanout buffers to avoid display
    1775	 * artifacts while transitioning between pre-OS and driver.
    1776	 */
   1777	r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
   1778				       AMDGPU_GEM_DOMAIN_VRAM,
   1779				       &adev->mman.stolen_vga_memory,
   1780				       NULL);
   1781	if (r)
   1782		return r;
   1783	r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
   1784				       adev->mman.stolen_extended_size,
   1785				       AMDGPU_GEM_DOMAIN_VRAM,
   1786				       &adev->mman.stolen_extended_memory,
   1787				       NULL);
   1788	if (r)
   1789		return r;
   1790	r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
   1791				       adev->mman.stolen_reserved_size,
   1792				       AMDGPU_GEM_DOMAIN_VRAM,
   1793				       &adev->mman.stolen_reserved_memory,
   1794				       NULL);
   1795	if (r)
   1796		return r;
   1797
   1798	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
   1799		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
   1800
    1801	/* Compute GTT size, either based on 1/2 the size of system memory
    1802	 * or whatever the user passed on module init. */
   1803	if (amdgpu_gtt_size == -1) {
   1804		struct sysinfo si;
   1805
   1806		si_meminfo(&si);
   1807		/* Certain GL unit tests for large textures can cause problems
   1808		 * with the OOM killer since there is no way to link this memory
   1809		 * to a process.  This was originally mitigated (but not necessarily
   1810		 * eliminated) by limiting the GTT size.  The problem is this limit
   1811		 * is often too low for many modern games so just make the limit 1/2
   1812		 * of system memory which aligns with TTM. The OOM accounting needs
   1813		 * to be addressed, but we shouldn't prevent common 3D applications
   1814		 * from being usable just to potentially mitigate that corner case.
   1815		 */
   1816		gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
   1817			       (u64)si.totalram * si.mem_unit / 2);
   1818	} else {
   1819		gtt_size = (uint64_t)amdgpu_gtt_size << 20;
   1820	}
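
        	/*
        	 * Worked example (illustrative, assuming the default 3 GiB value
        	 * of AMDGPU_DEFAULT_GTT_SIZE_MB): on a machine with 32 GiB of
        	 * system memory, si.totalram * si.mem_unit / 2 is 16 GiB, which
        	 * wins over the default, so gtt_size ends up as 16 GiB.  Booting
        	 * with e.g. amdgpu.gtt_size=4096 would instead force a 4 GiB GTT
        	 * via the else branch above.
        	 */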
   1821
   1822	/* Initialize GTT memory pool */
   1823	r = amdgpu_gtt_mgr_init(adev, gtt_size);
   1824	if (r) {
   1825		DRM_ERROR("Failed initializing GTT heap.\n");
   1826		return r;
   1827	}
   1828	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
   1829		 (unsigned)(gtt_size / (1024 * 1024)));
   1830
   1831	/* Initialize preemptible memory pool */
   1832	r = amdgpu_preempt_mgr_init(adev);
   1833	if (r) {
   1834		DRM_ERROR("Failed initializing PREEMPT heap.\n");
   1835		return r;
   1836	}
   1837
   1838	/* Initialize various on-chip memory pools */
   1839	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
   1840	if (r) {
   1841		DRM_ERROR("Failed initializing GDS heap.\n");
   1842		return r;
   1843	}
   1844
   1845	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
   1846	if (r) {
   1847		DRM_ERROR("Failed initializing gws heap.\n");
   1848		return r;
   1849	}
   1850
   1851	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
   1852	if (r) {
   1853		DRM_ERROR("Failed initializing oa heap.\n");
   1854		return r;
   1855	}
   1856
   1857	if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
   1858				AMDGPU_GEM_DOMAIN_GTT,
   1859				&adev->mman.sdma_access_bo, NULL,
   1860				&adev->mman.sdma_access_ptr))
   1861		DRM_WARN("Debug VRAM access will use slowpath MM access\n");
   1862
   1863	return 0;
   1864}
   1865
   1866/*
   1867 * amdgpu_ttm_fini - De-initialize the TTM memory pools
   1868 */
   1869void amdgpu_ttm_fini(struct amdgpu_device *adev)
   1870{
   1871	int idx;
   1872	if (!adev->mman.initialized)
   1873		return;
   1874
   1875	amdgpu_ttm_training_reserve_vram_fini(adev);
   1876	/* return the stolen vga memory back to VRAM */
   1877	amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
   1878	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
   1879	/* return the IP Discovery TMR memory back to VRAM */
   1880	amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
   1881	if (adev->mman.stolen_reserved_size)
   1882		amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
   1883				      NULL, NULL);
   1884	amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
   1885					&adev->mman.sdma_access_ptr);
   1886	amdgpu_ttm_fw_reserve_vram_fini(adev);
   1887
   1888	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
   1889
   1890		if (adev->mman.aper_base_kaddr)
   1891			iounmap(adev->mman.aper_base_kaddr);
   1892		adev->mman.aper_base_kaddr = NULL;
   1893
   1894		drm_dev_exit(idx);
   1895	}
   1896
   1897	amdgpu_vram_mgr_fini(adev);
   1898	amdgpu_gtt_mgr_fini(adev);
   1899	amdgpu_preempt_mgr_fini(adev);
   1900	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
   1901	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
   1902	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
   1903	ttm_device_fini(&adev->mman.bdev);
   1904	adev->mman.initialized = false;
   1905	DRM_INFO("amdgpu: ttm finalized\n");
   1906}
   1907
   1908/**
   1909 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
   1910 *
   1911 * @adev: amdgpu_device pointer
   1912 * @enable: true when we can use buffer functions.
   1913 *
   1914 * Enable/disable use of buffer functions during suspend/resume. This should
   1915 * only be called at bootup or when userspace isn't running.
   1916 */
   1917void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
   1918{
   1919	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
   1920	uint64_t size;
   1921	int r;
   1922
   1923	if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
   1924	    adev->mman.buffer_funcs_enabled == enable)
   1925		return;
   1926
   1927	if (enable) {
   1928		struct amdgpu_ring *ring;
   1929		struct drm_gpu_scheduler *sched;
   1930
   1931		ring = adev->mman.buffer_funcs_ring;
   1932		sched = &ring->sched;
   1933		r = drm_sched_entity_init(&adev->mman.entity,
   1934					  DRM_SCHED_PRIORITY_KERNEL, &sched,
   1935					  1, NULL);
   1936		if (r) {
   1937			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
   1938				  r);
   1939			return;
   1940		}
   1941	} else {
   1942		drm_sched_entity_destroy(&adev->mman.entity);
   1943		dma_fence_put(man->move);
   1944		man->move = NULL;
   1945	}
   1946
    1947	/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
   1948	if (enable)
   1949		size = adev->gmc.real_vram_size;
   1950	else
   1951		size = adev->gmc.visible_vram_size;
   1952	man->size = size;
   1953	adev->mman.buffer_funcs_enabled = enable;
   1954}
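
        /*
         * Typical call pattern (a sketch, not a definitive list of callers):
         * the copy engine code enables the buffer functions once its ring is
         * ready and disables them again around suspend/teardown, e.g.
         *
         *	amdgpu_ttm_set_buffer_funcs_status(adev, true);
         *	...
         *	amdgpu_ttm_set_buffer_funcs_status(adev, false);
         *
         * As noted in the kernel-doc above, this must only happen at bootup
         * or while user space is not running.
         */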
   1955
   1956static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
   1957				  bool direct_submit,
   1958				  unsigned int num_dw,
   1959				  struct dma_resv *resv,
   1960				  bool vm_needs_flush,
   1961				  struct amdgpu_job **job)
   1962{
   1963	enum amdgpu_ib_pool_type pool = direct_submit ?
   1964		AMDGPU_IB_POOL_DIRECT :
   1965		AMDGPU_IB_POOL_DELAYED;
   1966	int r;
   1967
   1968	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job);
   1969	if (r)
   1970		return r;
   1971
   1972	if (vm_needs_flush) {
   1973		(*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
   1974							adev->gmc.pdb0_bo :
   1975							adev->gart.bo);
   1976		(*job)->vm_needs_flush = true;
   1977	}
   1978	if (resv) {
   1979		r = amdgpu_sync_resv(adev, &(*job)->sync, resv,
   1980				     AMDGPU_SYNC_ALWAYS,
   1981				     AMDGPU_FENCE_OWNER_UNDEFINED);
   1982		if (r) {
   1983			DRM_ERROR("sync failed (%d).\n", r);
   1984			amdgpu_job_free(*job);
   1985			return r;
   1986		}
   1987	}
   1988	return 0;
   1989}
   1990
   1991int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
   1992		       uint64_t dst_offset, uint32_t byte_count,
   1993		       struct dma_resv *resv,
   1994		       struct dma_fence **fence, bool direct_submit,
   1995		       bool vm_needs_flush, bool tmz)
   1996{
   1997	struct amdgpu_device *adev = ring->adev;
   1998	unsigned num_loops, num_dw;
   1999	struct amdgpu_job *job;
   2000	uint32_t max_bytes;
   2001	unsigned i;
   2002	int r;
   2003
   2004	if (!direct_submit && !ring->sched.ready) {
   2005		DRM_ERROR("Trying to move memory with ring turned off.\n");
   2006		return -EINVAL;
   2007	}
   2008
   2009	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
   2010	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
   2011	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
   2012	r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
   2013				   resv, vm_needs_flush, &job);
   2014	if (r)
   2015		return r;
   2016
   2017	for (i = 0; i < num_loops; i++) {
   2018		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
   2019
   2020		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
   2021					dst_offset, cur_size_in_bytes, tmz);
   2022
   2023		src_offset += cur_size_in_bytes;
   2024		dst_offset += cur_size_in_bytes;
   2025		byte_count -= cur_size_in_bytes;
   2026	}
   2027
   2028	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
   2029	WARN_ON(job->ibs[0].length_dw > num_dw);
   2030	if (direct_submit)
   2031		r = amdgpu_job_submit_direct(job, ring, fence);
   2032	else
   2033		r = amdgpu_job_submit(job, &adev->mman.entity,
   2034				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
   2035	if (r)
   2036		goto error_free;
   2037
   2038	return r;
   2039
   2040error_free:
   2041	amdgpu_job_free(job);
   2042	DRM_ERROR("Error scheduling IBs (%d)\n", r);
   2043	return r;
   2044}
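
        /*
         * Example (a sketch only; src, dst and size are placeholder GPU
         * addresses/sizes, not values used by the driver): scheduling a copy
         * on the buffer functions ring and waiting for it to complete:
         *
         *	struct dma_fence *fence = NULL;
         *	int r;
         *
         *	r = amdgpu_copy_buffer(adev->mman.buffer_funcs_ring, src, dst,
         *			       size, NULL, &fence, false, false, false);
         *	if (!r && fence) {
         *		dma_fence_wait(fence, false);
         *		dma_fence_put(fence);
         *	}
         *
         * Real callers usually pass the BO's reservation object instead of
         * NULL and set vm_needs_flush/tmz as required.
         */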
   2045
   2046static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
   2047			       uint64_t dst_addr, uint32_t byte_count,
   2048			       struct dma_resv *resv,
   2049			       struct dma_fence **fence,
   2050			       bool vm_needs_flush)
   2051{
   2052	struct amdgpu_device *adev = ring->adev;
   2053	unsigned int num_loops, num_dw;
   2054	struct amdgpu_job *job;
   2055	uint32_t max_bytes;
   2056	unsigned int i;
   2057	int r;
   2058
   2059	max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
   2060	num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
   2061	num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
   2062	r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
   2063				   &job);
   2064	if (r)
   2065		return r;
   2066
   2067	for (i = 0; i < num_loops; i++) {
   2068		uint32_t cur_size = min(byte_count, max_bytes);
   2069
   2070		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
   2071					cur_size);
   2072
   2073		dst_addr += cur_size;
   2074		byte_count -= cur_size;
   2075	}
   2076
   2077	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
   2078	WARN_ON(job->ibs[0].length_dw > num_dw);
   2079	r = amdgpu_job_submit(job, &adev->mman.entity,
   2080			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
   2081	if (r)
   2082		goto error_free;
   2083
   2084	return 0;
   2085
   2086error_free:
   2087	amdgpu_job_free(job);
   2088	return r;
   2089}
   2090
   2091int amdgpu_fill_buffer(struct amdgpu_bo *bo,
   2092			uint32_t src_data,
   2093			struct dma_resv *resv,
   2094			struct dma_fence **f)
   2095{
   2096	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
   2097	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
   2098	struct dma_fence *fence = NULL;
   2099	struct amdgpu_res_cursor dst;
   2100	int r;
   2101
   2102	if (!adev->mman.buffer_funcs_enabled) {
   2103		DRM_ERROR("Trying to clear memory with ring turned off.\n");
   2104		return -EINVAL;
   2105	}
   2106
   2107	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
   2108
   2109	mutex_lock(&adev->mman.gtt_window_lock);
   2110	while (dst.remaining) {
   2111		struct dma_fence *next;
   2112		uint64_t cur_size, to;
   2113
   2114		/* Never fill more than 256MiB at once to avoid timeouts */
   2115		cur_size = min(dst.size, 256ULL << 20);
   2116
   2117		r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
   2118					  1, ring, false, &cur_size, &to);
   2119		if (r)
   2120			goto error;
   2121
   2122		r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
   2123					&next, true);
   2124		if (r)
   2125			goto error;
   2126
   2127		dma_fence_put(fence);
   2128		fence = next;
   2129
   2130		amdgpu_res_next(&dst, cur_size);
   2131	}
   2132error:
   2133	mutex_unlock(&adev->mman.gtt_window_lock);
   2134	if (f)
   2135		*f = dma_fence_get(fence);
   2136	dma_fence_put(fence);
   2137	return r;
   2138}
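
        /*
         * Example (a sketch, assuming bo is a reserved BO backed by VRAM and
         * the buffer functions are enabled): clearing a BO to zero and
         * waiting for the fill to finish:
         *
         *	struct dma_fence *fence = NULL;
         *	int r;
         *
         *	r = amdgpu_fill_buffer(bo, 0, NULL, &fence);
         *	if (!r && fence) {
         *		dma_fence_wait(fence, false);
         *		dma_fence_put(fence);
         *	}
         */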
   2139
   2140/**
   2141 * amdgpu_ttm_evict_resources - evict memory buffers
   2142 * @adev: amdgpu device object
   2143 * @mem_type: evicted BO's memory type
   2144 *
   2145 * Evicts all @mem_type buffers on the lru list of the memory type.
   2146 *
   2147 * Returns:
   2148 * 0 for success or a negative error code on failure.
   2149 */
   2150int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
   2151{
   2152	struct ttm_resource_manager *man;
   2153
   2154	switch (mem_type) {
   2155	case TTM_PL_VRAM:
   2156	case TTM_PL_TT:
   2157	case AMDGPU_PL_GWS:
   2158	case AMDGPU_PL_GDS:
   2159	case AMDGPU_PL_OA:
   2160		man = ttm_manager_type(&adev->mman.bdev, mem_type);
   2161		break;
   2162	default:
   2163		DRM_ERROR("Trying to evict invalid memory type\n");
   2164		return -EINVAL;
   2165	}
   2166
   2167	return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
   2168}
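
        /*
         * Example (illustrative): evicting all VRAM buffers, e.g. on the way
         * into suspend:
         *
         *	r = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
         *	if (r)
         *		DRM_WARN("evicting VRAM failed (%d)\n", r);
         */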
   2169
   2170#if defined(CONFIG_DEBUG_FS)
   2171
   2172static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
   2173{
   2174	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
   2175
   2176	return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
   2177}
   2178
   2179DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
   2180
   2181/*
   2182 * amdgpu_ttm_vram_read - Linear read access to VRAM
   2183 *
   2184 * Accesses VRAM via MMIO for debugging purposes.
   2185 */
   2186static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
   2187				    size_t size, loff_t *pos)
   2188{
   2189	struct amdgpu_device *adev = file_inode(f)->i_private;
   2190	ssize_t result = 0;
   2191
   2192	if (size & 0x3 || *pos & 0x3)
   2193		return -EINVAL;
   2194
   2195	if (*pos >= adev->gmc.mc_vram_size)
   2196		return -ENXIO;
   2197
   2198	size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
   2199	while (size) {
   2200		size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
   2201		uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
   2202
   2203		amdgpu_device_vram_access(adev, *pos, value, bytes, false);
   2204		if (copy_to_user(buf, value, bytes))
   2205			return -EFAULT;
   2206
   2207		result += bytes;
   2208		buf += bytes;
   2209		*pos += bytes;
   2210		size -= bytes;
   2211	}
   2212
   2213	return result;
   2214}
   2215
   2216/*
   2217 * amdgpu_ttm_vram_write - Linear write access to VRAM
   2218 *
   2219 * Accesses VRAM via MMIO for debugging purposes.
   2220 */
   2221static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
   2222				    size_t size, loff_t *pos)
   2223{
   2224	struct amdgpu_device *adev = file_inode(f)->i_private;
   2225	ssize_t result = 0;
   2226	int r;
   2227
   2228	if (size & 0x3 || *pos & 0x3)
   2229		return -EINVAL;
   2230
   2231	if (*pos >= adev->gmc.mc_vram_size)
   2232		return -ENXIO;
   2233
   2234	while (size) {
   2235		uint32_t value;
   2236
   2237		if (*pos >= adev->gmc.mc_vram_size)
   2238			return result;
   2239
   2240		r = get_user(value, (uint32_t *)buf);
   2241		if (r)
   2242			return r;
   2243
   2244		amdgpu_device_mm_access(adev, *pos, &value, 4, true);
   2245
   2246		result += 4;
   2247		buf += 4;
   2248		*pos += 4;
   2249		size -= 4;
   2250	}
   2251
   2252	return result;
   2253}
   2254
   2255static const struct file_operations amdgpu_ttm_vram_fops = {
   2256	.owner = THIS_MODULE,
   2257	.read = amdgpu_ttm_vram_read,
   2258	.write = amdgpu_ttm_vram_write,
   2259	.llseek = default_llseek,
   2260};
   2261
   2262/*
   2263 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
   2264 *
   2265 * This function is used to read memory that has been mapped to the
   2266 * GPU and the known addresses are not physical addresses but instead
   2267 * bus addresses (e.g., what you'd put in an IB or ring buffer).
   2268 */
   2269static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
   2270				 size_t size, loff_t *pos)
   2271{
   2272	struct amdgpu_device *adev = file_inode(f)->i_private;
   2273	struct iommu_domain *dom;
   2274	ssize_t result = 0;
   2275	int r;
   2276
   2277	/* retrieve the IOMMU domain if any for this device */
   2278	dom = iommu_get_domain_for_dev(adev->dev);
   2279
   2280	while (size) {
   2281		phys_addr_t addr = *pos & PAGE_MASK;
   2282		loff_t off = *pos & ~PAGE_MASK;
   2283		size_t bytes = PAGE_SIZE - off;
   2284		unsigned long pfn;
   2285		struct page *p;
   2286		void *ptr;
   2287
   2288		bytes = bytes < size ? bytes : size;
   2289
   2290		/* Translate the bus address to a physical address.  If
   2291		 * the domain is NULL it means there is no IOMMU active
   2292		 * and the address translation is the identity
   2293		 */
   2294		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
   2295
   2296		pfn = addr >> PAGE_SHIFT;
   2297		if (!pfn_valid(pfn))
   2298			return -EPERM;
   2299
   2300		p = pfn_to_page(pfn);
   2301		if (p->mapping != adev->mman.bdev.dev_mapping)
   2302			return -EPERM;
   2303
   2304		ptr = kmap(p);
   2305		r = copy_to_user(buf, ptr + off, bytes);
   2306		kunmap(p);
   2307		if (r)
   2308			return -EFAULT;
   2309
   2310		size -= bytes;
   2311		*pos += bytes;
   2312		result += bytes;
   2313	}
   2314
   2315	return result;
   2316}
   2317
   2318/*
   2319 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
   2320 *
   2321 * This function is used to write memory that has been mapped to the
   2322 * GPU and the known addresses are not physical addresses but instead
   2323 * bus addresses (e.g., what you'd put in an IB or ring buffer).
   2324 */
   2325static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
   2326				 size_t size, loff_t *pos)
   2327{
   2328	struct amdgpu_device *adev = file_inode(f)->i_private;
   2329	struct iommu_domain *dom;
   2330	ssize_t result = 0;
   2331	int r;
   2332
   2333	dom = iommu_get_domain_for_dev(adev->dev);
   2334
   2335	while (size) {
   2336		phys_addr_t addr = *pos & PAGE_MASK;
   2337		loff_t off = *pos & ~PAGE_MASK;
   2338		size_t bytes = PAGE_SIZE - off;
   2339		unsigned long pfn;
   2340		struct page *p;
   2341		void *ptr;
   2342
   2343		bytes = bytes < size ? bytes : size;
   2344
   2345		addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
   2346
   2347		pfn = addr >> PAGE_SHIFT;
   2348		if (!pfn_valid(pfn))
   2349			return -EPERM;
   2350
   2351		p = pfn_to_page(pfn);
   2352		if (p->mapping != adev->mman.bdev.dev_mapping)
   2353			return -EPERM;
   2354
   2355		ptr = kmap(p);
   2356		r = copy_from_user(ptr + off, buf, bytes);
   2357		kunmap(p);
   2358		if (r)
   2359			return -EFAULT;
   2360
   2361		size -= bytes;
   2362		*pos += bytes;
   2363		result += bytes;
   2364	}
   2365
   2366	return result;
   2367}
   2368
   2369static const struct file_operations amdgpu_ttm_iomem_fops = {
   2370	.owner = THIS_MODULE,
   2371	.read = amdgpu_iomem_read,
   2372	.write = amdgpu_iomem_write,
   2373	.llseek = default_llseek
   2374};
   2375
   2376#endif
   2377
   2378void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
   2379{
   2380#if defined(CONFIG_DEBUG_FS)
   2381	struct drm_minor *minor = adev_to_drm(adev)->primary;
   2382	struct dentry *root = minor->debugfs_root;
   2383
   2384	debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
   2385				 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
   2386	debugfs_create_file("amdgpu_iomem", 0444, root, adev,
   2387			    &amdgpu_ttm_iomem_fops);
   2388	debugfs_create_file("ttm_page_pool", 0444, root, adev,
   2389			    &amdgpu_ttm_page_pool_fops);
   2390	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
   2391							     TTM_PL_VRAM),
   2392					    root, "amdgpu_vram_mm");
   2393	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
   2394							     TTM_PL_TT),
   2395					    root, "amdgpu_gtt_mm");
   2396	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
   2397							     AMDGPU_PL_GDS),
   2398					    root, "amdgpu_gds_mm");
   2399	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
   2400							     AMDGPU_PL_GWS),
   2401					    root, "amdgpu_gws_mm");
   2402	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
   2403							     AMDGPU_PL_OA),
   2404					    root, "amdgpu_oa_mm");
   2405
   2406#endif
   2407}
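
        /*
         * Usage note (illustrative; the exact path depends on the DRM minor
         * number): with CONFIG_DEBUG_FS enabled the files created above show
         * up under /sys/kernel/debug/dri/<minor>/.  amdgpu_vram gives raw,
         * dword-aligned access to VRAM (both offset and size must be
         * multiples of 4, as enforced by amdgpu_ttm_vram_read/write above),
         * while amdgpu_iomem accesses memory through the GPU's bus-address
         * view, translating IOVAs via the IOMMU when one is active.
         */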