cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amdgpu_object.c (41126B)


      1/*
      2 * Copyright 2009 Jerome Glisse.
      3 * All Rights Reserved.
      4 *
      5 * Permission is hereby granted, free of charge, to any person obtaining a
      6 * copy of this software and associated documentation files (the
      7 * "Software"), to deal in the Software without restriction, including
      8 * without limitation the rights to use, copy, modify, merge, publish,
      9 * distribute, sub license, and/or sell copies of the Software, and to
     10 * permit persons to whom the Software is furnished to do so, subject to
     11 * the following conditions:
     12 *
     13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
     20 *
     21 * The above copyright notice and this permission notice (including the
     22 * next paragraph) shall be included in all copies or substantial portions
     23 * of the Software.
     24 *
     25 */
     26/*
     27 * Authors:
     28 *    Jerome Glisse <glisse@freedesktop.org>
     29 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
     30 *    Dave Airlie
     31 */
     32#include <linux/list.h>
     33#include <linux/slab.h>
     34#include <linux/dma-buf.h>
     35
     36#include <drm/drm_drv.h>
     37#include <drm/amdgpu_drm.h>
     38#include <drm/drm_cache.h>
     39#include "amdgpu.h"
     40#include "amdgpu_trace.h"
     41#include "amdgpu_amdkfd.h"
     42
     43/**
     44 * DOC: amdgpu_object
     45 *
      46 * This defines the interfaces to operate on an &amdgpu_bo buffer object, which
      47 * represents memory used by the driver (VRAM, system memory, etc.). The driver
      48 * provides DRM/GEM APIs to userspace. The DRM/GEM APIs then use these interfaces
      49 * to create/destroy/set buffer objects, which are then managed by the kernel TTM
      50 * memory manager.
     51 * The interfaces are also used internally by kernel clients, including gfx,
     52 * uvd, etc. for kernel managed allocations used by the GPU.
     53 *
     54 */
     55
     56static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
     57{
     58	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
     59
     60	amdgpu_bo_kunmap(bo);
     61
     62	if (bo->tbo.base.import_attach)
     63		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
     64	drm_gem_object_release(&bo->tbo.base);
     65	amdgpu_bo_unref(&bo->parent);
     66	kvfree(bo);
     67}
     68
     69static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
     70{
     71	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
     72	struct amdgpu_bo_user *ubo;
     73
     74	ubo = to_amdgpu_bo_user(bo);
     75	kfree(ubo->metadata);
     76	amdgpu_bo_destroy(tbo);
     77}
     78
     79static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
     80{
     81	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
     82	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
     83	struct amdgpu_bo_vm *vmbo;
     84
     85	vmbo = to_amdgpu_bo_vm(bo);
      86	/* in case amdgpu_device_recover_vram got a NULL bo->parent */
     87	if (!list_empty(&vmbo->shadow_list)) {
     88		mutex_lock(&adev->shadow_list_lock);
     89		list_del_init(&vmbo->shadow_list);
     90		mutex_unlock(&adev->shadow_list_lock);
     91	}
     92
     93	amdgpu_bo_destroy(tbo);
     94}
     95
     96/**
     97 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
     98 * @bo: buffer object to be checked
     99 *
     100 * Uses the destroy function associated with the object to determine if this is
    101 * an &amdgpu_bo.
    102 *
    103 * Returns:
    104 * true if the object belongs to &amdgpu_bo, false if not.
    105 */
    106bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
    107{
    108	if (bo->destroy == &amdgpu_bo_destroy ||
    109	    bo->destroy == &amdgpu_bo_user_destroy ||
    110	    bo->destroy == &amdgpu_bo_vm_destroy)
    111		return true;
    112
    113	return false;
    114}
    115
    116/**
    117 * amdgpu_bo_placement_from_domain - set buffer's placement
    118 * @abo: &amdgpu_bo buffer object whose placement is to be set
    119 * @domain: requested domain
    120 *
    121 * Sets buffer's placement according to requested domain and the buffer's
    122 * flags.
    123 */
    124void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
    125{
    126	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
    127	struct ttm_placement *placement = &abo->placement;
    128	struct ttm_place *places = abo->placements;
    129	u64 flags = abo->flags;
    130	u32 c = 0;
    131
    132	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
    133		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
    134
    135		places[c].fpfn = 0;
    136		places[c].lpfn = 0;
    137		places[c].mem_type = TTM_PL_VRAM;
    138		places[c].flags = 0;
    139
    140		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
    141			places[c].lpfn = visible_pfn;
    142		else
    143			places[c].flags |= TTM_PL_FLAG_TOPDOWN;
    144
    145		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
    146			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
    147		c++;
    148	}
    149
    150	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
    151		places[c].fpfn = 0;
    152		places[c].lpfn = 0;
    153		places[c].mem_type =
    154			abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
    155			AMDGPU_PL_PREEMPT : TTM_PL_TT;
    156		places[c].flags = 0;
    157		c++;
    158	}
    159
    160	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
    161		places[c].fpfn = 0;
    162		places[c].lpfn = 0;
    163		places[c].mem_type = TTM_PL_SYSTEM;
    164		places[c].flags = 0;
    165		c++;
    166	}
    167
    168	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
    169		places[c].fpfn = 0;
    170		places[c].lpfn = 0;
    171		places[c].mem_type = AMDGPU_PL_GDS;
    172		places[c].flags = 0;
    173		c++;
    174	}
    175
    176	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
    177		places[c].fpfn = 0;
    178		places[c].lpfn = 0;
    179		places[c].mem_type = AMDGPU_PL_GWS;
    180		places[c].flags = 0;
    181		c++;
    182	}
    183
    184	if (domain & AMDGPU_GEM_DOMAIN_OA) {
    185		places[c].fpfn = 0;
    186		places[c].lpfn = 0;
    187		places[c].mem_type = AMDGPU_PL_OA;
    188		places[c].flags = 0;
    189		c++;
    190	}
    191
    192	if (!c) {
    193		places[c].fpfn = 0;
    194		places[c].lpfn = 0;
    195		places[c].mem_type = TTM_PL_SYSTEM;
    196		places[c].flags = 0;
    197		c++;
    198	}
    199
    200	BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
    201
    202	placement->num_placement = c;
    203	placement->placement = places;
    204
    205	placement->num_busy_placement = c;
    206	placement->busy_placement = places;
    207}
    208
    209/**
    210 * amdgpu_bo_create_reserved - create reserved BO for kernel use
    211 *
    212 * @adev: amdgpu device object
    213 * @size: size for the new BO
    214 * @align: alignment for the new BO
    215 * @domain: where to place it
    216 * @bo_ptr: used to initialize BOs in structures
    217 * @gpu_addr: GPU addr of the pinned BO
    218 * @cpu_addr: optional CPU address mapping
    219 *
    220 * Allocates and pins a BO for kernel internal use, and returns it still
    221 * reserved.
    222 *
     223 * Note: a new BO is only created if *bo_ptr is NULL.
    224 *
    225 * Returns:
    226 * 0 on success, negative error code otherwise.
    227 */
    228int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
    229			      unsigned long size, int align,
    230			      u32 domain, struct amdgpu_bo **bo_ptr,
    231			      u64 *gpu_addr, void **cpu_addr)
    232{
    233	struct amdgpu_bo_param bp;
    234	bool free = false;
    235	int r;
    236
    237	if (!size) {
    238		amdgpu_bo_unref(bo_ptr);
    239		return 0;
    240	}
    241
    242	memset(&bp, 0, sizeof(bp));
    243	bp.size = size;
    244	bp.byte_align = align;
    245	bp.domain = domain;
    246	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
    247		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
    248	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
    249	bp.type = ttm_bo_type_kernel;
    250	bp.resv = NULL;
    251	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
    252
    253	if (!*bo_ptr) {
    254		r = amdgpu_bo_create(adev, &bp, bo_ptr);
    255		if (r) {
    256			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
    257				r);
    258			return r;
    259		}
    260		free = true;
    261	}
    262
    263	r = amdgpu_bo_reserve(*bo_ptr, false);
    264	if (r) {
    265		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
    266		goto error_free;
    267	}
    268
    269	r = amdgpu_bo_pin(*bo_ptr, domain);
    270	if (r) {
    271		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
    272		goto error_unreserve;
    273	}
    274
    275	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
    276	if (r) {
    277		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
    278		goto error_unpin;
    279	}
    280
    281	if (gpu_addr)
    282		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
    283
    284	if (cpu_addr) {
    285		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
    286		if (r) {
    287			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
    288			goto error_unpin;
    289		}
    290	}
    291
    292	return 0;
    293
    294error_unpin:
    295	amdgpu_bo_unpin(*bo_ptr);
    296error_unreserve:
    297	amdgpu_bo_unreserve(*bo_ptr);
    298
    299error_free:
    300	if (free)
    301		amdgpu_bo_unref(bo_ptr);
    302
    303	return r;
    304}
    305
    306/**
    307 * amdgpu_bo_create_kernel - create BO for kernel use
    308 *
    309 * @adev: amdgpu device object
    310 * @size: size for the new BO
    311 * @align: alignment for the new BO
    312 * @domain: where to place it
    313 * @bo_ptr:  used to initialize BOs in structures
    314 * @gpu_addr: GPU addr of the pinned BO
    315 * @cpu_addr: optional CPU address mapping
    316 *
    317 * Allocates and pins a BO for kernel internal use.
    318 *
     319 * Note: a new BO is only created if *bo_ptr is NULL.
    320 *
    321 * Returns:
    322 * 0 on success, negative error code otherwise.
    323 */
    324int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
    325			    unsigned long size, int align,
    326			    u32 domain, struct amdgpu_bo **bo_ptr,
    327			    u64 *gpu_addr, void **cpu_addr)
    328{
    329	int r;
    330
    331	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
    332				      gpu_addr, cpu_addr);
    333
    334	if (r)
    335		return r;
    336
    337	if (*bo_ptr)
    338		amdgpu_bo_unreserve(*bo_ptr);
    339
    340	return 0;
    341}
    342
    343/**
    344 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
    345 *
    346 * @adev: amdgpu device object
    347 * @offset: offset of the BO
    348 * @size: size of the BO
    349 * @domain: where to place it
    350 * @bo_ptr:  used to initialize BOs in structures
    351 * @cpu_addr: optional CPU address mapping
    352 *
    353 * Creates a kernel BO at a specific offset in the address space of the domain.
    354 *
    355 * Returns:
    356 * 0 on success, negative error code otherwise.
    357 */
    358int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
    359			       uint64_t offset, uint64_t size, uint32_t domain,
    360			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
    361{
    362	struct ttm_operation_ctx ctx = { false, false };
    363	unsigned int i;
    364	int r;
    365
    366	offset &= PAGE_MASK;
    367	size = ALIGN(size, PAGE_SIZE);
    368
    369	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
    370				      NULL, cpu_addr);
    371	if (r)
    372		return r;
    373
    374	if ((*bo_ptr) == NULL)
    375		return 0;
    376
    377	/*
    378	 * Remove the original mem node and create a new one at the request
    379	 * position.
    380	 */
    381	if (cpu_addr)
    382		amdgpu_bo_kunmap(*bo_ptr);
    383
    384	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
    385
    386	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
    387		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
    388		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
    389	}
    390	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
    391			     &(*bo_ptr)->tbo.resource, &ctx);
    392	if (r)
    393		goto error;
    394
    395	if (cpu_addr) {
    396		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
    397		if (r)
    398			goto error;
    399	}
    400
    401	amdgpu_bo_unreserve(*bo_ptr);
    402	return 0;
    403
    404error:
    405	amdgpu_bo_unreserve(*bo_ptr);
    406	amdgpu_bo_unref(bo_ptr);
    407	return r;
    408}
    409
    410/**
    411 * amdgpu_bo_free_kernel - free BO for kernel use
    412 *
    413 * @bo: amdgpu BO to free
    414 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
    415 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
    416 *
     417 * Unmaps and unpins a BO for kernel internal use.
    418 */
    419void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
    420			   void **cpu_addr)
    421{
    422	if (*bo == NULL)
    423		return;
    424
    425	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
    426		if (cpu_addr)
    427			amdgpu_bo_kunmap(*bo);
    428
    429		amdgpu_bo_unpin(*bo);
    430		amdgpu_bo_unreserve(*bo);
    431	}
    432	amdgpu_bo_unref(bo);
    433
    434	if (gpu_addr)
    435		*gpu_addr = 0;
    436
    437	if (cpu_addr)
    438		*cpu_addr = NULL;
    439}
    440
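/*
 * Illustrative sketch, not part of the original file: a typical
 * kernel-internal BO lifecycle built on amdgpu_bo_create_kernel() and
 * amdgpu_bo_free_kernel() above. The example_* helpers and the scratch
 * page they manage are hypothetical.
 */
static int example_scratch_init(struct amdgpu_device *adev,
				struct amdgpu_bo **bo, u64 *gpu_addr,
				void **cpu_addr)
{
	int r;

	/* Allocate, pin and CPU-map one page of CPU-visible VRAM. */
	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    bo, gpu_addr, cpu_addr);
	if (r)
		return r;

	/* The CPU mapping stays valid until amdgpu_bo_free_kernel(). */
	memset(*cpu_addr, 0, PAGE_SIZE);
	return 0;
}

static void example_scratch_fini(struct amdgpu_bo **bo, u64 *gpu_addr,
				 void **cpu_addr)
{
	/* Unmaps, unpins and drops the last reference. */
	amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr);
}
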
     441/* Validate that the BO size fits within the total size of the requested domain */
    442static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
    443					  unsigned long size, u32 domain)
    444{
    445	struct ttm_resource_manager *man = NULL;
    446
    447	/*
     448	 * If GTT is part of the requested domains, the check must succeed to
     449	 * allow falling back to GTT.
    450	 */
    451	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
    452		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
    453
    454		if (size < man->size)
    455			return true;
    456		else
    457			goto fail;
    458	}
    459
    460	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
    461		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
    462
    463		if (size < man->size)
    464			return true;
    465		else
    466			goto fail;
    467	}
    468
    469
    470	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
    471	return true;
    472
    473fail:
    474	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
    475		  man->size);
    476	return false;
    477}
    478
    479bool amdgpu_bo_support_uswc(u64 bo_flags)
    480{
    481
    482#ifdef CONFIG_X86_32
    483	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
    484	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
    485	 */
    486	return false;
    487#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
    488	/* Don't try to enable write-combining when it can't work, or things
    489	 * may be slow
    490	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
    491	 */
    492
    493#ifndef CONFIG_COMPILE_TEST
    494#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
    495	 thanks to write-combining
    496#endif
    497
    498	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
    499		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
    500			      "better performance thanks to write-combining\n");
    501	return false;
    502#else
    503	/* For architectures that don't support WC memory,
    504	 * mask out the WC flag from the BO
    505	 */
    506	if (!drm_arch_can_wc_memory())
    507		return false;
    508
    509	return true;
    510#endif
    511}
    512
    513/**
    514 * amdgpu_bo_create - create an &amdgpu_bo buffer object
    515 * @adev: amdgpu device object
    516 * @bp: parameters to be used for the buffer object
    517 * @bo_ptr: pointer to the buffer object pointer
    518 *
    519 * Creates an &amdgpu_bo buffer object.
    520 *
    521 * Returns:
    522 * 0 for success or a negative error code on failure.
    523 */
    524int amdgpu_bo_create(struct amdgpu_device *adev,
    525			       struct amdgpu_bo_param *bp,
    526			       struct amdgpu_bo **bo_ptr)
    527{
    528	struct ttm_operation_ctx ctx = {
    529		.interruptible = (bp->type != ttm_bo_type_kernel),
    530		.no_wait_gpu = bp->no_wait_gpu,
    531		/* We opt to avoid OOM on system pages allocations */
    532		.gfp_retry_mayfail = true,
    533		.allow_res_evict = bp->type != ttm_bo_type_kernel,
    534		.resv = bp->resv
    535	};
    536	struct amdgpu_bo *bo;
    537	unsigned long page_align, size = bp->size;
    538	int r;
    539
    540	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
    541	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
    542		/* GWS and OA don't need any alignment. */
    543		page_align = bp->byte_align;
    544		size <<= PAGE_SHIFT;
    545	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
    546		/* Both size and alignment must be a multiple of 4. */
    547		page_align = ALIGN(bp->byte_align, 4);
    548		size = ALIGN(size, 4) << PAGE_SHIFT;
    549	} else {
    550		/* Memory should be aligned at least to a page size. */
    551		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
    552		size = ALIGN(size, PAGE_SIZE);
    553	}
    554
    555	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
    556		return -ENOMEM;
    557
    558	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
    559
    560	*bo_ptr = NULL;
    561	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
    562	if (bo == NULL)
    563		return -ENOMEM;
    564	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
    565	bo->vm_bo = NULL;
    566	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
    567		bp->domain;
    568	bo->allowed_domains = bo->preferred_domains;
    569	if (bp->type != ttm_bo_type_kernel &&
    570	    !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
    571	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
    572		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
    573
    574	bo->flags = bp->flags;
    575
    576	if (!amdgpu_bo_support_uswc(bo->flags))
    577		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
    578
    579	if (adev->ras_enabled)
    580		bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
    581
    582	bo->tbo.bdev = &adev->mman.bdev;
    583	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
    584			  AMDGPU_GEM_DOMAIN_GDS))
    585		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
    586	else
    587		amdgpu_bo_placement_from_domain(bo, bp->domain);
    588	if (bp->type == ttm_bo_type_kernel)
    589		bo->tbo.priority = 1;
    590
    591	if (!bp->destroy)
    592		bp->destroy = &amdgpu_bo_destroy;
    593
    594	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
    595				 &bo->placement, page_align, &ctx,  NULL,
    596				 bp->resv, bp->destroy);
    597	if (unlikely(r != 0))
    598		return r;
    599
    600	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
    601	    bo->tbo.resource->mem_type == TTM_PL_VRAM &&
    602	    bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
    603		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
    604					     ctx.bytes_moved);
    605	else
    606		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
    607
    608	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
    609	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
    610		struct dma_fence *fence;
    611
    612		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
    613		if (unlikely(r))
    614			goto fail_unreserve;
    615
    616		dma_resv_add_fence(bo->tbo.base.resv, fence,
    617				   DMA_RESV_USAGE_KERNEL);
    618		dma_fence_put(fence);
    619	}
    620	if (!bp->resv)
    621		amdgpu_bo_unreserve(bo);
    622	*bo_ptr = bo;
    623
    624	trace_amdgpu_bo_create(bo);
    625
    626	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
    627	if (bp->type == ttm_bo_type_device)
    628		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
    629
    630	return 0;
    631
    632fail_unreserve:
    633	if (!bp->resv)
    634		dma_resv_unlock(bo->tbo.base.resv);
    635	amdgpu_bo_unref(&bo);
    636	return r;
    637}
    638
    639/**
    640 * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
    641 * @adev: amdgpu device object
    642 * @bp: parameters to be used for the buffer object
    643 * @ubo_ptr: pointer to the buffer object pointer
    644 *
     645 * Creates a BO to be used by a user application.
    646 *
    647 * Returns:
    648 * 0 for success or a negative error code on failure.
    649 */
    650
    651int amdgpu_bo_create_user(struct amdgpu_device *adev,
    652			  struct amdgpu_bo_param *bp,
    653			  struct amdgpu_bo_user **ubo_ptr)
    654{
    655	struct amdgpu_bo *bo_ptr;
    656	int r;
    657
    658	bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
    659	bp->destroy = &amdgpu_bo_user_destroy;
    660	r = amdgpu_bo_create(adev, bp, &bo_ptr);
    661	if (r)
    662		return r;
    663
    664	*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
    665	return r;
    666}
    667
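/*
 * Illustrative sketch, not part of the original file: how a caller
 * might fill struct amdgpu_bo_param before calling
 * amdgpu_bo_create_user(). example_create_gem_bo() is hypothetical;
 * bo_ptr_size and destroy are set by amdgpu_bo_create_user() itself.
 */
static int example_create_gem_bo(struct amdgpu_device *adev, u64 size,
				 u32 domain, u64 flags,
				 struct amdgpu_bo_user **ubo)
{
	struct amdgpu_bo_param bp;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = domain;
	bp.flags = flags;
	bp.type = ttm_bo_type_device;	/* userspace-visible BO */
	bp.resv = NULL;			/* allocate a private reservation object */

	return amdgpu_bo_create_user(adev, &bp, ubo);
}
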
    668/**
    669 * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
    670 * @adev: amdgpu device object
    671 * @bp: parameters to be used for the buffer object
    672 * @vmbo_ptr: pointer to the buffer object pointer
    673 *
     674 * Creates a BO to be used by GPUVM.
    675 *
    676 * Returns:
    677 * 0 for success or a negative error code on failure.
    678 */
    679
    680int amdgpu_bo_create_vm(struct amdgpu_device *adev,
    681			struct amdgpu_bo_param *bp,
    682			struct amdgpu_bo_vm **vmbo_ptr)
    683{
    684	struct amdgpu_bo *bo_ptr;
    685	int r;
    686
    687	/* bo_ptr_size will be determined by the caller and it depends on
     688	 * the number of amdgpu_vm_pt entries.
    689	 */
    690	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
    691	bp->destroy = &amdgpu_bo_vm_destroy;
    692	r = amdgpu_bo_create(adev, bp, &bo_ptr);
    693	if (r)
    694		return r;
    695
    696	*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
    697	INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
    698	return r;
    699}
    700
    701/**
    702 * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
    703 *
    704 * @vmbo: BO that will be inserted into the shadow list
    705 *
     706 * Inserts a BO into the shadow list.
    707 */
    708void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
    709{
    710	struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);
    711
    712	mutex_lock(&adev->shadow_list_lock);
    713	list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
    714	mutex_unlock(&adev->shadow_list_lock);
    715}
    716
    717/**
    718 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
    719 *
    720 * @shadow: &amdgpu_bo shadow to be restored
    721 * @fence: dma_fence associated with the operation
    722 *
    723 * Copies a buffer object's shadow content back to the object.
     724 * This is used for recovering a buffer from its shadow in case of a GPU
     725 * reset where the VRAM contents may be lost.
    726 *
    727 * Returns:
    728 * 0 for success or a negative error code on failure.
    729 */
    730int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
    731
    732{
    733	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
    734	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
    735	uint64_t shadow_addr, parent_addr;
    736
    737	shadow_addr = amdgpu_bo_gpu_offset(shadow);
    738	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);
    739
    740	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
    741				  amdgpu_bo_size(shadow), NULL, fence,
    742				  true, false, false);
    743}
    744
    745/**
    746 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
    747 * @bo: &amdgpu_bo buffer object to be mapped
    748 * @ptr: kernel virtual address to be returned
    749 *
    750 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
    751 * amdgpu_bo_kptr() to get the kernel virtual address.
    752 *
    753 * Returns:
    754 * 0 for success or a negative error code on failure.
    755 */
    756int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
    757{
    758	void *kptr;
    759	long r;
    760
    761	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
    762		return -EPERM;
    763
    764	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
    765				  false, MAX_SCHEDULE_TIMEOUT);
    766	if (r < 0)
    767		return r;
    768
    769	kptr = amdgpu_bo_kptr(bo);
    770	if (kptr) {
    771		if (ptr)
    772			*ptr = kptr;
    773		return 0;
    774	}
    775
    776	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
    777	if (r)
    778		return r;
    779
    780	if (ptr)
    781		*ptr = amdgpu_bo_kptr(bo);
    782
    783	return 0;
    784}
    785
    786/**
    787 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
    788 * @bo: &amdgpu_bo buffer object
    789 *
    790 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
    791 *
    792 * Returns:
    793 * the virtual address of a buffer object area.
    794 */
    795void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
    796{
    797	bool is_iomem;
    798
    799	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
    800}
    801
    802/**
    803 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
    804 * @bo: &amdgpu_bo buffer object to be unmapped
    805 *
    806 * Unmaps a kernel map set up by amdgpu_bo_kmap().
    807 */
    808void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
    809{
    810	if (bo->kmap.bo)
    811		ttm_bo_kunmap(&bo->kmap);
    812}
    813
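/*
 * Illustrative sketch, not part of the original file: CPU access to a
 * BO through amdgpu_bo_kmap()/amdgpu_bo_kunmap(). The BO must be
 * reserved around the access; example_cpu_fill() and the fill pattern
 * are hypothetical.
 */
static int example_cpu_fill(struct amdgpu_bo *bo, u8 pattern)
{
	void *ptr;
	int r;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;

	/* Waits for kernel fences and sets up a kernel virtual mapping. */
	r = amdgpu_bo_kmap(bo, &ptr);
	if (!r) {
		memset(ptr, pattern, amdgpu_bo_size(bo));
		amdgpu_bo_kunmap(bo);
	}

	amdgpu_bo_unreserve(bo);
	return r;
}
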
    814/**
    815 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
    816 * @bo: &amdgpu_bo buffer object
    817 *
    818 * References the contained &ttm_buffer_object.
    819 *
    820 * Returns:
    821 * a refcounted pointer to the &amdgpu_bo buffer object.
    822 */
    823struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
    824{
    825	if (bo == NULL)
    826		return NULL;
    827
    828	ttm_bo_get(&bo->tbo);
    829	return bo;
    830}
    831
    832/**
    833 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
    834 * @bo: &amdgpu_bo buffer object
    835 *
     836 * Unreferences the contained &ttm_buffer_object and clears the pointer.
    837 */
    838void amdgpu_bo_unref(struct amdgpu_bo **bo)
    839{
    840	struct ttm_buffer_object *tbo;
    841
    842	if ((*bo) == NULL)
    843		return;
    844
    845	tbo = &((*bo)->tbo);
    846	ttm_bo_put(tbo);
    847	*bo = NULL;
    848}
    849
    850/**
    851 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
    852 * @bo: &amdgpu_bo buffer object to be pinned
    853 * @domain: domain to be pinned to
    854 * @min_offset: the start of requested address range
    855 * @max_offset: the end of requested address range
    856 *
     857 * Pins the buffer object according to the requested domain and address range. If
     858 * the memory is unbound GART memory, binds the pages into the GART table. Adjusts
     859 * pin_count and pin_size accordingly.
    860 *
    861 * Pinning means to lock pages in memory along with keeping them at a fixed
    862 * offset. It is required when a buffer can not be moved, for example, when
    863 * a display buffer is being scanned out.
    864 *
    865 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
    866 * where to pin a buffer if there are specific restrictions on where a buffer
    867 * must be located.
    868 *
    869 * Returns:
    870 * 0 for success or a negative error code on failure.
    871 */
    872int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
    873			     u64 min_offset, u64 max_offset)
    874{
    875	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    876	struct ttm_operation_ctx ctx = { false, false };
    877	int r, i;
    878
    879	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
    880		return -EPERM;
    881
    882	if (WARN_ON_ONCE(min_offset > max_offset))
    883		return -EINVAL;
    884
    885	/* A shared bo cannot be migrated to VRAM */
    886	if (bo->tbo.base.import_attach) {
    887		if (domain & AMDGPU_GEM_DOMAIN_GTT)
    888			domain = AMDGPU_GEM_DOMAIN_GTT;
    889		else
    890			return -EINVAL;
    891	}
    892
    893	if (bo->tbo.pin_count) {
    894		uint32_t mem_type = bo->tbo.resource->mem_type;
    895		uint32_t mem_flags = bo->tbo.resource->placement;
    896
    897		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
    898			return -EINVAL;
    899
    900		if ((mem_type == TTM_PL_VRAM) &&
    901		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
    902		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
    903			return -EINVAL;
    904
    905		ttm_bo_pin(&bo->tbo);
    906
    907		if (max_offset != 0) {
    908			u64 domain_start = amdgpu_ttm_domain_start(adev,
    909								   mem_type);
    910			WARN_ON_ONCE(max_offset <
    911				     (amdgpu_bo_gpu_offset(bo) - domain_start));
    912		}
    913
    914		return 0;
    915	}
    916
    917	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
    918	 * See function amdgpu_display_supported_domains()
    919	 */
    920	domain = amdgpu_bo_get_preferred_domain(adev, domain);
    921
    922	if (bo->tbo.base.import_attach)
    923		dma_buf_pin(bo->tbo.base.import_attach);
    924
    925	/* force to pin into visible video ram */
    926	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
    927		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
    928	amdgpu_bo_placement_from_domain(bo, domain);
    929	for (i = 0; i < bo->placement.num_placement; i++) {
    930		unsigned fpfn, lpfn;
    931
    932		fpfn = min_offset >> PAGE_SHIFT;
    933		lpfn = max_offset >> PAGE_SHIFT;
    934
    935		if (fpfn > bo->placements[i].fpfn)
    936			bo->placements[i].fpfn = fpfn;
    937		if (!bo->placements[i].lpfn ||
    938		    (lpfn && lpfn < bo->placements[i].lpfn))
    939			bo->placements[i].lpfn = lpfn;
    940	}
    941
    942	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
    943	if (unlikely(r)) {
    944		dev_err(adev->dev, "%p pin failed\n", bo);
    945		goto error;
    946	}
    947
    948	ttm_bo_pin(&bo->tbo);
    949
    950	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
    951	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
    952		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
    953		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
    954			     &adev->visible_pin_size);
    955	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
    956		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
    957	}
    958
    959error:
    960	return r;
    961}
    962
    963/**
    964 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
    965 * @bo: &amdgpu_bo buffer object to be pinned
    966 * @domain: domain to be pinned to
    967 *
    968 * A simple wrapper to amdgpu_bo_pin_restricted().
    969 * Provides a simpler API for buffers that do not have any strict restrictions
    970 * on where a buffer must be located.
    971 *
    972 * Returns:
    973 * 0 for success or a negative error code on failure.
    974 */
    975int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
    976{
    977	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
    978	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
    979}
    980
    981/**
    982 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
    983 * @bo: &amdgpu_bo buffer object to be unpinned
    984 *
    985 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
    986 * Changes placement and pin size accordingly.
     987 *
     988 * Unlike the pin functions, this does not return an error code, since
     989 * unpinning a buffer object cannot fail.
    990 */
    991void amdgpu_bo_unpin(struct amdgpu_bo *bo)
    992{
    993	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    994
    995	ttm_bo_unpin(&bo->tbo);
    996	if (bo->tbo.pin_count)
    997		return;
    998
    999	if (bo->tbo.base.import_attach)
   1000		dma_buf_unpin(bo->tbo.base.import_attach);
   1001
   1002	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
   1003		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
   1004		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
   1005			     &adev->visible_pin_size);
   1006	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
   1007		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
   1008	}
   1009}
   1010
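/*
 * Illustrative sketch, not part of the original file: pinning a buffer
 * (e.g. a scanout surface) so that its GPU address stays fixed, and
 * unpinning it again once the hardware no longer references it. Both
 * example_* helpers are hypothetical.
 */
static int example_pin_scanout(struct amdgpu_bo *bo, u64 *gpu_addr)
{
	int r;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;

	/* amdgpu_bo_pin() adds AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS before pinning. */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (!r)
		*gpu_addr = amdgpu_bo_gpu_offset(bo);

	amdgpu_bo_unreserve(bo);
	return r;
}

static void example_unpin_scanout(struct amdgpu_bo *bo)
{
	if (amdgpu_bo_reserve(bo, true))
		return;

	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
}
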
   1011static const char *amdgpu_vram_names[] = {
   1012	"UNKNOWN",
   1013	"GDDR1",
   1014	"DDR2",
   1015	"GDDR3",
   1016	"GDDR4",
   1017	"GDDR5",
   1018	"HBM",
   1019	"DDR3",
   1020	"DDR4",
   1021	"GDDR6",
   1022	"DDR5",
   1023	"LPDDR4",
   1024	"LPDDR5"
   1025};
   1026
   1027/**
   1028 * amdgpu_bo_init - initialize memory manager
   1029 * @adev: amdgpu device object
   1030 *
   1031 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
   1032 *
   1033 * Returns:
   1034 * 0 for success or a negative error code on failure.
   1035 */
   1036int amdgpu_bo_init(struct amdgpu_device *adev)
   1037{
   1038	/* On A+A platform, VRAM can be mapped as WB */
   1039	if (!adev->gmc.xgmi.connected_to_cpu) {
   1040		/* reserve PAT memory space to WC for VRAM */
   1041		int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
   1042				adev->gmc.aper_size);
   1043
   1044		if (r) {
   1045			DRM_ERROR("Unable to set WC memtype for the aperture base\n");
   1046			return r;
   1047		}
   1048
   1049		/* Add an MTRR for the VRAM */
   1050		adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
   1051				adev->gmc.aper_size);
   1052	}
   1053
   1054	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
   1055		 adev->gmc.mc_vram_size >> 20,
   1056		 (unsigned long long)adev->gmc.aper_size >> 20);
   1057	DRM_INFO("RAM width %dbits %s\n",
   1058		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
   1059	return amdgpu_ttm_init(adev);
   1060}
   1061
   1062/**
   1063 * amdgpu_bo_fini - tear down memory manager
   1064 * @adev: amdgpu device object
   1065 *
   1066 * Reverses amdgpu_bo_init() to tear down memory manager.
   1067 */
   1068void amdgpu_bo_fini(struct amdgpu_device *adev)
   1069{
   1070	int idx;
   1071
   1072	amdgpu_ttm_fini(adev);
   1073
   1074	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
   1075
   1076		if (!adev->gmc.xgmi.connected_to_cpu) {
   1077			arch_phys_wc_del(adev->gmc.vram_mtrr);
   1078			arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
   1079		}
   1080		drm_dev_exit(idx);
   1081	}
   1082}
   1083
   1084/**
   1085 * amdgpu_bo_set_tiling_flags - set tiling flags
   1086 * @bo: &amdgpu_bo buffer object
   1087 * @tiling_flags: new flags
   1088 *
    1089 * Sets the buffer object's tiling flags to the new value. Used by GEM ioctl or
   1090 * kernel driver to set the tiling flags on a buffer.
   1091 *
   1092 * Returns:
   1093 * 0 for success or a negative error code on failure.
   1094 */
   1095int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
   1096{
   1097	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
   1098	struct amdgpu_bo_user *ubo;
   1099
   1100	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
   1101	if (adev->family <= AMDGPU_FAMILY_CZ &&
   1102	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
   1103		return -EINVAL;
   1104
   1105	ubo = to_amdgpu_bo_user(bo);
   1106	ubo->tiling_flags = tiling_flags;
   1107	return 0;
   1108}
   1109
   1110/**
   1111 * amdgpu_bo_get_tiling_flags - get tiling flags
   1112 * @bo: &amdgpu_bo buffer object
   1113 * @tiling_flags: returned flags
   1114 *
   1115 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
    1116 * query the tiling flags of a buffer.
   1117 */
   1118void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
   1119{
   1120	struct amdgpu_bo_user *ubo;
   1121
   1122	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
   1123	dma_resv_assert_held(bo->tbo.base.resv);
   1124	ubo = to_amdgpu_bo_user(bo);
   1125
   1126	if (tiling_flags)
   1127		*tiling_flags = ubo->tiling_flags;
   1128}
   1129
   1130/**
   1131 * amdgpu_bo_set_metadata - set metadata
   1132 * @bo: &amdgpu_bo buffer object
   1133 * @metadata: new metadata
   1134 * @metadata_size: size of the new metadata
   1135 * @flags: flags of the new metadata
   1136 *
   1137 * Sets buffer object's metadata, its size and flags.
   1138 * Used via GEM ioctl.
   1139 *
   1140 * Returns:
   1141 * 0 for success or a negative error code on failure.
   1142 */
   1143int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
   1144			    uint32_t metadata_size, uint64_t flags)
   1145{
   1146	struct amdgpu_bo_user *ubo;
   1147	void *buffer;
   1148
   1149	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
   1150	ubo = to_amdgpu_bo_user(bo);
   1151	if (!metadata_size) {
   1152		if (ubo->metadata_size) {
   1153			kfree(ubo->metadata);
   1154			ubo->metadata = NULL;
   1155			ubo->metadata_size = 0;
   1156		}
   1157		return 0;
   1158	}
   1159
   1160	if (metadata == NULL)
   1161		return -EINVAL;
   1162
   1163	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
   1164	if (buffer == NULL)
   1165		return -ENOMEM;
   1166
   1167	kfree(ubo->metadata);
   1168	ubo->metadata_flags = flags;
   1169	ubo->metadata = buffer;
   1170	ubo->metadata_size = metadata_size;
   1171
   1172	return 0;
   1173}
   1174
   1175/**
   1176 * amdgpu_bo_get_metadata - get metadata
   1177 * @bo: &amdgpu_bo buffer object
   1178 * @buffer: returned metadata
   1179 * @buffer_size: size of the buffer
   1180 * @metadata_size: size of the returned metadata
   1181 * @flags: flags of the returned metadata
   1182 *
   1183 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
   1184 * less than metadata_size.
   1185 * Used via GEM ioctl.
   1186 *
   1187 * Returns:
   1188 * 0 for success or a negative error code on failure.
   1189 */
   1190int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
   1191			   size_t buffer_size, uint32_t *metadata_size,
   1192			   uint64_t *flags)
   1193{
   1194	struct amdgpu_bo_user *ubo;
   1195
   1196	if (!buffer && !metadata_size)
   1197		return -EINVAL;
   1198
   1199	BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
   1200	ubo = to_amdgpu_bo_user(bo);
   1201	if (metadata_size)
   1202		*metadata_size = ubo->metadata_size;
   1203
   1204	if (buffer) {
   1205		if (buffer_size < ubo->metadata_size)
   1206			return -EINVAL;
   1207
   1208		if (ubo->metadata_size)
   1209			memcpy(buffer, ubo->metadata, ubo->metadata_size);
   1210	}
   1211
   1212	if (flags)
   1213		*flags = ubo->metadata_flags;
   1214
   1215	return 0;
   1216}
   1217
   1218/**
   1219 * amdgpu_bo_move_notify - notification about a memory move
   1220 * @bo: pointer to a buffer object
   1221 * @evict: if this move is evicting the buffer from the graphics address space
    1222 * @new_mem: new information of the buffer object
   1223 *
   1224 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
   1225 * bookkeeping.
   1226 * TTM driver callback which is called when ttm moves a buffer.
   1227 */
   1228void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
   1229			   bool evict,
   1230			   struct ttm_resource *new_mem)
   1231{
   1232	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
   1233	struct amdgpu_bo *abo;
   1234	struct ttm_resource *old_mem = bo->resource;
   1235
   1236	if (!amdgpu_bo_is_amdgpu_bo(bo))
   1237		return;
   1238
   1239	abo = ttm_to_amdgpu_bo(bo);
   1240	amdgpu_vm_bo_invalidate(adev, abo, evict);
   1241
   1242	amdgpu_bo_kunmap(abo);
   1243
   1244	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
   1245	    bo->resource->mem_type != TTM_PL_SYSTEM)
   1246		dma_buf_move_notify(abo->tbo.base.dma_buf);
   1247
   1248	/* remember the eviction */
   1249	if (evict)
   1250		atomic64_inc(&adev->num_evictions);
   1251
   1252	/* update statistics */
   1253	if (!new_mem)
   1254		return;
   1255
   1256	/* move_notify is called before move happens */
   1257	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
   1258}
   1259
   1260void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
   1261				uint64_t *gtt_mem, uint64_t *cpu_mem)
   1262{
   1263	unsigned int domain;
   1264
   1265	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
   1266	switch (domain) {
   1267	case AMDGPU_GEM_DOMAIN_VRAM:
   1268		*vram_mem += amdgpu_bo_size(bo);
   1269		break;
   1270	case AMDGPU_GEM_DOMAIN_GTT:
   1271		*gtt_mem += amdgpu_bo_size(bo);
   1272		break;
   1273	case AMDGPU_GEM_DOMAIN_CPU:
   1274	default:
   1275		*cpu_mem += amdgpu_bo_size(bo);
   1276		break;
   1277	}
   1278}
   1279
   1280/**
   1281 * amdgpu_bo_release_notify - notification about a BO being released
   1282 * @bo: pointer to a buffer object
   1283 *
   1284 * Wipes VRAM buffers whose contents should not be leaked before the
   1285 * memory is released.
   1286 */
   1287void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
   1288{
   1289	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
   1290	struct dma_fence *fence = NULL;
   1291	struct amdgpu_bo *abo;
   1292	int r;
   1293
   1294	if (!amdgpu_bo_is_amdgpu_bo(bo))
   1295		return;
   1296
   1297	abo = ttm_to_amdgpu_bo(bo);
   1298
   1299	if (abo->kfd_bo)
   1300		amdgpu_amdkfd_release_notify(abo);
   1301
   1302	/* We only remove the fence if the resv has individualized. */
   1303	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
   1304			&& bo->base.resv != &bo->base._resv);
   1305	if (bo->base.resv == &bo->base._resv)
   1306		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
   1307
   1308	if (bo->resource->mem_type != TTM_PL_VRAM ||
   1309	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
   1310	    adev->in_suspend || adev->shutdown)
   1311		return;
   1312
   1313	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
   1314		return;
   1315
   1316	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
   1317	if (!WARN_ON(r)) {
   1318		amdgpu_bo_fence(abo, fence, false);
   1319		dma_fence_put(fence);
   1320	}
   1321
   1322	dma_resv_unlock(bo->base.resv);
   1323}
   1324
   1325/**
   1326 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
   1327 * @bo: pointer to a buffer object
   1328 *
   1329 * Notifies the driver we are taking a fault on this BO and have reserved it,
   1330 * also performs bookkeeping.
   1331 * TTM driver callback for dealing with vm faults.
   1332 *
   1333 * Returns:
    1334 * 0 on success or a VM_FAULT_ error code (e.g. VM_FAULT_SIGBUS) on failure.
   1335 */
   1336vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
   1337{
   1338	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
   1339	struct ttm_operation_ctx ctx = { false, false };
   1340	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
   1341	unsigned long offset;
   1342	int r;
   1343
   1344	/* Remember that this BO was accessed by the CPU */
   1345	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   1346
   1347	if (bo->resource->mem_type != TTM_PL_VRAM)
   1348		return 0;
   1349
   1350	offset = bo->resource->start << PAGE_SHIFT;
   1351	if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
   1352		return 0;
   1353
   1354	/* Can't move a pinned BO to visible VRAM */
   1355	if (abo->tbo.pin_count > 0)
   1356		return VM_FAULT_SIGBUS;
   1357
   1358	/* hurrah the memory is not visible ! */
   1359	atomic64_inc(&adev->num_vram_cpu_page_faults);
   1360	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
   1361					AMDGPU_GEM_DOMAIN_GTT);
   1362
   1363	/* Avoid costly evictions; only set GTT as a busy placement */
   1364	abo->placement.num_busy_placement = 1;
   1365	abo->placement.busy_placement = &abo->placements[1];
   1366
   1367	r = ttm_bo_validate(bo, &abo->placement, &ctx);
   1368	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
   1369		return VM_FAULT_NOPAGE;
   1370	else if (unlikely(r))
   1371		return VM_FAULT_SIGBUS;
   1372
   1373	offset = bo->resource->start << PAGE_SHIFT;
   1374	/* this should never happen */
   1375	if (bo->resource->mem_type == TTM_PL_VRAM &&
   1376	    (offset + bo->base.size) > adev->gmc.visible_vram_size)
   1377		return VM_FAULT_SIGBUS;
   1378
   1379	ttm_bo_move_to_lru_tail_unlocked(bo);
   1380	return 0;
   1381}
   1382
   1383/**
   1384 * amdgpu_bo_fence - add fence to buffer object
   1385 *
   1386 * @bo: buffer object in question
   1387 * @fence: fence to add
   1388 * @shared: true if fence should be added shared
   1389 *
   1390 */
   1391void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
   1392		     bool shared)
   1393{
   1394	struct dma_resv *resv = bo->tbo.base.resv;
   1395	int r;
   1396
   1397	r = dma_resv_reserve_fences(resv, 1);
   1398	if (r) {
   1399		/* As last resort on OOM we block for the fence */
   1400		dma_fence_wait(fence, false);
   1401		return;
   1402	}
   1403
   1404	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
   1405			   DMA_RESV_USAGE_WRITE);
   1406}
   1407
   1408/**
   1409 * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
   1410 *
   1411 * @adev: amdgpu device pointer
   1412 * @resv: reservation object to sync to
   1413 * @sync_mode: synchronization mode
   1414 * @owner: fence owner
   1415 * @intr: Whether the wait is interruptible
   1416 *
    1417 * Extracts the fences from the reservation object and waits for them to finish.
   1418 *
   1419 * Returns:
   1420 * 0 on success, errno otherwise.
   1421 */
   1422int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
   1423			     enum amdgpu_sync_mode sync_mode, void *owner,
   1424			     bool intr)
   1425{
   1426	struct amdgpu_sync sync;
   1427	int r;
   1428
   1429	amdgpu_sync_create(&sync);
   1430	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
   1431	r = amdgpu_sync_wait(&sync, intr);
   1432	amdgpu_sync_free(&sync);
   1433	return r;
   1434}
   1435
   1436/**
   1437 * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
   1438 * @bo: buffer object to wait for
   1439 * @owner: fence owner
   1440 * @intr: Whether the wait is interruptible
   1441 *
   1442 * Wrapper to wait for fences in a BO.
   1443 * Returns:
   1444 * 0 on success, errno otherwise.
   1445 */
   1446int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
   1447{
   1448	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
   1449
   1450	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
   1451					AMDGPU_SYNC_NE_OWNER, owner, intr);
   1452}
   1453
   1454/**
   1455 * amdgpu_bo_gpu_offset - return GPU offset of bo
   1456 * @bo:	amdgpu object for which we query the offset
   1457 *
   1458 * Note: object should either be pinned or reserved when calling this
    1459 * function; it might be useful to add a check for this for debugging.
   1460 *
   1461 * Returns:
   1462 * current GPU offset of the object.
   1463 */
   1464u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
   1465{
   1466	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
   1467	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
   1468		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
   1469	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
   1470	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
   1471		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
   1472
   1473	return amdgpu_bo_gpu_offset_no_check(bo);
   1474}
   1475
   1476/**
   1477 * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
   1478 * @bo:	amdgpu object for which we query the offset
   1479 *
   1480 * Returns:
   1481 * current GPU offset of the object without raising warnings.
   1482 */
   1483u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
   1484{
   1485	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
   1486	uint64_t offset;
   1487
   1488	offset = (bo->tbo.resource->start << PAGE_SHIFT) +
   1489		 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
   1490
   1491	return amdgpu_gmc_sign_extend(offset);
   1492}
   1493
   1494/**
   1495 * amdgpu_bo_get_preferred_domain - get preferred domain
   1496 * @adev: amdgpu device object
   1497 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
   1498 *
   1499 * Returns:
   1500 * Which of the allowed domains is preferred for allocating the BO.
   1501 */
   1502uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
   1503					    uint32_t domain)
   1504{
   1505	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
   1506		domain = AMDGPU_GEM_DOMAIN_VRAM;
   1507		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
   1508			domain = AMDGPU_GEM_DOMAIN_GTT;
   1509	}
   1510	return domain;
   1511}
   1512
   1513#if defined(CONFIG_DEBUG_FS)
   1514#define amdgpu_bo_print_flag(m, bo, flag)		        \
   1515	do {							\
   1516		if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
   1517			seq_printf((m), " " #flag);		\
   1518		}						\
   1519	} while (0)
   1520
   1521/**
   1522 * amdgpu_bo_print_info - print BO info in debugfs file
   1523 *
   1524 * @id: Index or Id of the BO
   1525 * @bo: Requested BO for printing info
   1526 * @m: debugfs file
   1527 *
   1528 * Print BO information in debugfs file
   1529 *
   1530 * Returns:
   1531 * Size of the BO in bytes.
   1532 */
   1533u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
   1534{
   1535	struct dma_buf_attachment *attachment;
   1536	struct dma_buf *dma_buf;
   1537	unsigned int domain;
   1538	const char *placement;
   1539	unsigned int pin_count;
   1540	u64 size;
   1541
   1542	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
   1543	switch (domain) {
   1544	case AMDGPU_GEM_DOMAIN_VRAM:
   1545		placement = "VRAM";
   1546		break;
   1547	case AMDGPU_GEM_DOMAIN_GTT:
   1548		placement = " GTT";
   1549		break;
   1550	case AMDGPU_GEM_DOMAIN_CPU:
   1551	default:
   1552		placement = " CPU";
   1553		break;
   1554	}
   1555
   1556	size = amdgpu_bo_size(bo);
   1557	seq_printf(m, "\t\t0x%08x: %12lld byte %s",
   1558			id, size, placement);
   1559
   1560	pin_count = READ_ONCE(bo->tbo.pin_count);
   1561	if (pin_count)
   1562		seq_printf(m, " pin count %d", pin_count);
   1563
   1564	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
   1565	attachment = READ_ONCE(bo->tbo.base.import_attach);
   1566
   1567	if (attachment)
   1568		seq_printf(m, " imported from %p", dma_buf);
   1569	else if (dma_buf)
   1570		seq_printf(m, " exported as %p", dma_buf);
   1571
   1572	amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
   1573	amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
   1574	amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
   1575	amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
   1576	amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
   1577	amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
   1578	amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
   1579
   1580	seq_puts(m, "\n");
   1581
   1582	return size;
   1583}
   1584#endif