cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ttm_bo_util.c (17454B)


/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
#include <linux/iosys-map.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->funcs->io_mem_reserve)
		return 0;

	return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem)
		return;

	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->funcs->io_mem_free)
		bdev->funcs->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}
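
/*
 * Illustrative sketch (not part of the original file): ttm_mem_io_reserve()
 * simply forwards to the driver's ttm_device_funcs::io_mem_reserve callback,
 * which fills in @mem->bus so the CPU can reach the resource. A minimal
 * VRAM-only callback could look roughly like the following; "example_device",
 * "example_ttm_dev" and "vram_base" are hypothetical driver-side names.
 *
 *	static int example_io_mem_reserve(struct ttm_device *bdev,
 *					  struct ttm_resource *mem)
 *	{
 *		struct example_device *edev = example_ttm_dev(bdev);
 *
 *		switch (mem->mem_type) {
 *		case TTM_PL_SYSTEM:
 *		case TTM_PL_TT:
 *			return 0;	// CPU access goes through the TT pages
 *		case TTM_PL_VRAM:
 *			mem->bus.offset = (mem->start << PAGE_SHIFT) +
 *					  edev->vram_base;
 *			mem->bus.is_iomem = true;
 *			mem->bus.caching = ttm_write_combined;
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */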

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @clear: Whether to clear rather than copy.
 * @num_pages: Number of pages of the operation.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to be able to move out async under a
 * dma-fence if desired.
 */
void ttm_move_memcpy(bool clear,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter)
{
	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
	struct iosys_map src_map, dst_map;
	pgoff_t i;

	/* Single TTM move. NOP */
	if (dst_ops->maps_tt && src_ops->maps_tt)
		return;

	/* Don't move nonexistent data. Clear destination instead. */
	if (clear) {
		for (i = 0; i < num_pages; ++i) {
			dst_ops->map_local(dst_iter, &dst_map, i);
			if (dst_map.is_iomem)
				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
			else
				memset(dst_map.vaddr, 0, PAGE_SIZE);
			if (dst_ops->unmap_local)
				dst_ops->unmap_local(dst_iter, &dst_map);
		}
		return;
	}

	for (i = 0; i < num_pages; ++i) {
		dst_ops->map_local(dst_iter, &dst_map, i);
		src_ops->map_local(src_iter, &src_map, i);

		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

		if (src_ops->unmap_local)
			src_ops->unmap_local(src_iter, &src_map);
		if (dst_ops->unmap_local)
			dst_ops->unmap_local(dst_iter, &dst_map);
	}
}
EXPORT_SYMBOL(ttm_move_memcpy);

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *dst_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *src_mem = bo->resource;
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bdev, src_mem->mem_type);
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_linear_io io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	bool clear;
	int ret = 0;

	if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
		    dst_man->use_tt)) {
		ret = ttm_tt_populate(bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
	if (IS_ERR(dst_iter))
		return PTR_ERR(dst_iter);

	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
	if (IS_ERR(src_iter)) {
		ret = PTR_ERR(src_iter);
		goto out_src_iter;
	}

	clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
		ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);

	if (!src_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
	ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
	if (!dst_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
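
/*
 * Illustrative sketch (not part of the original file): a driver without a
 * hardware copy engine can implement its ttm_device_funcs::move callback by
 * deferring to ttm_bo_move_memcpy(), which also performs the sync cleanup and
 * assigns the new resource to the bo. "example_bo_move" is a hypothetical
 * callback name.
 *
 *	static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				   struct ttm_operation_ctx *ctx,
 *				   struct ttm_resource *new_mem,
 *				   struct ttm_place *hop)
 *	{
 *		return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *	}
 */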

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	dma_resv_fini(&fbo->base.base._resv);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	if (fbo->base.resource) {
		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
		bo->resource = NULL;
	}

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
	if (ret) {
		kfree(fbo);
		return ret;
	}

	ttm_bo_get(bo);
	fbo->bo = bo;

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}

pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

	return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;

	if (bo->resource->bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
	} else {
		resource_size_t res = bo->resource->bus.offset + offset;

		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			map->virtual = ioremap_cache(res, size);
#endif
		else
			map->virtual = ioremap(res, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->resource->num_pages)
		return -EINVAL;
	if ((start_page + num_pages) > bo->resource->num_pages)
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
	if (ret)
		return ret;
	if (!bo->resource->bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
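
/*
 * Illustrative sketch (not part of the original file): temporary CPU access
 * to a buffer object, with its reservation held, through ttm_bo_kmap() and
 * ttm_bo_kunmap(). ttm_kmap_obj_virtual() returns the mapped address and
 * reports whether it is an ioremapped (I/O memory) pointer.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &map);
 *	if (ret)
 *		return ret;
 *
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	// ... read or write through "virtual", honouring is_iomem ...
 *	ttm_bo_kunmap(&map);
 */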

int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset,
						 bo->base.size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			vaddr_iomem = ioremap_cache(mem->bus.offset,
						  bo->base.size);
#endif
		else
			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

		if (!vaddr_iomem)
			return -ENOMEM;

		iosys_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		iosys_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;

	if (iosys_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		iounmap(map->vaddr_iomem);
	iosys_map_clear(map);

	ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);
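
/*
 * Illustrative sketch (not part of the original file): mapping a whole,
 * reserved buffer object with ttm_bo_vmap() and filling it through the
 * iosys_map helpers, which work for both system and I/O memory. "data" and
 * "size" are hypothetical.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = ttm_bo_vmap(bo, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memcpy_to(&map, 0, data, size);
 *	ttm_bo_vunmap(bo, &map);
 */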

static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	int ret;
	ret = ttm_bo_wait(bo, false, false);
	if (ret)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
	return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/**
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_fence(&ghost_obj->base._resv, fence,
			   DMA_RESV_USAGE_KERNEL);

	/**
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from;

	from = ttm_manager_type(bdev, bo->resource->mem_type);

	/**
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->resource);
}

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
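
/*
 * Illustrative sketch (not part of the original file): a driver with a copy
 * engine schedules the blit, receives a fence, and then lets
 * ttm_bo_move_accel_cleanup() handle the fencing, the ghost-object dance and
 * the resource assignment. "example_copy_job" is a hypothetical driver
 * function returning the fence of the scheduled copy.
 *
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	fence = example_copy_job(bo, bo->resource, new_mem);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *
 *	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 *	dma_fence_put(fence);
 *	return ret;
 */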

void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret;

	ret = ttm_bo_wait_free_node(bo, man->use_tt);
	if (WARN_ON(ret))
		return;

	ttm_bo_assign_mem(bo, new_mem);
}
EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptibly
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
	struct ttm_buffer_object *ghost;
	struct ttm_resource *sys_res;
	struct ttm_tt *ttm;
	int ret;
	ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
	if (ret)
		return ret;

	/* If already idle, no need for ghost object dance. */
	ret = ttm_bo_wait(bo, false, true);
	if (ret != -EBUSY) {
		if (!bo->ttm) {
			/* See comment below about clearing. */
			ret = ttm_tt_create(bo, true);
			if (ret)
				goto error_free_sys_mem;
		} else {
			ttm_tt_unpopulate(bo->bdev, bo->ttm);
			if (bo->type == ttm_bo_type_device)
				ttm_tt_mark_for_clear(bo->ttm);
		}
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, sys_res);
		return 0;
	}

	/*
	 * We need an unpopulated ttm_tt after giving our current one,
	 * if any, to the ghost object. And we can't afford to fail
	 * creating one *after* the operation. If the bo subsequently gets
	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
	 * to avoid leaking sensitive information to user-space.
	 */

	ttm = bo->ttm;
	bo->ttm = NULL;
	ret = ttm_tt_create(bo, true);
	swap(bo->ttm, ttm);
	if (ret)
		goto error_free_sys_mem;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		goto error_destroy_tt;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);
	bo->ttm = ttm;
	ttm_bo_assign_mem(bo, sys_res);
	return 0;

error_destroy_tt:
	ttm_tt_destroy(bo->bdev, ttm);

error_free_sys_mem:
	ttm_resource_free(bo, &sys_res);
	return ret;
}
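
/*
 * Note (not part of the original file): ttm_bo_pipeline_gutting() is not
 * exported; TTM core calls it, for example from ttm_bo_validate() when a
 * buffer is validated against a placement with no entries. A driver that
 * wants to drop a bo's backing store can therefore, roughly, do:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	struct ttm_placement placement = {};	// zero placements -> gutting
 *
 *	return ttm_bo_validate(bo, &placement, &ctx);
 */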