cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

nouveau_bo.c (33322B)


/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>

#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

static int nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
			       struct ttm_resource *reg);
static void nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);

/*
 * NV10-NV40 tiling helpers
 */

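/*
 * Pre-Tesla GPUs expose a small fixed set of tile regions in the FB
 * engine.  The helpers below program a region for a buffer's tiling
 * mode and recycle it once the fence covering its last user has
 * signalled.
 */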
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct dma_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 zeta)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
	return found;
}

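/*
 * TTM destroy callback: release the buffer's tile region and either the
 * GEM object or the stand-alone reservation before freeing the
 * nouveau_bo itself.
 */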
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	WARN_ON(nvbo->bo.pin_count > 0);
	nouveau_bo_del_io_reserve_lru(bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);

	/*
	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
	 * initialized, so don't attempt to release it.
	 */
	if (bo->base.dev)
		drm_gem_object_release(&bo->base);
	else
		dma_resv_fini(&bo->base._resv);

	kfree(nvbo);
}

static inline u64
roundup_64(u64 x, u32 y)
{
	x += y - 1;
	do_div(x, y);
	return x * y;
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->client.device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup_64(*size, 64 * nvbo->mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup_64(*size, 32 * nvbo->mode);
			}
		}
	} else {
		*size = roundup_64(*size, (1 << nvbo->page));
		*align = max((1 <<  nvbo->page), *align);
	}

	*size = roundup_64(*size, PAGE_SIZE);
}

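/*
 * Allocate and pre-configure a nouveau_bo: decode tiling/compression
 * state from @tile_flags, pick a GPU page size that every requested
 * domain can back, and round @size/@align up accordingly.  The TTM
 * object itself is initialised later by nouveau_bo_init().
 */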
struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
		 u32 tile_mode, u32 tile_flags)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	struct nvif_mmu *mmu = &cli->mmu;
	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
	int i, pi = -1;

	if (!*size) {
		NV_WARN(drm, "skipped size %016llx\n", *size);
		return ERR_PTR(-EINVAL);
	}

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->bo.bdev = &drm->ttm.bdev;

	/* This is confusing, and doesn't actually mean we want an uncached
	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
	 * into in nouveau_gem_new().
	 */
	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
		/* Determine if we can get a cache-coherent map, forcing
		 * uncached mapping if we can't.
		 */
		if (!nouveau_drm_use_coherent_gpu_mapping(drm))
			nvbo->force_coherent = true;
	}

	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}

		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
	} else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
		nvbo->comp = (tile_flags & 0x00030000) >> 16;
		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
			kfree(nvbo);
			return ERR_PTR(-EINVAL);
		}
	} else {
		nvbo->zeta = (tile_flags & 0x00000007);
	}
	nvbo->mode = tile_mode;
	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);

	/* Determine the desirable target GPU page size for the buffer. */
	for (i = 0; i < vmm->page_nr; i++) {
		/* Because we cannot currently allow VMM maps to fail
		 * during buffer migration, we need to determine page
		 * size for the buffer up-front, and pre-allocate its
		 * page tables.
		 *
		 * Skip page sizes that can't support needed domains.
		 */
		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
		    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
			continue;
		if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
			continue;

		/* Select this page size if it's the first that supports
		 * the potential memory domains, or when it's compatible
		 * with the requested compression settings.
		 */
		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
			pi = i;

		/* Stop once the buffer is larger than the current page size. */
		if (*size >= 1ULL << vmm->page[i].shift)
			break;
	}

	if (WARN_ON(pi < 0))
		return ERR_PTR(-EINVAL);

	/* Disable compression if suitable settings couldn't be found. */
	if (nvbo->comp && !vmm->page[pi].comp) {
		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
			nvbo->kind = mmu->kind[nvbo->kind];
		nvbo->comp = 0;
	}
	nvbo->page = vmm->page[pi].shift;

	nouveau_bo_fixup_align(nvbo, align, size);

	return nvbo;
}

int
nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
		struct sg_table *sg, struct dma_resv *robj)
{
	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
	int ret;

	nouveau_bo_placement_set(nvbo, domain, 0);
	INIT_LIST_HEAD(&nvbo->io_reserve_lru);

	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
			  &nvbo->placement, align >> PAGE_SHIFT, false, sg,
			  robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	return 0;
}

int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
	       uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct dma_resv *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	nvbo->bo.base.size = size;
	dma_resv_init(&nvbo->bo.base._resv);
	drm_vma_node_reset(&nvbo->bo.base.vma_node);

	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
	if (ret)
		return ret;

	*pnvbo = nvbo;
	return 0;
}
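/*
 * Illustrative usage sketch for nouveau_bo_new()/nouveau_bo_pin(); this
 * is not code from this file, just the rough shape of a caller that
 * already holds a struct nouveau_cli:
 *
 *	struct nouveau_bo *nvbo;
 *	int ret = nouveau_bo_new(cli, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
 *				 0, 0, NULL, NULL, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
 */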

static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
{
	*n = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
		pl[*n].mem_type = TTM_PL_VRAM;
		pl[*n].flags = 0;
		(*n)++;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
		pl[*n].mem_type = TTM_PL_TT;
		pl[*n].flags = 0;
		(*n)++;
	}
	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
		pl[*n].mem_type = TTM_PL_SYSTEM;
		pl[(*n)++].flags = 0;
	}
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u64 vram_size = drm->client.device.info.ram_size;
	unsigned i, fpfn, lpfn;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    nvbo->bo.base.size < vram_size / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->zeta) {
			fpfn = (vram_size / 2) >> PAGE_SHIFT;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = (vram_size / 2) >> PAGE_SHIFT;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
			 uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement, domain);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   domain | busy);

	set_placement_range(nvbo, domain);
}

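/*
 * Pin a buffer into @domain.  A VRAM pin that additionally requires
 * physical contiguity may first evict an already placed, non-contiguous
 * buffer to GART before re-validating it.  Successful pins are charged
 * against the vram/gart_available counters.
 */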
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
		if (!nvbo->contig) {
			nvbo->contig = true;
			force = true;
			evict = true;
		}
	}

	if (nvbo->bo.pin_count) {
		bool error = evict;

		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
			break;
		case TTM_PL_TT:
			error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
			break;
		default:
			break;
		}

		if (error) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 bo->resource->mem_type, domain);
			ret = -EBUSY;
		}
		ttm_bo_pin(&nvbo->bo);
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nouveau_bo_placement_set(nvbo, domain, 0);
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;

	ttm_bo_pin(&nvbo->bo);

	switch (bo->resource->mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->base.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->base.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->contig = false;
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (ret)
		return ret;

	ttm_bo_unpin(&nvbo->bo);
	if (!nvbo->bo.pin_count) {
		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->base.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->base.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return 0;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	ttm_bo_kunmap(&nvbo->kmap);
}

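/*
 * The two sync helpers below bracket CPU access to non-coherent
 * buffers: they walk the TTM page array, coalesce contiguous runs of
 * pages and issue one dma_sync_single_for_{device,cpu}() call per run.
 */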
void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
	int i, j;

	if (!ttm_dma || !ttm_dma->dma_address)
		return;
	if (!ttm_dma->pages) {
		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
		return;
	}

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	i = 0;
	while (i < ttm_dma->num_pages) {
		struct page *p = ttm_dma->pages[i];
		size_t num_pages = 1;

		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
			if (++p != ttm_dma->pages[j])
				break;

			++num_pages;
		}
		dma_sync_single_for_device(drm->dev->dev,
					   ttm_dma->dma_address[i],
					   num_pages * PAGE_SIZE, DMA_TO_DEVICE);
		i += num_pages;
	}
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
	int i, j;

	if (!ttm_dma || !ttm_dma->dma_address)
		return;
	if (!ttm_dma->pages) {
		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
		return;
	}

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	i = 0;
	while (i < ttm_dma->num_pages) {
		struct page *p = ttm_dma->pages[i];
		size_t num_pages = 1;

		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
			if (++p != ttm_dma->pages[j])
				break;

			++num_pages;
		}

		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
					num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
		i += num_pages;
	}
}

void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	list_del_init(&nvbo->io_reserve_lru);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}

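/*
 * Accessors for a buffer mapped with nouveau_bo_map().  The kmap may
 * point at I/O or system memory, so accesses go through
 * io{read,write}*_native() whenever ttm_kmap_obj_virtual() reports an
 * iomem mapping.
 */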
void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);

	if (drm->agp.bridge) {
		return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
	}
#endif

	return nouveau_sgdma_create_ttm(bo, page_flags);
}

static int
nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
		    struct ttm_resource *reg)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
#endif
	if (!reg)
		return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
	if (drm->agp.bridge)
		return ttm_agp_bind(ttm, reg);
#endif
	return nouveau_sgdma_bind(bdev, ttm, reg);
}

static void
nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	if (drm->agp.bridge) {
		ttm_agp_unbind(ttm);
		return;
	}
#endif
	nouveau_sgdma_unbind(bdev, ttm);
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->resource->mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
					 NOUVEAU_GEM_DOMAIN_CPU);
		break;
	default:
		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
		break;
	}

	*pl = nvbo->placement;
}

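/*
 * Set up the temporary GPU virtual mappings (old_mem->vma[0]/[1]) that
 * the copy-engine paths use as source and destination when migrating a
 * buffer on Tesla and newer GPUs.
 */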
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_resource *reg)
{
	struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
	struct nouveau_mem *new_mem = nouveau_mem(reg);
	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
	int ret;

	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
			   old_mem->mem.size, &old_mem->vma[0]);
	if (ret)
		return ret;

	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
			   new_mem->mem.size, &old_mem->vma[1]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
	if (ret)
		goto done;

	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
done:
	if (ret) {
		nvif_vmm_put(vmm, &old_mem->vma[1]);
		nvif_vmm_put(vmm, &old_mem->vma[0]);
	}
	return ret;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
		     struct ttm_operation_ctx *ctx,
		     struct ttm_resource *new_reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_resource
	 */
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_reg);
		if (ret)
			return ret;
	}

	if (drm_drv_uses_atomic_modeset(drm->dev))
		mutex_lock(&cli->mutex);
	else
		mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict, false,
								new_reg);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

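/*
 * Probe for a buffer-copy method, newest class first: the dedicated
 * copy-engine classes and their GRCE variants, then the older
 * copy/crypt engines and finally M2MF.  If nothing can be bound, buffer
 * moves fall back to the CPU path reported as "CPU" below.
 */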
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct _method_table {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_resource *, struct ttm_resource *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
	};
	const struct _method_table *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_ctor(&chan->user, "ttmBoMove",
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_dtor(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
				 struct ttm_resource *new_reg)
{
	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	nouveau_bo_del_io_reserve_lru(bo);

	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
	    mem->mem.page == nvbo->page) {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			nouveau_vma_map(vma, mem);
		}
	} else {
		list_for_each_entry(vma, &nvbo->vma_list, head) {
			WARN_ON(ttm_bo_wait(bo, false, false));
			nouveau_vma_unmap(vma);
		}
	}

	if (new_reg)
		nvbo->offset = (new_reg->start << PAGE_SHIFT);

}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_reg->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_reg->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
					       nvbo->mode, nvbo->zeta);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct dma_fence *fence;
	int ret;

	ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_WRITE,
				     &fence);
	if (ret)
		dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_WRITE,
				      false, MAX_SCHEDULE_TIMEOUT);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

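/*
 * Main TTM move callback.  SYSTEM<->TT transitions are "null" moves,
 * TT->SYSTEM only needs an unbind, and SYSTEM<->VRAM is bounced through
 * TT by returning -EMULTIHOP.  Everything else uses the hardware copy
 * selected in nouveau_bo_move_init(), with ttm_bo_move_memcpy() as the
 * fallback.
 */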
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
		struct ttm_operation_ctx *ctx,
		struct ttm_resource *new_reg,
		struct ttm_place *hop)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_resource *old_reg = bo->resource;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;


	if (new_reg->mem_type == TTM_PL_TT) {
		ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
		if (ret)
			return ret;
	}

	nouveau_bo_move_ntfy(bo, new_reg);
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		goto out_ntfy;

	if (nvbo->bo.pin_count)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
		if (ret)
			goto out_ntfy;
	}

	/* Fake bo copy. */
	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	if (old_reg->mem_type == TTM_PL_SYSTEM &&
	    new_reg->mem_type == TTM_PL_TT) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	if (old_reg->mem_type == TTM_PL_TT &&
	    new_reg->mem_type == TTM_PL_SYSTEM) {
		nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_reg);
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if ((old_reg->mem_type == TTM_PL_SYSTEM &&
		     new_reg->mem_type == TTM_PL_VRAM) ||
		    (old_reg->mem_type == TTM_PL_VRAM &&
		     new_reg->mem_type == TTM_PL_SYSTEM)) {
			hop->fpfn = 0;
			hop->lpfn = 0;
			hop->mem_type = TTM_PL_TT;
			hop->flags = 0;
			return -EMULTIHOP;
		}
		ret = nouveau_bo_move_m2mf(bo, evict, ctx,
					   new_reg);
	} else
		ret = -ENODEV;

	if (ret) {
		/* Fallback to software copy. */
		ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
	}

out:
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}
out_ntfy:
	if (ret) {
		nouveau_bo_move_ntfy(bo, bo->resource);
	}
	return ret;
}

static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
			       struct ttm_resource *reg)
{
	struct nouveau_mem *mem = nouveau_mem(reg);

	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
		switch (reg->mem_type) {
		case TTM_PL_TT:
			if (mem->kind)
				nvif_object_unmap_handle(&mem->mem.object);
			break;
		case TTM_PL_VRAM:
			nvif_object_unmap_handle(&mem->mem.object);
			break;
		default:
			break;
		}
	}
}

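/*
 * Work out the bus address for CPU mappings of a resource: AGP and
 * untiled GART pages map directly, while VRAM (and tiled GART on NV50+)
 * is exposed through BAR1 via nvif_object_map_handle().  On -ENOSPC the
 * least recently used BAR mapping is torn down and the lookup retried.
 */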
static int
nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nouveau_mem *mem = nouveau_mem(reg);
	struct nvif_mmu *mmu = &drm->client.mmu;
	int ret;

	mutex_lock(&drm->ttm.io_reserve_mutex);
retry:
	switch (reg->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		ret = 0;
		goto out;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			reg->bus.offset = (reg->start << PAGE_SHIFT) +
				drm->agp.base;
			reg->bus.is_iomem = !drm->agp.cma;
			reg->bus.caching = ttm_write_combined;
		}
#endif
		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
		    !mem->kind) {
			/* untiled */
			ret = 0;
			break;
		}
		fallthrough;	/* tiled memory */
	case TTM_PL_VRAM:
		reg->bus.offset = (reg->start << PAGE_SHIFT) +
			device->func->resource_addr(device, 1);
		reg->bus.is_iomem = true;

		/* Some BARs do not support being ioremapped WC */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
		    mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED)
			reg->bus.caching = ttm_uncached;
		else
			reg->bus.caching = ttm_write_combined;

		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
			union {
				struct nv50_mem_map_v0 nv50;
				struct gf100_mem_map_v0 gf100;
			} args;
			u64 handle, length;
			u32 argc = 0;

			switch (mem->mem.object.oclass) {
			case NVIF_CLASS_MEM_NV50:
				args.nv50.version = 0;
				args.nv50.ro = 0;
				args.nv50.kind = mem->kind;
				args.nv50.comp = mem->comp;
				argc = sizeof(args.nv50);
				break;
			case NVIF_CLASS_MEM_GF100:
				args.gf100.version = 0;
				args.gf100.ro = 0;
				args.gf100.kind = mem->kind;
				argc = sizeof(args.gf100);
				break;
			default:
				WARN_ON(1);
				break;
			}

			ret = nvif_object_map_handle(&mem->mem.object,
						     &args, argc,
						     &handle, &length);
			if (ret != 1) {
				if (WARN_ON(ret == 0))
					ret = -EINVAL;
				goto out;
			}

			reg->bus.offset = handle;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret == -ENOSPC) {
		struct nouveau_bo *nvbo;

		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
						typeof(*nvbo),
						io_reserve_lru);
		if (nvbo) {
			list_del_init(&nvbo->io_reserve_lru);
			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
					   bdev->dev_mapping);
			nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
			goto retry;
		}

	}
	mutex_unlock(&drm->ttm.io_reserve_mutex);
	return ret;
}

static void
nouveau_ttm_io_mem_free(struct ttm_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	nouveau_ttm_io_mem_free_locked(drm, reg);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

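/*
 * CPU fault notifier: kind/tiled buffers sitting in system memory are
 * migrated to GART, and on pre-Tesla GPUs VRAM buffers are constrained
 * to the CPU-mappable part of BAR1 before the fault may proceed.
 */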
vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->resource->mem_type != TTM_PL_VRAM) {
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nvbo->kind)
			return 0;

		if (bo->resource->mem_type != TTM_PL_SYSTEM)
			return 0;

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);

	} else {
		/* make sure bo is in mappable vram */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
		    bo->resource->start + bo->resource->num_pages < mappable)
			return 0;

		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = 0;
			nvbo->placements[i].lpfn = mappable;
		}

		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = 0;
			nvbo->busy_placements[i].lpfn = mappable;
		}

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
	}

	ret = nouveau_bo_validate(nvbo, false, false);
	if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(ret))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

static int
nouveau_ttm_tt_populate(struct ttm_device *bdev,
			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (slave && ttm->sg) {
		drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
					       ttm->num_pages);
		return 0;
	}

	drm = nouveau_bdev(bdev);

	return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
			  struct ttm_tt *ttm)
{
	struct nouveau_drm *drm;
	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

	if (slave)
		return;

	nouveau_ttm_tt_unbind(bdev, ttm);

	drm = nouveau_bdev(bdev);

	return ttm_pool_free(&drm->ttm.bdev.pool, ttm);
}

static void
nouveau_ttm_tt_destroy(struct ttm_device *bdev,
		       struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	if (drm->agp.bridge) {
		ttm_agp_destroy(ttm);
		return;
	}
#endif
	nouveau_sgdma_destroy(bdev, ttm);
}

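/*
 * Attach a fence to the buffer's reservation object, as a write
 * (exclusive) or read (shared) fence depending on @exclusive.
 */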
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct dma_resv *resv = nvbo->bo.base.resv;

	if (!fence)
		return;

	dma_resv_add_fence(resv, &fence->base, exclusive ?
			   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
}

static void
nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	nouveau_bo_move_ntfy(bo, NULL);
}

struct ttm_device_funcs nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = nouveau_bo_evict_flags,
	.delete_mem_notify = nouveau_bo_delete_mem_notify,
	.move = nouveau_bo_move,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};