cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

nouveau_dmem.c (18882B)


/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"

#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/push906f.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

#include <nvhw/class/cla0b5.h>

#include <linux/sched/mm.h>
#include <linux/hmm.h>
#include <linux/memremap.h>
#include <linux/migrate.h>

/*
 * FIXME: this is ugly. Right now we use TTM to allocate VRAM and pin it
 * while it is in use. We likely want to overhaul memory management for
 * nouveau to be more page-like (not necessarily with the system page size,
 * but a bigger page size) at the lowest level, with a shim layer on top
 * that provides the same functionality as TTM.
 */
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)

enum nouveau_aper {
	NOUVEAU_APER_VIRT,
	NOUVEAU_APER_VRAM,
	NOUVEAU_APER_HOST,
};

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
				      enum nouveau_aper, u64 dst_addr,
				      enum nouveau_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
				      enum nouveau_aper, u64 dst_addr);

struct nouveau_dmem_chunk {
	struct list_head list;
	struct nouveau_bo *bo;
	struct nouveau_drm *drm;
	unsigned long callocated;
	struct dev_pagemap pagemap;
};

struct nouveau_dmem_migrate {
	nouveau_migrate_copy_t copy_func;
	nouveau_clear_page_t clear_func;
	struct nouveau_channel *chan;
};

struct nouveau_dmem {
	struct nouveau_drm *drm;
	struct nouveau_dmem_migrate migrate;
	struct list_head chunks;
	struct mutex mutex;
	struct page *free_pages;
	spinlock_t lock;
};

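/*
 * Each chunk owns its own dev_pagemap, so a device-private page can be
 * mapped back to its chunk (and from there to the drm device) through
 * page->pgmap.
 */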
static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
	return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}

static struct nouveau_drm *page_to_drm(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

	return chunk->drm;
}

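/*
 * Translate a device-private page into its address within the pinned VRAM
 * buffer object that backs the page's chunk.
 */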
unsigned long nouveau_dmem_page_addr(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
	unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
				chunk->pagemap.range.start;

	return chunk->bo->offset + off;
}

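/*
 * dev_pagemap page_free() callback: put the page back on the per-device
 * free list and drop the owning chunk's allocation count.
 */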
static void nouveau_dmem_page_free(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
	struct nouveau_dmem *dmem = chunk->drm->dmem;

	spin_lock(&dmem->lock);
	page->zone_device_data = dmem->free_pages;
	dmem->free_pages = page;

	WARN_ON(!chunk->callocated);
	chunk->callocated--;
	/*
	 * FIXME: when chunk->callocated reaches 0 we should add the chunk to
	 * a reclaim list so that it can be freed in case of memory pressure.
	 */
	spin_unlock(&dmem->lock);
}

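/*
 * Wait for a previously emitted copy/clear fence and drop the reference,
 * if one exists.
 */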
static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
	if (fence) {
		nouveau_fence_wait(*fence, true, false);
		nouveau_fence_unref(fence);
	} else {
		/*
		 * FIXME: wait for the channel to be idle before finalizing
		 * the hmem object.
		 */
	}
}

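/*
 * Copy a single device-private page back to system memory for a CPU fault:
 * allocate a destination page, DMA-map it and use migrate.copy_func to copy
 * the data out of VRAM.
 */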
static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
		struct vm_fault *vmf, struct migrate_vma *args,
		dma_addr_t *dma_addr)
{
	struct device *dev = drm->dev->dev;
	struct page *dpage, *spage;
	struct nouveau_svmm *svmm;

	spage = migrate_pfn_to_page(args->src[0]);
	if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
		return 0;

	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
	if (!dpage)
		return VM_FAULT_SIGBUS;
	lock_page(dpage);

	*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr))
		goto error_free_page;

	svmm = spage->zone_device_data;
	mutex_lock(&svmm->mutex);
	nouveau_svmm_invalidate(svmm, args->start, args->end);
	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
			NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
		goto error_dma_unmap;
	mutex_unlock(&svmm->mutex);

	args->dst[0] = migrate_pfn(page_to_pfn(dpage));
	return 0;

error_dma_unmap:
	mutex_unlock(&svmm->mutex);
	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
error_free_page:
	__free_page(dpage);
	return VM_FAULT_SIGBUS;
}

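/*
 * dev_pagemap migrate_to_ram() callback: migrate the faulting page from
 * device memory back to system RAM.
 */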
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct nouveau_drm *drm = page_to_drm(vmf->page);
	struct nouveau_dmem *dmem = drm->dmem;
	struct nouveau_fence *fence;
	unsigned long src = 0, dst = 0;
	dma_addr_t dma_addr = 0;
	vm_fault_t ret;
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address,
		.end		= vmf->address + PAGE_SIZE,
		.src		= &src,
		.dst		= &dst,
		.pgmap_owner	= drm->dev,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};

	/*
	 * FIXME: what we really want is some heuristic to migrate more than
	 * just one page on a CPU fault. When such a fault happens it is very
	 * likely that the surrounding pages will CPU fault too.
	 */
	if (migrate_vma_setup(&args) < 0)
		return VM_FAULT_SIGBUS;
	if (!args.cpages)
		return 0;

	ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
	if (ret || dst == 0)
		goto done;

	nouveau_fence_new(dmem->migrate.chan, false, &fence);
	migrate_vma_pages(&args);
	nouveau_dmem_fence_done(&fence);
	dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
	migrate_vma_finalize(&args);
	return ret;
}

static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
	.page_free		= nouveau_dmem_page_free,
	.migrate_to_ram		= nouveau_dmem_migrate_to_ram,
};

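/*
 * Allocate a new DMEM_CHUNK_SIZE chunk: reserve an unused physical address
 * range for the device-private struct pages, back it with a pinned VRAM
 * buffer object, and put all but the returned page on the free list.
 */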
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
	struct nouveau_dmem_chunk *chunk;
	struct resource *res;
	struct page *page;
	void *ptr;
	unsigned long i, pfn_first;
	int ret;

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	/* Allocate unused physical address space for device private pages. */
	res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
				      "nouveau_dmem");
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto out_free;
	}

	chunk->drm = drm;
	chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
	chunk->pagemap.range.start = res->start;
	chunk->pagemap.range.end = res->end;
	chunk->pagemap.nr_range = 1;
	chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
	chunk->pagemap.owner = drm->dev;

	ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
			     NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
			     &chunk->bo);
	if (ret)
		goto out_release;

	ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
	if (ret)
		goto out_bo_free;

	ptr = memremap_pages(&chunk->pagemap, numa_node_id());
	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		goto out_bo_unpin;
	}

	mutex_lock(&drm->dmem->mutex);
	list_add(&chunk->list, &drm->dmem->chunks);
	mutex_unlock(&drm->dmem->mutex);

	pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
	page = pfn_to_page(pfn_first);
	spin_lock(&drm->dmem->lock);
	for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
		page->zone_device_data = drm->dmem->free_pages;
		drm->dmem->free_pages = page;
	}
	*ppage = page;
	chunk->callocated++;
	spin_unlock(&drm->dmem->lock);

	NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
		DMEM_CHUNK_SIZE >> 20);

	return 0;

out_bo_unpin:
	nouveau_bo_unpin(chunk->bo);
out_bo_free:
	nouveau_bo_ref(NULL, &chunk->bo);
out_release:
	release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
out_free:
	kfree(chunk);
out:
	return ret;
}

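/*
 * Take a page off the free list, allocating a fresh chunk if the list is
 * empty. The returned page is locked.
 */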
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	struct page *page = NULL;
	int ret;

	spin_lock(&drm->dmem->lock);
	if (drm->dmem->free_pages) {
		page = drm->dmem->free_pages;
		drm->dmem->free_pages = page->zone_device_data;
		chunk = nouveau_page_to_chunk(page);
		chunk->callocated++;
		spin_unlock(&drm->dmem->lock);
	} else {
		spin_unlock(&drm->dmem->lock);
		ret = nouveau_dmem_chunk_alloc(drm, &page);
		if (ret)
			return NULL;
	}

	lock_page(page);
	return page;
}

static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
	unlock_page(page);
	put_page(page);
}

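/*
 * Suspend/resume support: chunk buffer objects are unpinned on suspend and
 * pinned back into VRAM on resume.
 */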
void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	int ret;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry(chunk, &drm->dmem->chunks, list) {
		ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
		/* FIXME handle pin failure */
		WARN_ON(ret);
	}
	mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry(chunk, &drm->dmem->chunks, list)
		nouveau_bo_unpin(chunk->bo);
	mutex_unlock(&drm->dmem->mutex);
}

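/*
 * Tear down all chunks: unpin and release the backing buffer objects,
 * unmap the device-private pages and release the reserved address ranges.
 */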
void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk, *tmp;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);

	list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
		nouveau_bo_unpin(chunk->bo);
		nouveau_bo_ref(NULL, &chunk->bo);
		list_del(&chunk->list);
		memunmap_pages(&chunk->pagemap);
		release_mem_region(chunk->pagemap.range.start,
				   range_len(&chunk->pagemap.range));
		kfree(chunk);
	}

	mutex_unlock(&drm->dmem->mutex);
}

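/*
 * Copy npages pages between apertures using the copy engine (class A0B5).
 * Source and destination can each be VRAM, host memory or a virtual
 * address.
 */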
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
		    enum nouveau_aper dst_aper, u64 dst_addr,
		    enum nouveau_aper src_aper, u64 src_addr)
{
	struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
	u32 launch_dma = 0;
	int ret;

	ret = PUSH_WAIT(push, 13);
	if (ret)
		return ret;

	if (src_aper != NOUVEAU_APER_VIRT) {
		switch (src_aper) {
		case NOUVEAU_APER_VRAM:
			PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
				  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB));
			break;
		case NOUVEAU_APER_HOST:
			PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
				  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM));
			break;
		default:
			return -EINVAL;
		}

		launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, PHYSICAL);
	}

	if (dst_aper != NOUVEAU_APER_VIRT) {
		switch (dst_aper) {
		case NOUVEAU_APER_VRAM:
			PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
				  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
			break;
		case NOUVEAU_APER_HOST:
			PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
				  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
			break;
		default:
			return -EINVAL;
		}

		launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
	}

	PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
		  NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(src_addr)),

				OFFSET_IN_LOWER, lower_32_bits(src_addr),

				OFFSET_OUT_UPPER,
		  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),

				OFFSET_OUT_LOWER, lower_32_bits(dst_addr),
				PITCH_IN, PAGE_SIZE,
				PITCH_OUT, PAGE_SIZE,
				LINE_LENGTH_IN, PAGE_SIZE,
				LINE_COUNT, npages);

	PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
		  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
		  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
		  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
		  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
	return 0;
}

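/*
 * Clear length bytes at dst_addr using the copy engine's remap feature to
 * write a constant zero pattern.
 */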
static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
		     enum nouveau_aper dst_aper, u64 dst_addr)
{
	struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
	u32 launch_dma = 0;
	int ret;

	ret = PUSH_WAIT(push, 12);
	if (ret)
		return ret;

	switch (dst_aper) {
	case NOUVEAU_APER_VRAM:
		PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
			  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
		break;
	case NOUVEAU_APER_HOST:
		PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
			  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
		break;
	default:
		return -EINVAL;
	}

	launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);

	PUSH_MTHD(push, NVA0B5, SET_REMAP_CONST_A, 0,
				SET_REMAP_CONST_B, 0,

				SET_REMAP_COMPONENTS,
		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_X, CONST_A) |
		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_Y, CONST_B) |
		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, FOUR) |
		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, TWO));

	PUSH_MTHD(push, NVA0B5, OFFSET_OUT_UPPER,
		  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),

				OFFSET_OUT_LOWER, lower_32_bits(dst_addr));

	PUSH_MTHD(push, NVA0B5, LINE_LENGTH_IN, length >> 3);

	PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
		  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
		  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
		  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
		  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, FALSE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, TRUE) |
		  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
	return 0;
}

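/*
 * Hook up the copy/clear helpers for the copy-engine class exposed by the
 * device; only Pascal, Volta and Turing copy classes are supported.
 */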
static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
	switch (drm->ttm.copy.oclass) {
	case PASCAL_DMA_COPY_A:
	case PASCAL_DMA_COPY_B:
	case  VOLTA_DMA_COPY_A:
	case TURING_DMA_COPY_A:
		drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
		drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
		drm->dmem->migrate.chan = drm->ttm.chan;
		return 0;
	default:
		break;
	}
	return -ENODEV;
}

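/*
 * Allocate and initialize the per-device dmem state. Chunks themselves are
 * allocated lazily on first use.
 */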
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
	int ret;

	/* This only makes sense on Pascal or newer */
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
		return;

	if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
		return;

	drm->dmem->drm = drm;
	mutex_init(&drm->dmem->mutex);
	INIT_LIST_HEAD(&drm->dmem->chunks);
	spin_lock_init(&drm->dmem->lock);

	/* Initialize migration DMA helpers before registering memory */
	ret = nouveau_dmem_migrate_init(drm);
	if (ret) {
		kfree(drm->dmem);
		drm->dmem = NULL;
	}
}

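/*
 * Migrate one page to VRAM: allocate a device-private page and either copy
 * the source page into it or, if there is no source page, clear it. Also
 * compute the PFNMAP entry used to update the GPU page tables.
 */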
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
		struct nouveau_svmm *svmm, unsigned long src,
		dma_addr_t *dma_addr, u64 *pfn)
{
	struct device *dev = drm->dev->dev;
	struct page *dpage, *spage;
	unsigned long paddr;

	spage = migrate_pfn_to_page(src);
	if (!(src & MIGRATE_PFN_MIGRATE))
		goto out;

	dpage = nouveau_dmem_page_alloc_locked(drm);
	if (!dpage)
		goto out;

	paddr = nouveau_dmem_page_addr(dpage);
	if (spage) {
		*dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, *dma_addr))
			goto out_free_page;
		if (drm->dmem->migrate.copy_func(drm, 1,
			NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
			goto out_dma_unmap;
	} else {
		*dma_addr = DMA_MAPPING_ERROR;
		if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
			NOUVEAU_APER_VRAM, paddr))
			goto out_free_page;
	}

	dpage->zone_device_data = svmm;
	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
	if (src & MIGRATE_PFN_WRITE)
		*pfn |= NVIF_VMM_PFNMAP_V0_W;
	return migrate_pfn(page_to_pfn(dpage));

out_dma_unmap:
	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
	nouveau_dmem_page_free_locked(drm, dpage);
out:
	*pfn = NVIF_VMM_PFNMAP_V0_NONE;
	return 0;
}

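/*
 * Migrate one batch of pages set up by migrate_vma_setup(): copy each page
 * to VRAM, fence the copies and update the GPU page tables via
 * nouveau_pfns_map().
 */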
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
		struct nouveau_svmm *svmm, struct migrate_vma *args,
		dma_addr_t *dma_addrs, u64 *pfns)
{
	struct nouveau_fence *fence;
	unsigned long addr = args->start, nr_dma = 0, i;

	for (i = 0; addr < args->end; i++) {
		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
				args->src[i], dma_addrs + nr_dma, pfns + i);
		if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
			nr_dma++;
		addr += PAGE_SIZE;
	}

	nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
	migrate_vma_pages(args);
	nouveau_dmem_fence_done(&fence);
	nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);

	while (nr_dma--) {
		dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
				DMA_BIDIRECTIONAL);
	}
	migrate_vma_finalize(args);
}

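/*
 * Migrate the range [start, end) of a VMA to device memory, in batches of
 * at most SG_MAX_SINGLE_ALLOC pages.
 */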
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
			 struct nouveau_svmm *svmm,
			 struct vm_area_struct *vma,
			 unsigned long start,
			 unsigned long end)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
	dma_addr_t *dma_addrs;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.pgmap_owner	= drm->dev,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	unsigned long i;
	u64 *pfns;
	int ret = -ENOMEM;

	if (drm->dmem == NULL)
		return -ENODEV;

	args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
	if (!args.src)
		goto out;
	args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
	if (!args.dst)
		goto out_free_src;

	dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
	if (!dma_addrs)
		goto out_free_dst;

	pfns = nouveau_pfns_alloc(max);
	if (!pfns)
		goto out_free_dma;

	for (i = 0; i < npages; i += max) {
		args.end = start + (max << PAGE_SHIFT);
		ret = migrate_vma_setup(&args);
		if (ret)
			goto out_free_pfns;

		if (args.cpages)
			nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
						   pfns);
		args.start = args.end;
	}

	ret = 0;
out_free_pfns:
	nouveau_pfns_free(pfns);
out_free_dma:
	kfree(dma_addrs);
out_free_dst:
	kfree(args.dst);
out_free_src:
	kfree(args.src);
out:
	return ret;
}