cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

kfd_svm.c (107420B)


      1// SPDX-License-Identifier: GPL-2.0 OR MIT
      2/*
      3 * Copyright 2020-2021 Advanced Micro Devices, Inc.
      4 *
      5 * Permission is hereby granted, free of charge, to any person obtaining a
      6 * copy of this software and associated documentation files (the "Software"),
      7 * to deal in the Software without restriction, including without limitation
      8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      9 * and/or sell copies of the Software, and to permit persons to whom the
     10 * Software is furnished to do so, subject to the following conditions:
     11 *
     12 * The above copyright notice and this permission notice shall be included in
     13 * all copies or substantial portions of the Software.
     14 *
     15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     21 * OTHER DEALINGS IN THE SOFTWARE.
     22 */
     23
     24#include <linux/types.h>
     25#include <linux/sched/task.h>
     26#include "amdgpu_sync.h"
     27#include "amdgpu_object.h"
     28#include "amdgpu_vm.h"
     29#include "amdgpu_mn.h"
     30#include "amdgpu.h"
     31#include "amdgpu_xgmi.h"
     32#include "kfd_priv.h"
     33#include "kfd_svm.h"
     34#include "kfd_migrate.h"
     35
     36#ifdef dev_fmt
     37#undef dev_fmt
     38#endif
     39#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
     40
     41#define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
     42
     43/* Long enough to ensure no retry fault comes after svm range is restored and
     44 * page table is updated.
     45 */
     46#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	2000
     47
     48struct criu_svm_metadata {
     49	struct list_head list;
     50	struct kfd_criu_svm_range_priv_data data;
     51};
     52
     53static void svm_range_evict_svm_bo_worker(struct work_struct *work);
     54static bool
     55svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
     56				    const struct mmu_notifier_range *range,
     57				    unsigned long cur_seq);
     58static int
     59svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
     60		   uint64_t *bo_s, uint64_t *bo_l);
     61static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
     62	.invalidate = svm_range_cpu_invalidate_pagetables,
     63};
     64
     65/**
     66 * svm_range_unlink - unlink svm_range from lists and interval tree
     67 * @prange: svm range structure to be removed
     68 *
     69 * Remove the svm_range from the svms and svm_bo lists and the svms
     70 * interval tree.
     71 *
     72 * Context: The caller must hold svms->lock
     73 */
     74static void svm_range_unlink(struct svm_range *prange)
     75{
     76	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
     77		 prange, prange->start, prange->last);
     78
     79	if (prange->svm_bo) {
     80		spin_lock(&prange->svm_bo->list_lock);
     81		list_del(&prange->svm_bo_list);
     82		spin_unlock(&prange->svm_bo->list_lock);
     83	}
     84
     85	list_del(&prange->list);
     86	if (prange->it_node.start != 0 && prange->it_node.last != 0)
     87		interval_tree_remove(&prange->it_node, &prange->svms->objects);
     88}
     89
     90static void
     91svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
     92{
     93	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
     94		 prange, prange->start, prange->last);
     95
     96	mmu_interval_notifier_insert_locked(&prange->notifier, mm,
     97				     prange->start << PAGE_SHIFT,
     98				     prange->npages << PAGE_SHIFT,
     99				     &svm_range_mn_ops);
    100}
    101
    102/**
    103 * svm_range_add_to_svms - add svm range to svms
    104 * @prange: svm range structure to be added
    105 *
    106 * Add the svm range to svms interval tree and link list
    107 *
    108 * Context: The caller must hold svms->lock
    109 */
    110static void svm_range_add_to_svms(struct svm_range *prange)
    111{
    112	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
    113		 prange, prange->start, prange->last);
    114
    115	list_move_tail(&prange->list, &prange->svms->list);
    116	prange->it_node.start = prange->start;
    117	prange->it_node.last = prange->last;
    118	interval_tree_insert(&prange->it_node, &prange->svms->objects);
    119}
    120
    121static void svm_range_remove_notifier(struct svm_range *prange)
    122{
    123	pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
    124		 prange->svms, prange,
    125		 prange->notifier.interval_tree.start >> PAGE_SHIFT,
    126		 prange->notifier.interval_tree.last >> PAGE_SHIFT);
    127
    128	if (prange->notifier.interval_tree.start != 0 &&
    129	    prange->notifier.interval_tree.last != 0)
    130		mmu_interval_notifier_remove(&prange->notifier);
    131}
    132
    133static bool
    134svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
    135{
    136	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
    137	       !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
    138}
    139
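        /* DMA-map @npages system-memory pages, taken from @hmm_pfns starting at
         * @offset, into the per-GPU dma_addr array of @prange for @gpuidx.
         * VRAM (zone device) pages are not DMA-mapped; their device physical
         * address is stored with the SVM_RANGE_VRAM_DOMAIN flag set instead.
         */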
    140static int
    141svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
    142		      unsigned long offset, unsigned long npages,
    143		      unsigned long *hmm_pfns, uint32_t gpuidx)
    144{
    145	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
    146	dma_addr_t *addr = prange->dma_addr[gpuidx];
    147	struct device *dev = adev->dev;
    148	struct page *page;
    149	int i, r;
    150
    151	if (!addr) {
    152		addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
    153		if (!addr)
    154			return -ENOMEM;
    155		prange->dma_addr[gpuidx] = addr;
    156	}
    157
    158	addr += offset;
    159	for (i = 0; i < npages; i++) {
    160		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
    161			dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
    162
    163		page = hmm_pfn_to_page(hmm_pfns[i]);
    164		if (is_zone_device_page(page)) {
    165			struct amdgpu_device *bo_adev =
    166					amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
    167
    168			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
    169				   bo_adev->vm_manager.vram_base_offset -
    170				   bo_adev->kfd.dev->pgmap.range.start;
    171			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
    172			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
    173			continue;
    174		}
    175		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
    176		r = dma_mapping_error(dev, addr[i]);
    177		if (r) {
    178			dev_err(dev, "failed %d dma_map_page\n", r);
    179			return r;
    180		}
    181		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
    182				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
    183	}
    184	return 0;
    185}
    186
    187static int
    188svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
    189		  unsigned long offset, unsigned long npages,
    190		  unsigned long *hmm_pfns)
    191{
    192	struct kfd_process *p;
    193	uint32_t gpuidx;
     194	int r = 0;
    195
    196	p = container_of(prange->svms, struct kfd_process, svms);
    197
    198	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
    199		struct kfd_process_device *pdd;
    200
    201		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
    202		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
    203		if (!pdd) {
    204			pr_debug("failed to find device idx %d\n", gpuidx);
    205			return -EINVAL;
    206		}
    207
    208		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
    209					  hmm_pfns, gpuidx);
    210		if (r)
    211			break;
    212	}
    213
    214	return r;
    215}
    216
    217void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
    218			 unsigned long offset, unsigned long npages)
    219{
    220	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
    221	int i;
    222
    223	if (!dma_addr)
    224		return;
    225
    226	for (i = offset; i < offset + npages; i++) {
    227		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
    228			continue;
    229		pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
    230		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
    231		dma_addr[i] = 0;
    232	}
    233}
    234
    235void svm_range_free_dma_mappings(struct svm_range *prange)
    236{
    237	struct kfd_process_device *pdd;
    238	dma_addr_t *dma_addr;
    239	struct device *dev;
    240	struct kfd_process *p;
    241	uint32_t gpuidx;
    242
    243	p = container_of(prange->svms, struct kfd_process, svms);
    244
    245	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
    246		dma_addr = prange->dma_addr[gpuidx];
    247		if (!dma_addr)
    248			continue;
    249
    250		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
    251		if (!pdd) {
    252			pr_debug("failed to find device idx %d\n", gpuidx);
    253			continue;
    254		}
    255		dev = &pdd->dev->pdev->dev;
    256		svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
    257		kvfree(dma_addr);
    258		prange->dma_addr[gpuidx] = NULL;
    259	}
    260}
    261
    262static void svm_range_free(struct svm_range *prange)
    263{
    264	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
    265		 prange->start, prange->last);
    266
    267	svm_range_vram_node_free(prange);
    268	svm_range_free_dma_mappings(prange);
    269	mutex_destroy(&prange->lock);
    270	mutex_destroy(&prange->migrate_mutex);
    271	kfree(prange);
    272}
    273
    274static void
    275svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
    276				 uint8_t *granularity, uint32_t *flags)
    277{
    278	*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
    279	*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
    280	*granularity = 9;
    281	*flags =
    282		KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
    283}
    284
    285static struct
    286svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
    287			 uint64_t last)
    288{
    289	uint64_t size = last - start + 1;
    290	struct svm_range *prange;
    291	struct kfd_process *p;
    292
    293	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
    294	if (!prange)
    295		return NULL;
    296	prange->npages = size;
    297	prange->svms = svms;
    298	prange->start = start;
    299	prange->last = last;
    300	INIT_LIST_HEAD(&prange->list);
    301	INIT_LIST_HEAD(&prange->update_list);
    302	INIT_LIST_HEAD(&prange->svm_bo_list);
    303	INIT_LIST_HEAD(&prange->deferred_list);
    304	INIT_LIST_HEAD(&prange->child_list);
    305	atomic_set(&prange->invalid, 0);
    306	prange->validate_timestamp = 0;
    307	mutex_init(&prange->migrate_mutex);
    308	mutex_init(&prange->lock);
    309
    310	p = container_of(svms, struct kfd_process, svms);
    311	if (p->xnack_enabled)
    312		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
    313			    MAX_GPU_INSTANCE);
    314
    315	svm_range_set_default_attributes(&prange->preferred_loc,
    316					 &prange->prefetch_loc,
    317					 &prange->granularity, &prange->flags);
    318
    319	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
    320
    321	return prange;
    322}
    323
    324static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
    325{
    326	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
    327		return false;
    328
    329	return true;
    330}
    331
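        /* Final kref release of an svm_bo: detach every range still pointing at
         * it, signal the eviction fence if it is not signaled yet and
         * synchronize with pending eviction work, then drop the fence and BO
         * references and free the structure.
         */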
    332static void svm_range_bo_release(struct kref *kref)
    333{
    334	struct svm_range_bo *svm_bo;
    335
    336	svm_bo = container_of(kref, struct svm_range_bo, kref);
    337	pr_debug("svm_bo 0x%p\n", svm_bo);
    338
    339	spin_lock(&svm_bo->list_lock);
    340	while (!list_empty(&svm_bo->range_list)) {
    341		struct svm_range *prange =
    342				list_first_entry(&svm_bo->range_list,
    343						struct svm_range, svm_bo_list);
    344		/* list_del_init tells a concurrent svm_range_vram_node_new when
    345		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
    346		 */
    347		list_del_init(&prange->svm_bo_list);
    348		spin_unlock(&svm_bo->list_lock);
    349
    350		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
    351			 prange->start, prange->last);
    352		mutex_lock(&prange->lock);
    353		prange->svm_bo = NULL;
    354		mutex_unlock(&prange->lock);
    355
    356		spin_lock(&svm_bo->list_lock);
    357	}
    358	spin_unlock(&svm_bo->list_lock);
    359	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
    360		/* We're not in the eviction worker.
    361		 * Signal the fence and synchronize with any
    362		 * pending eviction work.
    363		 */
    364		dma_fence_signal(&svm_bo->eviction_fence->base);
    365		cancel_work_sync(&svm_bo->eviction_work);
    366	}
    367	dma_fence_put(&svm_bo->eviction_fence->base);
    368	amdgpu_bo_unref(&svm_bo->bo);
    369	kfree(svm_bo);
    370}
    371
    372static void svm_range_bo_wq_release(struct work_struct *work)
    373{
    374	struct svm_range_bo *svm_bo;
    375
    376	svm_bo = container_of(work, struct svm_range_bo, release_work);
    377	svm_range_bo_release(&svm_bo->kref);
    378}
    379
    380static void svm_range_bo_release_async(struct kref *kref)
    381{
    382	struct svm_range_bo *svm_bo;
    383
    384	svm_bo = container_of(kref, struct svm_range_bo, kref);
    385	pr_debug("svm_bo 0x%p\n", svm_bo);
    386	INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
    387	schedule_work(&svm_bo->release_work);
    388}
    389
    390void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
    391{
    392	kref_put(&svm_bo->kref, svm_range_bo_release_async);
    393}
    394
    395static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
    396{
    397	if (svm_bo)
    398		kref_put(&svm_bo->kref, svm_range_bo_release);
    399}
    400
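        /* Check whether @prange can keep using its current svm_bo on @adev.
         * Returns true if the existing BO is valid and reusable. Returns false
         * when a new BO is needed (no svm_bo, BO owned by another GPU, or BO
         * being evicted), after waiting for any concurrent svm_range_bo_release
         * to unlink the range from the old BO.
         */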
    401static bool
    402svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
    403{
    404	struct amdgpu_device *bo_adev;
    405
    406	mutex_lock(&prange->lock);
    407	if (!prange->svm_bo) {
    408		mutex_unlock(&prange->lock);
    409		return false;
    410	}
    411	if (prange->ttm_res) {
    412		/* We still have a reference, all is well */
    413		mutex_unlock(&prange->lock);
    414		return true;
    415	}
    416	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
    417		/*
    418		 * Migrate from GPU to GPU, remove range from source bo_adev
    419		 * svm_bo range list, and return false to allocate svm_bo from
    420		 * destination adev.
    421		 */
    422		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
    423		if (bo_adev != adev) {
    424			mutex_unlock(&prange->lock);
    425
    426			spin_lock(&prange->svm_bo->list_lock);
    427			list_del_init(&prange->svm_bo_list);
    428			spin_unlock(&prange->svm_bo->list_lock);
    429
    430			svm_range_bo_unref(prange->svm_bo);
    431			return false;
    432		}
    433		if (READ_ONCE(prange->svm_bo->evicting)) {
    434			struct dma_fence *f;
    435			struct svm_range_bo *svm_bo;
    436			/* The BO is getting evicted,
    437			 * we need to get a new one
    438			 */
    439			mutex_unlock(&prange->lock);
    440			svm_bo = prange->svm_bo;
    441			f = dma_fence_get(&svm_bo->eviction_fence->base);
    442			svm_range_bo_unref(prange->svm_bo);
    443			/* wait for the fence to avoid long spin-loop
    444			 * at list_empty_careful
    445			 */
    446			dma_fence_wait(f, false);
    447			dma_fence_put(f);
    448		} else {
    449			/* The BO was still around and we got
    450			 * a new reference to it
    451			 */
    452			mutex_unlock(&prange->lock);
    453			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
    454				 prange->svms, prange->start, prange->last);
    455
    456			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
    457			return true;
    458		}
    459
    460	} else {
    461		mutex_unlock(&prange->lock);
    462	}
    463
    464	/* We need a new svm_bo. Spin-loop to wait for concurrent
    465	 * svm_range_bo_release to finish removing this range from
    466	 * its range list. After this, it is safe to reuse the
    467	 * svm_bo pointer and svm_bo_list head.
    468	 */
    469	while (!list_empty_careful(&prange->svm_bo_list))
    470		;
    471
    472	return false;
    473}
    474
    475static struct svm_range_bo *svm_range_bo_new(void)
    476{
    477	struct svm_range_bo *svm_bo;
    478
    479	svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
    480	if (!svm_bo)
    481		return NULL;
    482
    483	kref_init(&svm_bo->kref);
    484	INIT_LIST_HEAD(&svm_bo->range_list);
    485	spin_lock_init(&svm_bo->list_lock);
    486
    487	return svm_bo;
    488}
    489
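        /* Allocate a VRAM BO backing @prange on @adev, unless the existing
         * svm_bo can be reused. The BO gets the process eviction fence attached
         * and the range is linked to the new svm_bo. @clear requests zeroed
         * VRAM.
         */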
    490int
    491svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
    492			bool clear)
    493{
    494	struct amdgpu_bo_param bp;
    495	struct svm_range_bo *svm_bo;
    496	struct amdgpu_bo_user *ubo;
    497	struct amdgpu_bo *bo;
    498	struct kfd_process *p;
    499	struct mm_struct *mm;
    500	int r;
    501
    502	p = container_of(prange->svms, struct kfd_process, svms);
    503	pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
    504		 prange->start, prange->last);
    505
    506	if (svm_range_validate_svm_bo(adev, prange))
    507		return 0;
    508
    509	svm_bo = svm_range_bo_new();
    510	if (!svm_bo) {
    511		pr_debug("failed to alloc svm bo\n");
    512		return -ENOMEM;
    513	}
    514	mm = get_task_mm(p->lead_thread);
    515	if (!mm) {
    516		pr_debug("failed to get mm\n");
    517		kfree(svm_bo);
    518		return -ESRCH;
    519	}
    520	svm_bo->svms = prange->svms;
    521	svm_bo->eviction_fence =
    522		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
    523					   mm,
    524					   svm_bo);
    525	mmput(mm);
    526	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
    527	svm_bo->evicting = 0;
    528	memset(&bp, 0, sizeof(bp));
    529	bp.size = prange->npages * PAGE_SIZE;
    530	bp.byte_align = PAGE_SIZE;
    531	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
    532	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
    533	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
    534	bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
    535	bp.type = ttm_bo_type_device;
    536	bp.resv = NULL;
    537
    538	r = amdgpu_bo_create_user(adev, &bp, &ubo);
    539	if (r) {
    540		pr_debug("failed %d to create bo\n", r);
    541		goto create_bo_failed;
    542	}
    543	bo = &ubo->bo;
    544	r = amdgpu_bo_reserve(bo, true);
    545	if (r) {
    546		pr_debug("failed %d to reserve bo\n", r);
    547		goto reserve_bo_failed;
    548	}
    549
    550	r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
    551	if (r) {
    552		pr_debug("failed %d to reserve bo\n", r);
    553		amdgpu_bo_unreserve(bo);
    554		goto reserve_bo_failed;
    555	}
    556	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
    557
    558	amdgpu_bo_unreserve(bo);
    559
    560	svm_bo->bo = bo;
    561	prange->svm_bo = svm_bo;
    562	prange->ttm_res = bo->tbo.resource;
    563	prange->offset = 0;
    564
    565	spin_lock(&svm_bo->list_lock);
    566	list_add(&prange->svm_bo_list, &svm_bo->range_list);
    567	spin_unlock(&svm_bo->list_lock);
    568
    569	return 0;
    570
    571reserve_bo_failed:
    572	amdgpu_bo_unref(&bo);
    573create_bo_failed:
    574	dma_fence_put(&svm_bo->eviction_fence->base);
    575	kfree(svm_bo);
    576	prange->ttm_res = NULL;
    577
    578	return r;
    579}
    580
    581void svm_range_vram_node_free(struct svm_range *prange)
    582{
    583	svm_range_bo_unref(prange->svm_bo);
    584	prange->ttm_res = NULL;
    585}
    586
    587struct amdgpu_device *
    588svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
    589{
    590	struct kfd_process_device *pdd;
    591	struct kfd_process *p;
    592	int32_t gpu_idx;
    593
    594	p = container_of(prange->svms, struct kfd_process, svms);
    595
    596	gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id);
    597	if (gpu_idx < 0) {
    598		pr_debug("failed to get device by id 0x%x\n", gpu_id);
    599		return NULL;
    600	}
    601	pdd = kfd_process_device_from_gpuidx(p, gpu_idx);
    602	if (!pdd) {
    603		pr_debug("failed to get device by idx 0x%x\n", gpu_idx);
    604		return NULL;
    605	}
    606
    607	return pdd->dev->adev;
    608}
    609
    610struct kfd_process_device *
    611svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
    612{
    613	struct kfd_process *p;
    614	int32_t gpu_idx, gpuid;
    615	int r;
    616
    617	p = container_of(prange->svms, struct kfd_process, svms);
    618
    619	r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx);
    620	if (r) {
    621		pr_debug("failed to get device id by adev %p\n", adev);
    622		return NULL;
    623	}
    624
    625	return kfd_process_device_from_gpuidx(p, gpu_idx);
    626}
    627
    628static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
    629{
    630	struct ttm_operation_ctx ctx = { false, false };
    631
    632	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
    633
    634	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
    635}
    636
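        /* Validate the SVM ioctl attribute array: reject unknown attribute
         * types and location or GPU IDs that do not resolve to a supported GPU
         * of process @p.
         */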
    637static int
    638svm_range_check_attr(struct kfd_process *p,
    639		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
    640{
    641	uint32_t i;
    642
    643	for (i = 0; i < nattr; i++) {
    644		uint32_t val = attrs[i].value;
    645		int gpuidx = MAX_GPU_INSTANCE;
    646
    647		switch (attrs[i].type) {
    648		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
    649			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
    650			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
    651				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
    652			break;
    653		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
    654			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
    655				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
    656			break;
    657		case KFD_IOCTL_SVM_ATTR_ACCESS:
    658		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
    659		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
    660			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
    661			break;
    662		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
    663			break;
    664		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
    665			break;
    666		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
    667			break;
    668		default:
    669			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
    670			return -EINVAL;
    671		}
    672
    673		if (gpuidx < 0) {
    674			pr_debug("no GPU 0x%x found\n", val);
    675			return -EINVAL;
    676		} else if (gpuidx < MAX_GPU_INSTANCE &&
    677			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
    678			pr_debug("GPU 0x%x not supported\n", val);
    679			return -EINVAL;
    680		}
    681	}
    682
    683	return 0;
    684}
    685
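        /* Apply already-validated attributes to @prange. *update_mapping is set
         * when a change (access bits or flags) requires the GPU mappings to be
         * updated.
         */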
    686static void
    687svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
    688		      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
    689		      bool *update_mapping)
    690{
    691	uint32_t i;
    692	int gpuidx;
    693
    694	for (i = 0; i < nattr; i++) {
    695		switch (attrs[i].type) {
    696		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
    697			prange->preferred_loc = attrs[i].value;
    698			break;
    699		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
    700			prange->prefetch_loc = attrs[i].value;
    701			break;
    702		case KFD_IOCTL_SVM_ATTR_ACCESS:
    703		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
    704		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
    705			*update_mapping = true;
    706			gpuidx = kfd_process_gpuidx_from_gpuid(p,
    707							       attrs[i].value);
    708			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
    709				bitmap_clear(prange->bitmap_access, gpuidx, 1);
    710				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
    711			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
    712				bitmap_set(prange->bitmap_access, gpuidx, 1);
    713				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
    714			} else {
    715				bitmap_clear(prange->bitmap_access, gpuidx, 1);
    716				bitmap_set(prange->bitmap_aip, gpuidx, 1);
    717			}
    718			break;
    719		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
    720			*update_mapping = true;
    721			prange->flags |= attrs[i].value;
    722			break;
    723		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
    724			*update_mapping = true;
    725			prange->flags &= ~attrs[i].value;
    726			break;
    727		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
    728			prange->granularity = attrs[i].value;
    729			break;
    730		default:
    731			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
    732		}
    733	}
    734}
    735
    736static bool
    737svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
    738			uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
    739{
    740	uint32_t i;
    741	int gpuidx;
    742
    743	for (i = 0; i < nattr; i++) {
    744		switch (attrs[i].type) {
    745		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
    746			if (prange->preferred_loc != attrs[i].value)
    747				return false;
    748			break;
    749		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
    750			/* Prefetch should always trigger a migration even
    751			 * if the value of the attribute didn't change.
    752			 */
    753			return false;
    754		case KFD_IOCTL_SVM_ATTR_ACCESS:
    755		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
    756		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
    757			gpuidx = kfd_process_gpuidx_from_gpuid(p,
    758							       attrs[i].value);
    759			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
    760				if (test_bit(gpuidx, prange->bitmap_access) ||
    761				    test_bit(gpuidx, prange->bitmap_aip))
    762					return false;
    763			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
    764				if (!test_bit(gpuidx, prange->bitmap_access))
    765					return false;
    766			} else {
    767				if (!test_bit(gpuidx, prange->bitmap_aip))
    768					return false;
    769			}
    770			break;
    771		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
    772			if ((prange->flags & attrs[i].value) != attrs[i].value)
    773				return false;
    774			break;
    775		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
    776			if ((prange->flags & attrs[i].value) != 0)
    777				return false;
    778			break;
    779		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
    780			if (prange->granularity != attrs[i].value)
    781				return false;
    782			break;
    783		default:
    784			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
    785		}
    786	}
    787
    788	return true;
    789}
    790
    791/**
    792 * svm_range_debug_dump - print all range information from svms
    793 * @svms: svm range list header
    794 *
     795 * Print each range's start, page count, end and location, from both the
     796 * svms linked list and the interval tree.
    797 *
    798 * Context: The caller must hold svms->lock
    799 */
    800static void svm_range_debug_dump(struct svm_range_list *svms)
    801{
    802	struct interval_tree_node *node;
    803	struct svm_range *prange;
    804
    805	pr_debug("dump svms 0x%p list\n", svms);
    806	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
    807
    808	list_for_each_entry(prange, &svms->list, list) {
    809		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
    810			 prange, prange->start, prange->npages,
    811			 prange->start + prange->npages - 1,
    812			 prange->actual_loc);
    813	}
    814
    815	pr_debug("dump svms 0x%p interval tree\n", svms);
    816	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
    817	node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
    818	while (node) {
    819		prange = container_of(node, struct svm_range, it_node);
    820		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
    821			 prange, prange->start, prange->npages,
    822			 prange->start + prange->npages - 1,
    823			 prange->actual_loc);
    824		node = interval_tree_iter_next(node, 0, ~0ULL);
    825	}
    826}
    827
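        /* Split one per-GPU array (e.g. dma_addr) when a range is split:
         * allocate a new array holding the new range's part and replace the old
         * array with one holding only the remaining part.
         */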
    828static int
    829svm_range_split_array(void *ppnew, void *ppold, size_t size,
    830		      uint64_t old_start, uint64_t old_n,
    831		      uint64_t new_start, uint64_t new_n)
    832{
    833	unsigned char *new, *old, *pold;
    834	uint64_t d;
    835
    836	if (!ppold)
    837		return 0;
    838	pold = *(unsigned char **)ppold;
    839	if (!pold)
    840		return 0;
    841
    842	new = kvmalloc_array(new_n, size, GFP_KERNEL);
    843	if (!new)
    844		return -ENOMEM;
    845
    846	d = (new_start - old_start) * size;
    847	memcpy(new, pold + d, new_n * size);
    848
    849	old = kvmalloc_array(old_n, size, GFP_KERNEL);
    850	if (!old) {
    851		kvfree(new);
    852		return -ENOMEM;
    853	}
    854
    855	d = (new_start == old_start) ? new_n * size : 0;
    856	memcpy(old, pold + d, old_n * size);
    857
    858	kvfree(pold);
    859	*(void **)ppold = old;
    860	*(void **)ppnew = new;
    861
    862	return 0;
    863}
    864
    865static int
    866svm_range_split_pages(struct svm_range *new, struct svm_range *old,
    867		      uint64_t start, uint64_t last)
    868{
    869	uint64_t npages = last - start + 1;
    870	int i, r;
    871
    872	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
    873		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
    874					  sizeof(*old->dma_addr[i]), old->start,
    875					  npages, new->start, new->npages);
    876		if (r)
    877			return r;
    878	}
    879
    880	return 0;
    881}
    882
    883static int
    884svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
    885		      uint64_t start, uint64_t last)
    886{
    887	uint64_t npages = last - start + 1;
    888
    889	pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
    890		 new->svms, new, new->start, start, last);
    891
    892	if (new->start == old->start) {
    893		new->offset = old->offset;
    894		old->offset += new->npages;
    895	} else {
    896		new->offset = old->offset + npages;
    897	}
    898
    899	new->svm_bo = svm_range_bo_ref(old->svm_bo);
    900	new->ttm_res = old->ttm_res;
    901
    902	spin_lock(&new->svm_bo->list_lock);
    903	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
    904	spin_unlock(&new->svm_bo->list_lock);
    905
    906	return 0;
    907}
    908
    909/**
    910 * svm_range_split_adjust - split range and adjust
    911 *
    912 * @new: new range
    913 * @old: the old range
     914 * @start: the start address, in pages, the old range is adjusted to
     915 * @last: the last address, in pages, the old range is adjusted to
     916 *
     917 * Copy the system memory dma_addr or VRAM ttm_res of the old range to the
     918 * new range (new->npages pages starting at new->start); the remaining old
     919 * range then covers @start to @last.
    920 *
    921 * Return:
    922 * 0 - OK, -ENOMEM - out of memory
    923 */
    924static int
    925svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
    926		      uint64_t start, uint64_t last)
    927{
    928	int r;
    929
    930	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
    931		 new->svms, new->start, old->start, old->last, start, last);
    932
    933	if (new->start < old->start ||
    934	    new->last > old->last) {
    935		WARN_ONCE(1, "invalid new range start or last\n");
    936		return -EINVAL;
    937	}
    938
    939	r = svm_range_split_pages(new, old, start, last);
    940	if (r)
    941		return r;
    942
    943	if (old->actual_loc && old->ttm_res) {
    944		r = svm_range_split_nodes(new, old, start, last);
    945		if (r)
    946			return r;
    947	}
    948
    949	old->npages = last - start + 1;
    950	old->start = start;
    951	old->last = last;
    952	new->flags = old->flags;
    953	new->preferred_loc = old->preferred_loc;
    954	new->prefetch_loc = old->prefetch_loc;
    955	new->actual_loc = old->actual_loc;
    956	new->granularity = old->granularity;
    957	new->mapped_to_gpu = old->mapped_to_gpu;
    958	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
    959	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
    960
    961	return 0;
    962}
    963
    964/**
     965 * svm_range_split - split a range into two ranges
    966 *
    967 * @prange: the svm range to split
    968 * @start: the remaining range start address in pages
    969 * @last: the remaining range last address in pages
    970 * @new: the result new range generated
    971 *
    972 * Two cases only:
    973 * case 1: if start == prange->start
    974 *         prange ==> prange[start, last]
    975 *         new range [last + 1, prange->last]
    976 *
    977 * case 2: if last == prange->last
    978 *         prange ==> prange[start, last]
    979 *         new range [prange->start, start - 1]
    980 *
    981 * Return:
    982 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
    983 */
    984static int
    985svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
    986		struct svm_range **new)
    987{
    988	uint64_t old_start = prange->start;
    989	uint64_t old_last = prange->last;
    990	struct svm_range_list *svms;
    991	int r = 0;
    992
    993	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
    994		 old_start, old_last, start, last);
    995
    996	if (old_start != start && old_last != last)
    997		return -EINVAL;
    998	if (start < old_start || last > old_last)
    999		return -EINVAL;
   1000
   1001	svms = prange->svms;
   1002	if (old_start == start)
   1003		*new = svm_range_new(svms, last + 1, old_last);
   1004	else
   1005		*new = svm_range_new(svms, old_start, start - 1);
   1006	if (!*new)
   1007		return -ENOMEM;
   1008
   1009	r = svm_range_split_adjust(*new, prange, start, last);
   1010	if (r) {
   1011		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
   1012			 r, old_start, old_last, start, last);
   1013		svm_range_free(*new);
   1014		*new = NULL;
   1015	}
   1016
   1017	return r;
   1018}
   1019
   1020static int
   1021svm_range_split_tail(struct svm_range *prange,
   1022		     uint64_t new_last, struct list_head *insert_list)
   1023{
   1024	struct svm_range *tail;
   1025	int r = svm_range_split(prange, prange->start, new_last, &tail);
   1026
   1027	if (!r)
   1028		list_add(&tail->list, insert_list);
   1029	return r;
   1030}
   1031
   1032static int
   1033svm_range_split_head(struct svm_range *prange,
   1034		     uint64_t new_start, struct list_head *insert_list)
   1035{
   1036	struct svm_range *head;
   1037	int r = svm_range_split(prange, new_start, prange->last, &head);
   1038
   1039	if (!r)
   1040		list_add(&head->list, insert_list);
   1041	return r;
   1042}
   1043
   1044static void
   1045svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
   1046		    struct svm_range *pchild, enum svm_work_list_ops op)
   1047{
   1048	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
   1049		 pchild, pchild->start, pchild->last, prange, op);
   1050
   1051	pchild->work_item.mm = mm;
   1052	pchild->work_item.op = op;
   1053	list_add_tail(&pchild->child_list, &prange->child_list);
   1054}
   1055
   1056/**
    1057 * svm_range_split_by_granularity - split the range at granularity boundaries around the fault address
   1058 *
   1059 * @p: the process with svms list
   1060 * @mm: mm structure
   1061 * @addr: the vm fault address in pages, to split the prange
   1062 * @parent: parent range if prange is from child list
   1063 * @prange: prange to split
   1064 *
   1065 * Trims @prange to be a single aligned block of prange->granularity if
   1066 * possible. The head and tail are added to the child_list in @parent.
   1067 *
   1068 * Context: caller must hold mmap_read_lock and prange->lock
   1069 *
   1070 * Return:
   1071 * 0 - OK, otherwise error code
   1072 */
   1073int
   1074svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
   1075			       unsigned long addr, struct svm_range *parent,
   1076			       struct svm_range *prange)
   1077{
   1078	struct svm_range *head, *tail;
   1079	unsigned long start, last, size;
   1080	int r;
   1081
    1082	/* Align the split range start and size to the granularity size, so a
    1083	 * single PTE can be used for the whole range; this reduces the number of
    1084	 * PTEs updated and the L1 TLB space used for translation.
    1085	 */
   1086	size = 1UL << prange->granularity;
   1087	start = ALIGN_DOWN(addr, size);
   1088	last = ALIGN(addr + 1, size) - 1;
   1089
   1090	pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
   1091		 prange->svms, prange->start, prange->last, start, last, size);
   1092
   1093	if (start > prange->start) {
   1094		r = svm_range_split(prange, start, prange->last, &head);
   1095		if (r)
   1096			return r;
   1097		svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
   1098	}
   1099
   1100	if (last < prange->last) {
   1101		r = svm_range_split(prange, prange->start, last, &tail);
   1102		if (r)
   1103			return r;
   1104		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
   1105	}
   1106
   1107	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
   1108	if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
   1109		prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
   1110		pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
   1111			 prange, prange->start, prange->last,
   1112			 SVM_OP_ADD_RANGE_AND_MAP);
   1113	}
   1114	return 0;
   1115}
   1116
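        /* Compute the GPU PTE flags for mapping @prange in @domain. The MTYPE
         * depends on the ASIC generation, whether the VRAM is local or on
         * another GPU in the same XGMI hive, and the range's coherence flag;
         * read-only, executable and snoop bits are applied on top.
         */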
   1117static uint64_t
   1118svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
   1119			int domain)
   1120{
   1121	struct amdgpu_device *bo_adev;
   1122	uint32_t flags = prange->flags;
   1123	uint32_t mapping_flags = 0;
   1124	uint64_t pte_flags;
   1125	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
   1126	bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
   1127
   1128	if (domain == SVM_RANGE_VRAM_DOMAIN)
   1129		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
   1130
   1131	switch (KFD_GC_VERSION(adev->kfd.dev)) {
   1132	case IP_VERSION(9, 4, 1):
   1133		if (domain == SVM_RANGE_VRAM_DOMAIN) {
   1134			if (bo_adev == adev) {
   1135				mapping_flags |= coherent ?
   1136					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
   1137			} else {
   1138				mapping_flags |= coherent ?
   1139					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
   1140				if (amdgpu_xgmi_same_hive(adev, bo_adev))
   1141					snoop = true;
   1142			}
   1143		} else {
   1144			mapping_flags |= coherent ?
   1145				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
   1146		}
   1147		break;
   1148	case IP_VERSION(9, 4, 2):
   1149		if (domain == SVM_RANGE_VRAM_DOMAIN) {
   1150			if (bo_adev == adev) {
   1151				mapping_flags |= coherent ?
   1152					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
   1153				if (adev->gmc.xgmi.connected_to_cpu)
   1154					snoop = true;
   1155			} else {
   1156				mapping_flags |= coherent ?
   1157					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
   1158				if (amdgpu_xgmi_same_hive(adev, bo_adev))
   1159					snoop = true;
   1160			}
   1161		} else {
   1162			mapping_flags |= coherent ?
   1163				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
   1164		}
   1165		break;
   1166	default:
   1167		mapping_flags |= coherent ?
   1168			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
   1169	}
   1170
   1171	mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
   1172
   1173	if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
   1174		mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
   1175	if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
   1176		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
   1177
   1178	pte_flags = AMDGPU_PTE_VALID;
   1179	pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
   1180	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
   1181
   1182	pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
   1183	return pte_flags;
   1184}
   1185
   1186static int
   1187svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
   1188			 uint64_t start, uint64_t last,
   1189			 struct dma_fence **fence)
   1190{
   1191	uint64_t init_pte_value = 0;
   1192
   1193	pr_debug("[0x%llx 0x%llx]\n", start, last);
   1194
   1195	return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
   1196				      last, init_pte_value, 0, 0, NULL, NULL,
   1197				      fence);
   1198}
   1199
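        /* Remove the GPU mappings of pages [@start, @last] on every GPU that
         * has access to @prange, wait for the page table updates to complete
         * and flush the GPU TLBs.
         */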
   1200static int
   1201svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
   1202			  unsigned long last)
   1203{
   1204	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
   1205	struct kfd_process_device *pdd;
   1206	struct dma_fence *fence = NULL;
   1207	struct kfd_process *p;
   1208	uint32_t gpuidx;
   1209	int r = 0;
   1210
   1211	if (!prange->mapped_to_gpu) {
   1212		pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
   1213			 prange, prange->start, prange->last);
   1214		return 0;
   1215	}
   1216
   1217	if (prange->start == start && prange->last == last) {
   1218		pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
   1219		prange->mapped_to_gpu = false;
   1220	}
   1221
   1222	bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
   1223		  MAX_GPU_INSTANCE);
   1224	p = container_of(prange->svms, struct kfd_process, svms);
   1225
   1226	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
   1227		pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
   1228		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
   1229		if (!pdd) {
   1230			pr_debug("failed to find device idx %d\n", gpuidx);
   1231			return -EINVAL;
   1232		}
   1233
   1234		r = svm_range_unmap_from_gpu(pdd->dev->adev,
   1235					     drm_priv_to_vm(pdd->drm_priv),
   1236					     start, last, &fence);
   1237		if (r)
   1238			break;
   1239
   1240		if (fence) {
   1241			r = dma_fence_wait(fence, false);
   1242			dma_fence_put(fence);
   1243			fence = NULL;
   1244			if (r)
   1245				break;
   1246		}
   1247		kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
   1248	}
   1249
   1250	return r;
   1251}
   1252
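        /* Map @npages pages of @prange, starting at @offset, into the GPU VM of
         * @pdd. Consecutive pages in the same memory domain are batched into a
         * single amdgpu_vm_update_range() call, and the page directories are
         * updated at the end.
         */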
   1253static int
   1254svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
   1255		     unsigned long offset, unsigned long npages, bool readonly,
   1256		     dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
   1257		     struct dma_fence **fence, bool flush_tlb)
   1258{
   1259	struct amdgpu_device *adev = pdd->dev->adev;
   1260	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
   1261	uint64_t pte_flags;
   1262	unsigned long last_start;
   1263	int last_domain;
   1264	int r = 0;
   1265	int64_t i, j;
   1266
   1267	last_start = prange->start + offset;
   1268
   1269	pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
   1270		 last_start, last_start + npages - 1, readonly);
   1271
   1272	for (i = offset; i < offset + npages; i++) {
   1273		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
   1274		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
   1275
   1276		/* Collect all pages in the same address range and memory domain
   1277		 * that can be mapped with a single call to update mapping.
   1278		 */
   1279		if (i < offset + npages - 1 &&
   1280		    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
   1281			continue;
   1282
   1283		pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
   1284			 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
   1285
   1286		pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
   1287		if (readonly)
   1288			pte_flags &= ~AMDGPU_PTE_WRITEABLE;
   1289
   1290		pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
   1291			 prange->svms, last_start, prange->start + i,
   1292			 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
   1293			 pte_flags);
   1294
   1295		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
   1296					   last_start, prange->start + i,
   1297					   pte_flags,
   1298					   (last_start - prange->start) << PAGE_SHIFT,
   1299					   bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
   1300					   NULL, dma_addr, &vm->last_update);
   1301
   1302		for (j = last_start - prange->start; j <= i; j++)
   1303			dma_addr[j] |= last_domain;
   1304
   1305		if (r) {
   1306			pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
   1307			goto out;
   1308		}
   1309		last_start = prange->start + i + 1;
   1310	}
   1311
   1312	r = amdgpu_vm_update_pdes(adev, vm, false);
   1313	if (r) {
   1314		pr_debug("failed %d to update directories 0x%lx\n", r,
   1315			 prange->start);
   1316		goto out;
   1317	}
   1318
   1319	if (fence)
   1320		*fence = dma_fence_get(vm->last_update);
   1321
   1322out:
   1323	return r;
   1324}
   1325
   1326static int
   1327svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
   1328		      unsigned long npages, bool readonly,
   1329		      unsigned long *bitmap, bool wait, bool flush_tlb)
   1330{
   1331	struct kfd_process_device *pdd;
   1332	struct amdgpu_device *bo_adev;
   1333	struct kfd_process *p;
   1334	struct dma_fence *fence = NULL;
   1335	uint32_t gpuidx;
   1336	int r = 0;
   1337
   1338	if (prange->svm_bo && prange->ttm_res)
   1339		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
   1340	else
   1341		bo_adev = NULL;
   1342
   1343	p = container_of(prange->svms, struct kfd_process, svms);
   1344	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
   1345		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
   1346		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
   1347		if (!pdd) {
   1348			pr_debug("failed to find device idx %d\n", gpuidx);
   1349			return -EINVAL;
   1350		}
   1351
   1352		pdd = kfd_bind_process_to_device(pdd->dev, p);
   1353		if (IS_ERR(pdd))
   1354			return -EINVAL;
   1355
   1356		if (bo_adev && pdd->dev->adev != bo_adev &&
   1357		    !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
   1358			pr_debug("cannot map to device idx %d\n", gpuidx);
   1359			continue;
   1360		}
   1361
   1362		r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
   1363					 prange->dma_addr[gpuidx],
   1364					 bo_adev, wait ? &fence : NULL,
   1365					 flush_tlb);
   1366		if (r)
   1367			break;
   1368
   1369		if (fence) {
   1370			r = dma_fence_wait(fence, false);
   1371			dma_fence_put(fence);
   1372			fence = NULL;
   1373			if (r) {
   1374				pr_debug("failed %d to dma fence wait\n", r);
   1375				break;
   1376			}
   1377		}
   1378
   1379		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
   1380	}
   1381
   1382	return r;
   1383}
   1384
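        /* Reservation state used while validating and mapping one range: the
         * process and range, the set of GPUs to map, and the TTM buffers,
         * validation list and ww ticket used to reserve their page tables.
         */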
   1385struct svm_validate_context {
   1386	struct kfd_process *process;
   1387	struct svm_range *prange;
   1388	bool intr;
   1389	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
   1390	struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
   1391	struct list_head validate_list;
   1392	struct ww_acquire_ctx ticket;
   1393};
   1394
   1395static int svm_range_reserve_bos(struct svm_validate_context *ctx)
   1396{
   1397	struct kfd_process_device *pdd;
   1398	struct amdgpu_vm *vm;
   1399	uint32_t gpuidx;
   1400	int r;
   1401
   1402	INIT_LIST_HEAD(&ctx->validate_list);
   1403	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
   1404		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
   1405		if (!pdd) {
   1406			pr_debug("failed to find device idx %d\n", gpuidx);
   1407			return -EINVAL;
   1408		}
   1409		vm = drm_priv_to_vm(pdd->drm_priv);
   1410
   1411		ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
   1412		ctx->tv[gpuidx].num_shared = 4;
   1413		list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
   1414	}
   1415
   1416	r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
   1417				   ctx->intr, NULL);
   1418	if (r) {
   1419		pr_debug("failed %d to reserve bo\n", r);
   1420		return r;
   1421	}
   1422
   1423	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
   1424		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
   1425		if (!pdd) {
   1426			pr_debug("failed to find device idx %d\n", gpuidx);
   1427			r = -EINVAL;
   1428			goto unreserve_out;
   1429		}
   1430
   1431		r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
   1432					      drm_priv_to_vm(pdd->drm_priv),
   1433					      svm_range_bo_validate, NULL);
   1434		if (r) {
   1435			pr_debug("failed %d validate pt bos\n", r);
   1436			goto unreserve_out;
   1437		}
   1438	}
   1439
   1440	return 0;
   1441
   1442unreserve_out:
   1443	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
   1444	return r;
   1445}
   1446
   1447static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
   1448{
   1449	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
   1450}
   1451
   1452static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
   1453{
   1454	struct kfd_process_device *pdd;
   1455
   1456	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
   1457
   1458	return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
   1459}
   1460
   1461/*
   1462 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
   1463 *
   1464 * To prevent concurrent destruction or change of range attributes, the
   1465 * svm_read_lock must be held. The caller must not hold the svm_write_lock
   1466 * because that would block concurrent evictions and lead to deadlocks. To
   1467 * serialize concurrent migrations or validations of the same range, the
   1468 * prange->migrate_mutex must be held.
   1469 *
   1470 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
    1471 * eviction fence).
   1472 *
   1473 * The following sequence ensures race-free validation and GPU mapping:
   1474 *
   1475 * 1. Reserve page table (and SVM BO if range is in VRAM)
   1476 * 2. hmm_range_fault to get page addresses (if system memory)
   1477 * 3. DMA-map pages (if system memory)
    1478 * 4-a. Take notifier lock
    1479 * 4-b. Check that the pages are still valid (mmu_interval_read_retry)
    1480 * 4-c. Check that the range was not split or otherwise invalidated
    1481 * 4-d. Update GPU page table
    1482 * 4-e. Release notifier lock
   1483 * 5. Release page table (and SVM BO) reservation
   1484 */
   1485static int svm_range_validate_and_map(struct mm_struct *mm,
   1486				      struct svm_range *prange, int32_t gpuidx,
   1487				      bool intr, bool wait, bool flush_tlb)
   1488{
   1489	struct svm_validate_context ctx;
   1490	unsigned long start, end, addr;
   1491	struct kfd_process *p;
   1492	void *owner;
   1493	int32_t idx;
   1494	int r = 0;
   1495
   1496	ctx.process = container_of(prange->svms, struct kfd_process, svms);
   1497	ctx.prange = prange;
   1498	ctx.intr = intr;
   1499
   1500	if (gpuidx < MAX_GPU_INSTANCE) {
   1501		bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE);
   1502		bitmap_set(ctx.bitmap, gpuidx, 1);
   1503	} else if (ctx.process->xnack_enabled) {
   1504		bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
   1505
    1506		/* If the range was prefetched to a GPU, or a GPU retry fault
    1507		 * migrated it there, and that GPU has the ACCESS attribute for
    1508		 * the range, create the mapping on that GPU.
    1509		 */
   1510		if (prange->actual_loc) {
   1511			gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process,
   1512							prange->actual_loc);
   1513			if (gpuidx < 0) {
   1514				WARN_ONCE(1, "failed get device by id 0x%x\n",
   1515					 prange->actual_loc);
   1516				return -EINVAL;
   1517			}
   1518			if (test_bit(gpuidx, prange->bitmap_access))
   1519				bitmap_set(ctx.bitmap, gpuidx, 1);
   1520		}
   1521	} else {
   1522		bitmap_or(ctx.bitmap, prange->bitmap_access,
   1523			  prange->bitmap_aip, MAX_GPU_INSTANCE);
   1524	}
   1525
   1526	if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE)) {
   1527		if (!prange->mapped_to_gpu)
   1528			return 0;
   1529
   1530		bitmap_copy(ctx.bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
   1531	}
   1532
   1533	if (prange->actual_loc && !prange->ttm_res) {
   1534		/* This should never happen. actual_loc gets set by
   1535		 * svm_migrate_ram_to_vram after allocating a BO.
   1536		 */
   1537		WARN_ONCE(1, "VRAM BO missing during validation\n");
   1538		return -EINVAL;
   1539	}
   1540
    1541	r = svm_range_reserve_bos(&ctx);
        	if (r)
        		return r;
   1542
   1543	p = container_of(prange->svms, struct kfd_process, svms);
   1544	owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
   1545						MAX_GPU_INSTANCE));
   1546	for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
   1547		if (kfd_svm_page_owner(p, idx) != owner) {
   1548			owner = NULL;
   1549			break;
   1550		}
   1551	}
   1552
   1553	start = prange->start << PAGE_SHIFT;
   1554	end = (prange->last + 1) << PAGE_SHIFT;
   1555	for (addr = start; addr < end && !r; ) {
   1556		struct hmm_range *hmm_range;
   1557		struct vm_area_struct *vma;
   1558		unsigned long next;
   1559		unsigned long offset;
   1560		unsigned long npages;
   1561		bool readonly;
   1562
   1563		vma = find_vma(mm, addr);
   1564		if (!vma || addr < vma->vm_start) {
   1565			r = -EFAULT;
   1566			goto unreserve_out;
   1567		}
   1568		readonly = !(vma->vm_flags & VM_WRITE);
   1569
   1570		next = min(vma->vm_end, end);
   1571		npages = (next - addr) >> PAGE_SHIFT;
   1572		WRITE_ONCE(p->svms.faulting_task, current);
   1573		r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
   1574					       addr, npages, &hmm_range,
   1575					       readonly, true, owner);
   1576		WRITE_ONCE(p->svms.faulting_task, NULL);
   1577		if (r) {
   1578			pr_debug("failed %d to get svm range pages\n", r);
   1579			goto unreserve_out;
   1580		}
   1581
   1582		offset = (addr - start) >> PAGE_SHIFT;
   1583		r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
   1584				      hmm_range->hmm_pfns);
   1585		if (r) {
   1586			pr_debug("failed %d to dma map range\n", r);
   1587			goto unreserve_out;
   1588		}
   1589
   1590		svm_range_lock(prange);
   1591		if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
   1592			pr_debug("hmm update the range, need validate again\n");
   1593			r = -EAGAIN;
   1594			goto unlock_out;
   1595		}
   1596		if (!list_empty(&prange->child_list)) {
   1597			pr_debug("range split by unmap in parallel, validate again\n");
   1598			r = -EAGAIN;
   1599			goto unlock_out;
   1600		}
   1601
   1602		r = svm_range_map_to_gpus(prange, offset, npages, readonly,
   1603					  ctx.bitmap, wait, flush_tlb);
   1604
   1605unlock_out:
   1606		svm_range_unlock(prange);
   1607
   1608		addr = next;
   1609	}
   1610
   1611	if (addr == end) {
   1612		prange->validated_once = true;
   1613		prange->mapped_to_gpu = true;
   1614	}
   1615
   1616unreserve_out:
   1617	svm_range_unreserve_bos(&ctx);
   1618
   1619	if (!r)
   1620		prange->validate_timestamp = ktime_to_us(ktime_get());
   1621
   1622	return r;
   1623}
   1624
   1625/**
   1626 * svm_range_list_lock_and_flush_work - flush pending deferred work
   1627 *
   1628 * @svms: the svm range list
   1629 * @mm: the mm structure
   1630 *
   1631 * Context: Returns with mmap write lock held, pending deferred work flushed
   1632 *
   1633 */
   1634void
   1635svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
   1636				   struct mm_struct *mm)
   1637{
   1638retry_flush_work:
   1639	flush_work(&svms->deferred_list_work);
   1640	mmap_write_lock(mm);
   1641
   1642	if (list_empty(&svms->deferred_range_list))
   1643		return;
   1644	mmap_write_unlock(mm);
   1645	pr_debug("retry flush\n");
   1646	goto retry_flush_work;
   1647}
   1648
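        /* Delayed work scheduled after an eviction: revalidate and re-map all
         * invalidated ranges, then resume the user queues. Reschedules itself
         * if validation fails or new evictions raced in.
         */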
   1649static void svm_range_restore_work(struct work_struct *work)
   1650{
   1651	struct delayed_work *dwork = to_delayed_work(work);
   1652	struct amdkfd_process_info *process_info;
   1653	struct svm_range_list *svms;
   1654	struct svm_range *prange;
   1655	struct kfd_process *p;
   1656	struct mm_struct *mm;
   1657	int evicted_ranges;
   1658	int invalid;
   1659	int r;
   1660
   1661	svms = container_of(dwork, struct svm_range_list, restore_work);
   1662	evicted_ranges = atomic_read(&svms->evicted_ranges);
   1663	if (!evicted_ranges)
   1664		return;
   1665
   1666	pr_debug("restore svm ranges\n");
   1667
   1668	p = container_of(svms, struct kfd_process, svms);
   1669	process_info = p->kgd_process_info;
   1670
    1671	/* Keep the mm reference while svm_range_validate_and_map maps the ranges */
   1672	mm = get_task_mm(p->lead_thread);
   1673	if (!mm) {
   1674		pr_debug("svms 0x%p process mm gone\n", svms);
   1675		return;
   1676	}
   1677
   1678	mutex_lock(&process_info->lock);
   1679	svm_range_list_lock_and_flush_work(svms, mm);
   1680	mutex_lock(&svms->lock);
   1681
   1682	evicted_ranges = atomic_read(&svms->evicted_ranges);
   1683
   1684	list_for_each_entry(prange, &svms->list, list) {
   1685		invalid = atomic_read(&prange->invalid);
   1686		if (!invalid)
   1687			continue;
   1688
   1689		pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
   1690			 prange->svms, prange, prange->start, prange->last,
   1691			 invalid);
   1692
    1693		/*
    1694		 * If the range is migrating, wait until the migration is done.
    1695		 */
   1696		mutex_lock(&prange->migrate_mutex);
   1697
   1698		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
   1699					       false, true, false);
   1700		if (r)
   1701			pr_debug("failed %d to map 0x%lx to gpus\n", r,
   1702				 prange->start);
   1703
   1704		mutex_unlock(&prange->migrate_mutex);
   1705		if (r)
   1706			goto out_reschedule;
   1707
   1708		if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
   1709			goto out_reschedule;
   1710	}
   1711
   1712	if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
   1713	    evicted_ranges)
   1714		goto out_reschedule;
   1715
   1716	evicted_ranges = 0;
   1717
   1718	r = kgd2kfd_resume_mm(mm);
   1719	if (r) {
   1720		/* No recovery from this failure. Probably the CP is
   1721		 * hanging. No point trying again.
   1722		 */
   1723		pr_debug("failed %d to resume KFD\n", r);
   1724	}
   1725
   1726	pr_debug("restore svm ranges successfully\n");
   1727
   1728out_reschedule:
   1729	mutex_unlock(&svms->lock);
   1730	mmap_write_unlock(mm);
   1731	mutex_unlock(&process_info->lock);
   1732	mmput(mm);
   1733
   1734	/* If validation failed, reschedule another attempt */
   1735	if (evicted_ranges) {
   1736		pr_debug("reschedule to restore svm range\n");
   1737		schedule_delayed_work(&svms->restore_work,
   1738			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
   1739	}
   1740}
   1741
   1742/**
   1743 * svm_range_evict - evict svm range
   1744 * @prange: svm range structure
   1745 * @mm: current process mm_struct
    1746 * @start: first page of the invalidated address range, in pages
    1747 * @last: last page of the invalidated address range, in pages
    1748 *
    1749 * Stop all queues of the process to ensure the GPU doesn't access the memory,
    1750 * then return to let the CPU evict the buffer and proceed with the CPU page
    1751 * table update.
    1752 *
    1753 * No lock is needed to sync CPU page table invalidation with GPU execution.
    1754 * If invalidation happens while the restore work runs, the restore work restarts
    1755 * to pick up the latest CPU page mapping to the GPU, then starts the queues.
   1756 */
   1757static int
   1758svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
   1759		unsigned long start, unsigned long last)
   1760{
   1761	struct svm_range_list *svms = prange->svms;
   1762	struct svm_range *pchild;
   1763	struct kfd_process *p;
   1764	int r = 0;
   1765
   1766	p = container_of(svms, struct kfd_process, svms);
   1767
   1768	pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
   1769		 svms, prange->start, prange->last, start, last);
   1770
   1771	if (!p->xnack_enabled) {
   1772		int evicted_ranges;
   1773
   1774		list_for_each_entry(pchild, &prange->child_list, child_list) {
   1775			mutex_lock_nested(&pchild->lock, 1);
   1776			if (pchild->start <= last && pchild->last >= start) {
   1777				pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
   1778					 pchild->start, pchild->last);
   1779				atomic_inc(&pchild->invalid);
   1780			}
   1781			mutex_unlock(&pchild->lock);
   1782		}
   1783
   1784		if (prange->start <= last && prange->last >= start)
   1785			atomic_inc(&prange->invalid);
   1786
   1787		evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
   1788		if (evicted_ranges != 1)
   1789			return r;
   1790
   1791		pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
   1792			 prange->svms, prange->start, prange->last);
   1793
   1794		/* First eviction, stop the queues */
   1795		r = kgd2kfd_quiesce_mm(mm);
   1796		if (r)
   1797			pr_debug("failed to quiesce KFD\n");
   1798
   1799		pr_debug("schedule to restore svm %p ranges\n", svms);
   1800		schedule_delayed_work(&svms->restore_work,
   1801			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
   1802	} else {
   1803		unsigned long s, l;
   1804
   1805		pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
   1806			 prange->svms, start, last);
   1807		list_for_each_entry(pchild, &prange->child_list, child_list) {
   1808			mutex_lock_nested(&pchild->lock, 1);
   1809			s = max(start, pchild->start);
   1810			l = min(last, pchild->last);
   1811			if (l >= s)
   1812				svm_range_unmap_from_gpus(pchild, s, l);
   1813			mutex_unlock(&pchild->lock);
   1814		}
   1815		s = max(start, prange->start);
   1816		l = min(last, prange->last);
   1817		if (l >= s)
   1818			svm_range_unmap_from_gpus(prange, s, l);
   1819	}
   1820
   1821	return r;
   1822}
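
/*
 * Illustrative helper (a sketch, not used by the driver): svm_range_evict and
 * the unmap paths below clamp the notifier interval [start, last] against each
 * child range and only act when the clamped interval is non-empty. The check
 * they rely on, using the same inclusive page-index convention as
 * prange->start/prange->last, boils down to:
 */
static inline bool svm_range_example_overlap(unsigned long a_start,
					     unsigned long a_last,
					     unsigned long b_start,
					     unsigned long b_last)
{
	/* non-empty intersection of two inclusive page ranges */
	return max(a_start, b_start) <= min(a_last, b_last);
}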
   1823
   1824static struct svm_range *svm_range_clone(struct svm_range *old)
   1825{
   1826	struct svm_range *new;
   1827
   1828	new = svm_range_new(old->svms, old->start, old->last);
   1829	if (!new)
   1830		return NULL;
   1831
   1832	if (old->svm_bo) {
   1833		new->ttm_res = old->ttm_res;
   1834		new->offset = old->offset;
   1835		new->svm_bo = svm_range_bo_ref(old->svm_bo);
   1836		spin_lock(&new->svm_bo->list_lock);
   1837		list_add(&new->svm_bo_list, &new->svm_bo->range_list);
   1838		spin_unlock(&new->svm_bo->list_lock);
   1839	}
   1840	new->flags = old->flags;
   1841	new->preferred_loc = old->preferred_loc;
   1842	new->prefetch_loc = old->prefetch_loc;
   1843	new->actual_loc = old->actual_loc;
   1844	new->granularity = old->granularity;
   1845	new->mapped_to_gpu = old->mapped_to_gpu;
   1846	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
   1847	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
   1848
   1849	return new;
   1850}
   1851
   1852/**
   1853 * svm_range_add - add svm range and handle overlap
    1854 * @p: the process to add the range to
    1855 * @start: range start address, in pages
    1856 * @size: range size, in pages
    1857 * @nattr: number of attributes
    1858 * @attrs: array of attributes
    1859 * @update_list: output, the ranges that need validation and GPU mapping updates
    1860 * @insert_list: output, the ranges that need to be inserted into svms
    1861 * @remove_list: output, the ranges that are replaced and must be removed from svms
    1862 *
    1863 * Check if the virtual address range overlaps any existing ranges, split partly
    1864 * overlapping ranges and add new ranges in the gaps. All changes should be
    1865 * applied to the range_list and interval tree transactionally. If any range
    1866 * split or allocation fails, the entire update fails. Therefore any existing
    1867 * overlapping svm_ranges are cloned and the original svm_ranges are left
    1868 * unchanged.
   1869 *
   1870 * If the transaction succeeds, the caller can update and insert clones and
   1871 * new ranges, then free the originals.
   1872 *
   1873 * Otherwise the caller can free the clones and new ranges, while the old
   1874 * svm_ranges remain unchanged.
   1875 *
   1876 * Context: Process context, caller must hold svms->lock
   1877 *
   1878 * Return:
   1879 * 0 - OK, otherwise error code
   1880 */
   1881static int
   1882svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
   1883	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
   1884	      struct list_head *update_list, struct list_head *insert_list,
   1885	      struct list_head *remove_list)
   1886{
   1887	unsigned long last = start + size - 1UL;
   1888	struct svm_range_list *svms = &p->svms;
   1889	struct interval_tree_node *node;
   1890	struct svm_range *prange;
   1891	struct svm_range *tmp;
   1892	int r = 0;
   1893
   1894	pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
   1895
   1896	INIT_LIST_HEAD(update_list);
   1897	INIT_LIST_HEAD(insert_list);
   1898	INIT_LIST_HEAD(remove_list);
   1899
   1900	node = interval_tree_iter_first(&svms->objects, start, last);
   1901	while (node) {
   1902		struct interval_tree_node *next;
   1903		unsigned long next_start;
   1904
   1905		pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
   1906			 node->last);
   1907
   1908		prange = container_of(node, struct svm_range, it_node);
   1909		next = interval_tree_iter_next(node, start, last);
   1910		next_start = min(node->last, last) + 1;
   1911
   1912		if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
   1913			/* nothing to do */
   1914		} else if (node->start < start || node->last > last) {
   1915			/* node intersects the update range and its attributes
   1916			 * will change. Clone and split it, apply updates only
   1917			 * to the overlapping part
   1918			 */
   1919			struct svm_range *old = prange;
   1920
   1921			prange = svm_range_clone(old);
   1922			if (!prange) {
   1923				r = -ENOMEM;
   1924				goto out;
   1925			}
   1926
   1927			list_add(&old->update_list, remove_list);
   1928			list_add(&prange->list, insert_list);
   1929			list_add(&prange->update_list, update_list);
   1930
   1931			if (node->start < start) {
   1932				pr_debug("change old range start\n");
   1933				r = svm_range_split_head(prange, start,
   1934							 insert_list);
   1935				if (r)
   1936					goto out;
   1937			}
   1938			if (node->last > last) {
   1939				pr_debug("change old range last\n");
   1940				r = svm_range_split_tail(prange, last,
   1941							 insert_list);
   1942				if (r)
   1943					goto out;
   1944			}
   1945		} else {
   1946			/* The node is contained within start..last,
   1947			 * just update it
   1948			 */
   1949			list_add(&prange->update_list, update_list);
   1950		}
   1951
   1952		/* insert a new node if needed */
   1953		if (node->start > start) {
   1954			prange = svm_range_new(svms, start, node->start - 1);
   1955			if (!prange) {
   1956				r = -ENOMEM;
   1957				goto out;
   1958			}
   1959
   1960			list_add(&prange->list, insert_list);
   1961			list_add(&prange->update_list, update_list);
   1962		}
   1963
   1964		node = next;
   1965		start = next_start;
   1966	}
   1967
   1968	/* add a final range at the end if needed */
   1969	if (start <= last) {
   1970		prange = svm_range_new(svms, start, last);
   1971		if (!prange) {
   1972			r = -ENOMEM;
   1973			goto out;
   1974		}
   1975		list_add(&prange->list, insert_list);
   1976		list_add(&prange->update_list, update_list);
   1977	}
   1978
   1979out:
   1980	if (r)
   1981		list_for_each_entry_safe(prange, tmp, insert_list, list)
   1982			svm_range_free(prange);
   1983
   1984	return r;
   1985}
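
/*
 * Worked example (illustrative only): assume svms already holds a range
 * [0x100 0x2ff] and svm_range_add() is called with start=0x200, size=0x200
 * (last=0x3ff) and different attributes. The existing range overlaps only
 * partially, so it is cloned and put on remove_list; the clone goes on
 * insert_list/update_list and is split at 0x200 so that the head [0x100 0x1ff]
 * keeps the old attributes, leaving [0x200 0x2ff] to be updated. The uncovered
 * tail [0x300 0x3ff] gets a brand new range on insert_list/update_list. If any
 * allocation fails along the way, the clones and new ranges on insert_list are
 * freed and the original ranges stay untouched.
 */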
   1986
   1987static void
   1988svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
   1989					    struct svm_range *prange)
   1990{
   1991	unsigned long start;
   1992	unsigned long last;
   1993
   1994	start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
   1995	last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
   1996
   1997	if (prange->start == start && prange->last == last)
   1998		return;
   1999
   2000	pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
   2001		  prange->svms, prange, start, last, prange->start,
   2002		  prange->last);
   2003
   2004	if (start != 0 && last != 0) {
   2005		interval_tree_remove(&prange->it_node, &prange->svms->objects);
   2006		svm_range_remove_notifier(prange);
   2007	}
   2008	prange->it_node.start = prange->start;
   2009	prange->it_node.last = prange->last;
   2010
   2011	interval_tree_insert(&prange->it_node, &prange->svms->objects);
   2012	svm_range_add_notifier_locked(mm, prange);
   2013}
   2014
   2015static void
   2016svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
   2017			 struct mm_struct *mm)
   2018{
   2019	switch (prange->work_item.op) {
   2020	case SVM_OP_NULL:
   2021		pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
   2022			 svms, prange, prange->start, prange->last);
   2023		break;
   2024	case SVM_OP_UNMAP_RANGE:
   2025		pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
   2026			 svms, prange, prange->start, prange->last);
   2027		svm_range_unlink(prange);
   2028		svm_range_remove_notifier(prange);
   2029		svm_range_free(prange);
   2030		break;
   2031	case SVM_OP_UPDATE_RANGE_NOTIFIER:
   2032		pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
   2033			 svms, prange, prange->start, prange->last);
   2034		svm_range_update_notifier_and_interval_tree(mm, prange);
   2035		break;
   2036	case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
   2037		pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
   2038			 svms, prange, prange->start, prange->last);
   2039		svm_range_update_notifier_and_interval_tree(mm, prange);
   2040		/* TODO: implement deferred validation and mapping */
   2041		break;
   2042	case SVM_OP_ADD_RANGE:
   2043		pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
   2044			 prange->start, prange->last);
   2045		svm_range_add_to_svms(prange);
   2046		svm_range_add_notifier_locked(mm, prange);
   2047		break;
   2048	case SVM_OP_ADD_RANGE_AND_MAP:
   2049		pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
   2050			 prange, prange->start, prange->last);
   2051		svm_range_add_to_svms(prange);
   2052		svm_range_add_notifier_locked(mm, prange);
   2053		/* TODO: implement deferred validation and mapping */
   2054		break;
   2055	default:
   2056		WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
   2057			 prange->work_item.op);
   2058	}
   2059}
   2060
   2061static void svm_range_drain_retry_fault(struct svm_range_list *svms)
   2062{
   2063	struct kfd_process_device *pdd;
   2064	struct kfd_process *p;
   2065	int drain;
   2066	uint32_t i;
   2067
   2068	p = container_of(svms, struct kfd_process, svms);
   2069
   2070restart:
   2071	drain = atomic_read(&svms->drain_pagefaults);
   2072	if (!drain)
   2073		return;
   2074
   2075	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
   2076		pdd = p->pdds[i];
   2077		if (!pdd)
   2078			continue;
   2079
   2080		pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
   2081
   2082		amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
   2083						     &pdd->dev->adev->irq.ih1);
   2084		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
   2085	}
   2086	if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
   2087		goto restart;
   2088}
   2089
   2090static void svm_range_deferred_list_work(struct work_struct *work)
   2091{
   2092	struct svm_range_list *svms;
   2093	struct svm_range *prange;
   2094	struct mm_struct *mm;
   2095
   2096	svms = container_of(work, struct svm_range_list, deferred_list_work);
   2097	pr_debug("enter svms 0x%p\n", svms);
   2098
   2099	spin_lock(&svms->deferred_list_lock);
   2100	while (!list_empty(&svms->deferred_range_list)) {
   2101		prange = list_first_entry(&svms->deferred_range_list,
   2102					  struct svm_range, deferred_list);
   2103		spin_unlock(&svms->deferred_list_lock);
   2104
   2105		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
   2106			 prange->start, prange->last, prange->work_item.op);
   2107
   2108		mm = prange->work_item.mm;
   2109retry:
   2110		mmap_write_lock(mm);
   2111
   2112		/* Checking for the need to drain retry faults must be inside
   2113		 * mmap write lock to serialize with munmap notifiers.
   2114		 */
   2115		if (unlikely(atomic_read(&svms->drain_pagefaults))) {
   2116			mmap_write_unlock(mm);
   2117			svm_range_drain_retry_fault(svms);
   2118			goto retry;
   2119		}
   2120
    2121		/* Removal from deferred_list must happen inside the mmap write
    2122		 * lock to avoid two races:
    2123		 * 1. unmap_from_cpu may change work_item.op and add the range
    2124		 *    to deferred_list again, causing a use-after-free.
    2125		 * 2. svm_range_list_lock_and_flush_work may take the mmap write
    2126		 *    lock and continue because deferred_list is empty, while the
    2127		 *    deferred_list work is actually waiting for the mmap lock.
    2128		 */
   2129		spin_lock(&svms->deferred_list_lock);
   2130		list_del_init(&prange->deferred_list);
   2131		spin_unlock(&svms->deferred_list_lock);
   2132
   2133		mutex_lock(&svms->lock);
   2134		mutex_lock(&prange->migrate_mutex);
   2135		while (!list_empty(&prange->child_list)) {
   2136			struct svm_range *pchild;
   2137
   2138			pchild = list_first_entry(&prange->child_list,
   2139						struct svm_range, child_list);
   2140			pr_debug("child prange 0x%p op %d\n", pchild,
   2141				 pchild->work_item.op);
   2142			list_del_init(&pchild->child_list);
   2143			svm_range_handle_list_op(svms, pchild, mm);
   2144		}
   2145		mutex_unlock(&prange->migrate_mutex);
   2146
   2147		svm_range_handle_list_op(svms, prange, mm);
   2148		mutex_unlock(&svms->lock);
   2149		mmap_write_unlock(mm);
   2150
   2151		/* Pairs with mmget in svm_range_add_list_work */
   2152		mmput(mm);
   2153
   2154		spin_lock(&svms->deferred_list_lock);
   2155	}
   2156	spin_unlock(&svms->deferred_list_lock);
   2157	pr_debug("exit svms 0x%p\n", svms);
   2158}
   2159
   2160void
   2161svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
   2162			struct mm_struct *mm, enum svm_work_list_ops op)
   2163{
   2164	spin_lock(&svms->deferred_list_lock);
   2165	/* if prange is on the deferred list */
   2166	if (!list_empty(&prange->deferred_list)) {
   2167		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
   2168		WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
   2169		if (op != SVM_OP_NULL &&
   2170		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
   2171			prange->work_item.op = op;
   2172	} else {
   2173		prange->work_item.op = op;
   2174
   2175		/* Pairs with mmput in deferred_list_work */
   2176		mmget(mm);
   2177		prange->work_item.mm = mm;
   2178		list_add_tail(&prange->deferred_list,
   2179			      &prange->svms->deferred_range_list);
   2180		pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
   2181			 prange, prange->start, prange->last, op);
   2182	}
   2183	spin_unlock(&svms->deferred_list_lock);
   2184}
   2185
   2186void schedule_deferred_list_work(struct svm_range_list *svms)
   2187{
   2188	spin_lock(&svms->deferred_list_lock);
   2189	if (!list_empty(&svms->deferred_range_list))
   2190		schedule_work(&svms->deferred_list_work);
   2191	spin_unlock(&svms->deferred_list_lock);
   2192}
   2193
   2194static void
   2195svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
   2196		      struct svm_range *prange, unsigned long start,
   2197		      unsigned long last)
   2198{
   2199	struct svm_range *head;
   2200	struct svm_range *tail;
   2201
   2202	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
   2203		pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
   2204			 prange->start, prange->last);
   2205		return;
   2206	}
   2207	if (start > prange->last || last < prange->start)
   2208		return;
   2209
   2210	head = tail = prange;
   2211	if (start > prange->start)
   2212		svm_range_split(prange, prange->start, start - 1, &tail);
   2213	if (last < tail->last)
   2214		svm_range_split(tail, last + 1, tail->last, &head);
   2215
   2216	if (head != prange && tail != prange) {
   2217		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
   2218		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
   2219	} else if (tail != prange) {
   2220		svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
   2221	} else if (head != prange) {
   2222		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
   2223	} else if (parent != prange) {
   2224		prange->work_item.op = SVM_OP_UNMAP_RANGE;
   2225	}
   2226}
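
/*
 * Worked example (illustrative only): if prange covers [0x100 0x1ff] and the
 * CPU unmaps [0x140 0x17f], the first split trims prange to [0x100 0x13f] and
 * the second split carves out head [0x140 0x17f] and tail [0x180 0x1ff]. The
 * head is queued as a child with SVM_OP_UNMAP_RANGE and the tail with
 * SVM_OP_ADD_RANGE, so the unmapped middle is removed while both remaining
 * pieces survive as separate ranges.
 */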
   2227
   2228static void
   2229svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
   2230			 unsigned long start, unsigned long last)
   2231{
   2232	struct svm_range_list *svms;
   2233	struct svm_range *pchild;
   2234	struct kfd_process *p;
   2235	unsigned long s, l;
   2236	bool unmap_parent;
   2237
   2238	p = kfd_lookup_process_by_mm(mm);
   2239	if (!p)
   2240		return;
   2241	svms = &p->svms;
   2242
   2243	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
   2244		 prange, prange->start, prange->last, start, last);
   2245
   2246	/* Make sure pending page faults are drained in the deferred worker
   2247	 * before the range is freed to avoid straggler interrupts on
   2248	 * unmapped memory causing "phantom faults".
   2249	 */
   2250	atomic_inc(&svms->drain_pagefaults);
   2251
   2252	unmap_parent = start <= prange->start && last >= prange->last;
   2253
   2254	list_for_each_entry(pchild, &prange->child_list, child_list) {
   2255		mutex_lock_nested(&pchild->lock, 1);
   2256		s = max(start, pchild->start);
   2257		l = min(last, pchild->last);
   2258		if (l >= s)
   2259			svm_range_unmap_from_gpus(pchild, s, l);
   2260		svm_range_unmap_split(mm, prange, pchild, start, last);
   2261		mutex_unlock(&pchild->lock);
   2262	}
   2263	s = max(start, prange->start);
   2264	l = min(last, prange->last);
   2265	if (l >= s)
   2266		svm_range_unmap_from_gpus(prange, s, l);
   2267	svm_range_unmap_split(mm, prange, prange, start, last);
   2268
   2269	if (unmap_parent)
   2270		svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
   2271	else
   2272		svm_range_add_list_work(svms, prange, mm,
   2273					SVM_OP_UPDATE_RANGE_NOTIFIER);
   2274	schedule_deferred_list_work(svms);
   2275
   2276	kfd_unref_process(p);
   2277}
   2278
   2279/**
   2280 * svm_range_cpu_invalidate_pagetables - interval notifier callback
   2281 * @mni: mmu_interval_notifier struct
   2282 * @range: mmu_notifier_range struct
   2283 * @cur_seq: value to pass to mmu_interval_set_seq()
   2284 *
    2285 * If the event is MMU_NOTIFY_UNMAP, this comes from a CPU unmap of the range;
    2286 * otherwise it comes from migration or a CPU page invalidation callback.
    2287 *
    2288 * For an unmap event, unmap the range from the GPUs, remove the prange from svms
    2289 * in a delayed work thread, and split the prange if only part of it is unmapped.
    2290 *
    2291 * For an invalidation event, if GPU retry fault is not enabled, evict the queues,
    2292 * then schedule svm_range_restore_work to update the GPU mapping and resume the
    2293 * queues. If GPU retry fault is enabled, unmap the svm range from the GPU; the
    2294 * retry fault will update the GPU mapping to recover.
   2295 *
   2296 * Context: mmap lock, notifier_invalidate_start lock are held
   2297 *          for invalidate event, prange lock is held if this is from migration
   2298 */
   2299static bool
   2300svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
   2301				    const struct mmu_notifier_range *range,
   2302				    unsigned long cur_seq)
   2303{
   2304	struct svm_range *prange;
   2305	unsigned long start;
   2306	unsigned long last;
   2307
   2308	if (range->event == MMU_NOTIFY_RELEASE)
   2309		return true;
   2310	if (!mmget_not_zero(mni->mm))
   2311		return true;
   2312
   2313	start = mni->interval_tree.start;
   2314	last = mni->interval_tree.last;
   2315	start = max(start, range->start) >> PAGE_SHIFT;
   2316	last = min(last, range->end - 1) >> PAGE_SHIFT;
   2317	pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
   2318		 start, last, range->start >> PAGE_SHIFT,
   2319		 (range->end - 1) >> PAGE_SHIFT,
   2320		 mni->interval_tree.start >> PAGE_SHIFT,
   2321		 mni->interval_tree.last >> PAGE_SHIFT, range->event);
   2322
   2323	prange = container_of(mni, struct svm_range, notifier);
   2324
   2325	svm_range_lock(prange);
   2326	mmu_interval_set_seq(mni, cur_seq);
   2327
   2328	switch (range->event) {
   2329	case MMU_NOTIFY_UNMAP:
   2330		svm_range_unmap_from_cpu(mni->mm, prange, start, last);
   2331		break;
   2332	default:
   2333		svm_range_evict(prange, mni->mm, start, last);
   2334		break;
   2335	}
   2336
   2337	svm_range_unlock(prange);
   2338	mmput(mni->mm);
   2339
   2340	return true;
   2341}
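
/*
 * Worked example (illustrative only, with 4 KiB pages): if the notifier
 * interval covers bytes [0x7f0000200000 0x7f00005fffff] and the invalidation
 * range is [0x7f0000000000 0x7f0000400000) (end exclusive), the callback above
 * clamps and shifts this to the inclusive page interval [0x7f0000200
 * 0x7f00003ff] before dispatching to svm_range_unmap_from_cpu or
 * svm_range_evict.
 */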
   2342
   2343/**
   2344 * svm_range_from_addr - find svm range from fault address
   2345 * @svms: svm range list header
   2346 * @addr: address to search range interval tree, in pages
   2347 * @parent: parent range if range is on child list
   2348 *
   2349 * Context: The caller must hold svms->lock
   2350 *
   2351 * Return: the svm_range found or NULL
   2352 */
   2353struct svm_range *
   2354svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
   2355		    struct svm_range **parent)
   2356{
   2357	struct interval_tree_node *node;
   2358	struct svm_range *prange;
   2359	struct svm_range *pchild;
   2360
   2361	node = interval_tree_iter_first(&svms->objects, addr, addr);
   2362	if (!node)
   2363		return NULL;
   2364
   2365	prange = container_of(node, struct svm_range, it_node);
   2366	pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
   2367		 addr, prange->start, prange->last, node->start, node->last);
   2368
   2369	if (addr >= prange->start && addr <= prange->last) {
   2370		if (parent)
   2371			*parent = prange;
   2372		return prange;
   2373	}
   2374	list_for_each_entry(pchild, &prange->child_list, child_list)
   2375		if (addr >= pchild->start && addr <= pchild->last) {
   2376			pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
   2377				 addr, pchild->start, pchild->last);
   2378			if (parent)
   2379				*parent = prange;
   2380			return pchild;
   2381		}
   2382
   2383	return NULL;
   2384}
   2385
   2386/* svm_range_best_restore_location - decide the best fault restore location
   2387 * @prange: svm range structure
   2388 * @adev: the GPU on which vm fault happened
   2389 *
    2390 * This is only called when xnack is on, to decide the best location to restore
    2391 * the range mapping after a GPU vm fault. The caller uses the best location to
    2392 * migrate the range if the actual loc is not the best location, then updates
    2393 * the GPU page table mapping to the best location.
    2394 *
    2395 * If the preferred loc is accessible by the faulting GPU, use the preferred loc.
    2396 * If the faulting gpu idx is in the range ACCESSIBLE bitmap, best_loc is the
    2397 * faulting gpu. If the faulting gpu idx is in the ACCESSIBLE_IN_PLACE bitmap:
    2398 *    if the range actual loc is cpu, best_loc is cpu;
    2399 *    if the faulting gpu is on the same xgmi hive as the actual loc gpu,
    2400 *    best_loc is the range actual loc.
    2401 * Otherwise the GPU has no access and best_loc is -1.
    2402 *
    2403 * Return:
    2404 * -1 if the faulting GPU has no access
    2405 * 0 for CPU, otherwise the GPU id
   2406 */
   2407static int32_t
   2408svm_range_best_restore_location(struct svm_range *prange,
   2409				struct amdgpu_device *adev,
   2410				int32_t *gpuidx)
   2411{
   2412	struct amdgpu_device *bo_adev, *preferred_adev;
   2413	struct kfd_process *p;
   2414	uint32_t gpuid;
   2415	int r;
   2416
   2417	p = container_of(prange->svms, struct kfd_process, svms);
   2418
   2419	r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx);
   2420	if (r < 0) {
   2421		pr_debug("failed to get gpuid from kgd\n");
   2422		return -1;
   2423	}
   2424
   2425	if (prange->preferred_loc == gpuid ||
   2426	    prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
   2427		return prange->preferred_loc;
   2428	} else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
   2429		preferred_adev = svm_range_get_adev_by_id(prange,
   2430							prange->preferred_loc);
   2431		if (amdgpu_xgmi_same_hive(adev, preferred_adev))
   2432			return prange->preferred_loc;
   2433		/* fall through */
   2434	}
   2435
   2436	if (test_bit(*gpuidx, prange->bitmap_access))
   2437		return gpuid;
   2438
   2439	if (test_bit(*gpuidx, prange->bitmap_aip)) {
   2440		if (!prange->actual_loc)
   2441			return 0;
   2442
   2443		bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
   2444		if (amdgpu_xgmi_same_hive(adev, bo_adev))
   2445			return prange->actual_loc;
   2446		else
   2447			return 0;
   2448	}
   2449
   2450	return -1;
   2451}
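
/*
 * Example walkthrough (illustrative only): a fault from GPU A on a range whose
 * actual_loc is GPU B, where the preferred-location checks above do not apply
 * and A is not set in bitmap_access but is set in bitmap_aip: if A and B are
 * on the same XGMI hive, best_loc is B and the fault is served in place; if
 * not, best_loc is 0 and the caller (svm_range_restore_pages) migrates the
 * range back to system memory.
 */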
   2452
   2453static int
   2454svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
   2455			       unsigned long *start, unsigned long *last,
   2456			       bool *is_heap_stack)
   2457{
   2458	struct vm_area_struct *vma;
   2459	struct interval_tree_node *node;
   2460	unsigned long start_limit, end_limit;
   2461
   2462	vma = find_vma(p->mm, addr << PAGE_SHIFT);
   2463	if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
   2464		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
   2465		return -EFAULT;
   2466	}
   2467
   2468	*is_heap_stack = (vma->vm_start <= vma->vm_mm->brk &&
   2469			  vma->vm_end >= vma->vm_mm->start_brk) ||
   2470			 (vma->vm_start <= vma->vm_mm->start_stack &&
   2471			  vma->vm_end >= vma->vm_mm->start_stack);
   2472
   2473	start_limit = max(vma->vm_start >> PAGE_SHIFT,
   2474		      (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
   2475	end_limit = min(vma->vm_end >> PAGE_SHIFT,
   2476		    (unsigned long)ALIGN(addr + 1, 2UL << 8));
   2477	/* First range that starts after the fault address */
   2478	node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
   2479	if (node) {
   2480		end_limit = min(end_limit, node->start);
   2481		/* Last range that ends before the fault address */
   2482		node = container_of(rb_prev(&node->rb),
   2483				    struct interval_tree_node, rb);
   2484	} else {
   2485		/* Last range must end before addr because
   2486		 * there was no range after addr
   2487		 */
   2488		node = container_of(rb_last(&p->svms.objects.rb_root),
   2489				    struct interval_tree_node, rb);
   2490	}
   2491	if (node) {
   2492		if (node->last >= addr) {
   2493			WARN(1, "Overlap with prev node and page fault addr\n");
   2494			return -EFAULT;
   2495		}
   2496		start_limit = max(start_limit, node->last + 1);
   2497	}
   2498
   2499	*start = start_limit;
   2500	*last = end_limit - 1;
   2501
   2502	pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
   2503		 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
   2504		 *start, *last, *is_heap_stack);
   2505
   2506	return 0;
   2507}
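
/*
 * Worked example (illustrative only): with 4 KiB pages, 2UL << 8 is 512 pages,
 * i.e. a 2 MiB window. A fault at page 0x12345 is therefore expanded to
 * [0x12200 0x123ff] and then clamped to the enclosing VMA and to the
 * neighbouring svm ranges found in the interval tree, so the new range never
 * overlaps an existing one.
 */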
   2508
   2509static int
   2510svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
   2511			   uint64_t *bo_s, uint64_t *bo_l)
   2512{
   2513	struct amdgpu_bo_va_mapping *mapping;
   2514	struct interval_tree_node *node;
   2515	struct amdgpu_bo *bo = NULL;
   2516	unsigned long userptr;
   2517	uint32_t i;
   2518	int r;
   2519
   2520	for (i = 0; i < p->n_pdds; i++) {
   2521		struct amdgpu_vm *vm;
   2522
   2523		if (!p->pdds[i]->drm_priv)
   2524			continue;
   2525
   2526		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
   2527		r = amdgpu_bo_reserve(vm->root.bo, false);
   2528		if (r)
   2529			return r;
   2530
   2531		/* Check userptr by searching entire vm->va interval tree */
   2532		node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
   2533		while (node) {
   2534			mapping = container_of((struct rb_node *)node,
   2535					       struct amdgpu_bo_va_mapping, rb);
   2536			bo = mapping->bo_va->base.bo;
   2537
   2538			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
   2539							 start << PAGE_SHIFT,
   2540							 last << PAGE_SHIFT,
   2541							 &userptr)) {
   2542				node = interval_tree_iter_next(node, 0, ~0ULL);
   2543				continue;
   2544			}
   2545
   2546			pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
   2547				 start, last);
   2548			if (bo_s && bo_l) {
   2549				*bo_s = userptr >> PAGE_SHIFT;
   2550				*bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
   2551			}
   2552			amdgpu_bo_unreserve(vm->root.bo);
   2553			return -EADDRINUSE;
   2554		}
   2555		amdgpu_bo_unreserve(vm->root.bo);
   2556	}
   2557	return 0;
   2558}
   2559
   2560static struct
   2561svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
   2562						struct kfd_process *p,
   2563						struct mm_struct *mm,
   2564						int64_t addr)
   2565{
   2566	struct svm_range *prange = NULL;
   2567	unsigned long start, last;
   2568	uint32_t gpuid, gpuidx;
   2569	bool is_heap_stack;
   2570	uint64_t bo_s = 0;
   2571	uint64_t bo_l = 0;
   2572	int r;
   2573
   2574	if (svm_range_get_range_boundaries(p, addr, &start, &last,
   2575					   &is_heap_stack))
   2576		return NULL;
   2577
   2578	r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
   2579	if (r != -EADDRINUSE)
   2580		r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
   2581
   2582	if (r == -EADDRINUSE) {
   2583		if (addr >= bo_s && addr <= bo_l)
   2584			return NULL;
   2585
    2586		/* Create a one page svm range if the 2MB range overlaps an existing mapping */
   2587		start = addr;
   2588		last = addr;
   2589	}
   2590
   2591	prange = svm_range_new(&p->svms, start, last);
   2592	if (!prange) {
   2593		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
   2594		return NULL;
   2595	}
   2596	if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
   2597		pr_debug("failed to get gpuid from kgd\n");
   2598		svm_range_free(prange);
   2599		return NULL;
   2600	}
   2601
   2602	if (is_heap_stack)
   2603		prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
   2604
   2605	svm_range_add_to_svms(prange);
   2606	svm_range_add_notifier_locked(mm, prange);
   2607
   2608	return prange;
   2609}
   2610
   2611/* svm_range_skip_recover - decide if prange can be recovered
   2612 * @prange: svm range structure
   2613 *
    2614 * The GPU vm retry fault handler skips recovering the range in these cases:
    2615 * 1. prange is on the deferred list to be removed after unmap; it is a stale
    2616 *    fault, and the deferred list work drains it before freeing the prange.
    2617 * 2. prange is on the deferred list to add the interval notifier after split, or
    2618 * 3. prange is a child range split from a parent prange; recover it later,
    2619 *    after the interval notifier is added.
   2620 *
   2621 * Return: true to skip recover, false to recover
   2622 */
   2623static bool svm_range_skip_recover(struct svm_range *prange)
   2624{
   2625	struct svm_range_list *svms = prange->svms;
   2626
   2627	spin_lock(&svms->deferred_list_lock);
   2628	if (list_empty(&prange->deferred_list) &&
   2629	    list_empty(&prange->child_list)) {
   2630		spin_unlock(&svms->deferred_list_lock);
   2631		return false;
   2632	}
   2633	spin_unlock(&svms->deferred_list_lock);
   2634
   2635	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
   2636		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
   2637			 svms, prange, prange->start, prange->last);
   2638		return true;
   2639	}
   2640	if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
   2641	    prange->work_item.op == SVM_OP_ADD_RANGE) {
   2642		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
   2643			 svms, prange, prange->start, prange->last);
   2644		return true;
   2645	}
   2646	return false;
   2647}
   2648
   2649static void
   2650svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
   2651		      int32_t gpuidx)
   2652{
   2653	struct kfd_process_device *pdd;
   2654
   2655	/* fault is on different page of same range
   2656	 * or fault is skipped to recover later
   2657	 * or fault is on invalid virtual address
   2658	 */
   2659	if (gpuidx == MAX_GPU_INSTANCE) {
   2660		uint32_t gpuid;
   2661		int r;
   2662
   2663		r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx);
   2664		if (r < 0)
   2665			return;
   2666	}
   2667
   2668	/* fault is recovered
    2669	 * or the fault cannot be recovered because the GPU has no access to the range
   2670	 */
   2671	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
   2672	if (pdd)
   2673		WRITE_ONCE(pdd->faults, pdd->faults + 1);
   2674}
   2675
   2676static bool
   2677svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
   2678{
   2679	unsigned long requested = VM_READ;
   2680
   2681	if (write_fault)
   2682		requested |= VM_WRITE;
   2683
   2684	pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
   2685		vma->vm_flags);
   2686	return (vma->vm_flags & requested) == requested;
   2687}
   2688
   2689int
   2690svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
   2691			uint64_t addr, bool write_fault)
   2692{
   2693	struct mm_struct *mm = NULL;
   2694	struct svm_range_list *svms;
   2695	struct svm_range *prange;
   2696	struct kfd_process *p;
   2697	uint64_t timestamp;
   2698	int32_t best_loc;
   2699	int32_t gpuidx = MAX_GPU_INSTANCE;
   2700	bool write_locked = false;
   2701	struct vm_area_struct *vma;
   2702	int r = 0;
   2703
   2704	if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
   2705		pr_debug("device does not support SVM\n");
   2706		return -EFAULT;
   2707	}
   2708
   2709	p = kfd_lookup_process_by_pasid(pasid);
   2710	if (!p) {
    2711		pr_debug("kfd process not found, pasid 0x%x\n", pasid);
   2712		return 0;
   2713	}
   2714	svms = &p->svms;
   2715
   2716	pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
   2717
   2718	if (atomic_read(&svms->drain_pagefaults)) {
   2719		pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
   2720		r = 0;
   2721		goto out;
   2722	}
   2723
   2724	if (!p->xnack_enabled) {
   2725		pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
   2726		r = -EFAULT;
   2727		goto out;
   2728	}
   2729
    2730	/* p->lead_thread is available as kfd_process_wq_release flushes the work
    2731	 * before releasing the task ref.
   2732	 */
   2733	mm = get_task_mm(p->lead_thread);
   2734	if (!mm) {
   2735		pr_debug("svms 0x%p failed to get mm\n", svms);
   2736		r = 0;
   2737		goto out;
   2738	}
   2739
   2740	mmap_read_lock(mm);
   2741retry_write_locked:
   2742	mutex_lock(&svms->lock);
   2743	prange = svm_range_from_addr(svms, addr, NULL);
   2744	if (!prange) {
   2745		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
   2746			 svms, addr);
   2747		if (!write_locked) {
   2748			/* Need the write lock to create new range with MMU notifier.
   2749			 * Also flush pending deferred work to make sure the interval
   2750			 * tree is up to date before we add a new range
   2751			 */
   2752			mutex_unlock(&svms->lock);
   2753			mmap_read_unlock(mm);
   2754			mmap_write_lock(mm);
   2755			write_locked = true;
   2756			goto retry_write_locked;
   2757		}
   2758		prange = svm_range_create_unregistered_range(adev, p, mm, addr);
   2759		if (!prange) {
   2760			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
   2761				 svms, addr);
   2762			mmap_write_downgrade(mm);
   2763			r = -EFAULT;
   2764			goto out_unlock_svms;
   2765		}
   2766	}
   2767	if (write_locked)
   2768		mmap_write_downgrade(mm);
   2769
   2770	mutex_lock(&prange->migrate_mutex);
   2771
   2772	if (svm_range_skip_recover(prange)) {
   2773		amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
   2774		r = 0;
   2775		goto out_unlock_range;
   2776	}
   2777
   2778	timestamp = ktime_to_us(ktime_get()) - prange->validate_timestamp;
   2779	/* skip duplicate vm fault on different pages of same range */
   2780	if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
   2781		pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
   2782			 svms, prange->start, prange->last);
   2783		r = 0;
   2784		goto out_unlock_range;
   2785	}
   2786
    2787	/* __do_munmap removed the VMA; return success as we are handling a stale
    2788	 * retry fault.
   2789	 */
   2790	vma = find_vma(mm, addr << PAGE_SHIFT);
   2791	if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
   2792		pr_debug("address 0x%llx VMA is removed\n", addr);
   2793		r = 0;
   2794		goto out_unlock_range;
   2795	}
   2796
   2797	if (!svm_fault_allowed(vma, write_fault)) {
   2798		pr_debug("fault addr 0x%llx no %s permission\n", addr,
   2799			write_fault ? "write" : "read");
   2800		r = -EPERM;
   2801		goto out_unlock_range;
   2802	}
   2803
   2804	best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
   2805	if (best_loc == -1) {
   2806		pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
   2807			 svms, prange->start, prange->last);
   2808		r = -EACCES;
   2809		goto out_unlock_range;
   2810	}
   2811
   2812	pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
   2813		 svms, prange->start, prange->last, best_loc,
   2814		 prange->actual_loc);
   2815
   2816	if (prange->actual_loc != best_loc) {
   2817		if (best_loc) {
   2818			r = svm_migrate_to_vram(prange, best_loc, mm);
   2819			if (r) {
   2820				pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
   2821					 r, addr);
   2822				/* Fallback to system memory if migration to
   2823				 * VRAM failed
   2824				 */
   2825				if (prange->actual_loc)
   2826					r = svm_migrate_vram_to_ram(prange, mm);
   2827				else
   2828					r = 0;
   2829			}
   2830		} else {
   2831			r = svm_migrate_vram_to_ram(prange, mm);
   2832		}
   2833		if (r) {
   2834			pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
   2835				 r, svms, prange->start, prange->last);
   2836			goto out_unlock_range;
   2837		}
   2838	}
   2839
   2840	r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
   2841	if (r)
   2842		pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
   2843			 r, svms, prange->start, prange->last);
   2844
   2845out_unlock_range:
   2846	mutex_unlock(&prange->migrate_mutex);
   2847out_unlock_svms:
   2848	mutex_unlock(&svms->lock);
   2849	mmap_read_unlock(mm);
   2850
   2851	svm_range_count_fault(adev, p, gpuidx);
   2852
   2853	mmput(mm);
   2854out:
   2855	kfd_unref_process(p);
   2856
   2857	if (r == -EAGAIN) {
   2858		pr_debug("recover vm fault later\n");
   2859		amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
   2860		r = 0;
   2861	}
   2862	return r;
   2863}
   2864
   2865void svm_range_list_fini(struct kfd_process *p)
   2866{
   2867	struct svm_range *prange;
   2868	struct svm_range *next;
   2869
   2870	pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
   2871
   2872	cancel_delayed_work_sync(&p->svms.restore_work);
   2873
   2874	/* Ensure list work is finished before process is destroyed */
   2875	flush_work(&p->svms.deferred_list_work);
   2876
   2877	/*
    2878	 * Ensure no retry fault comes in afterwards, since the page fault handler
    2879	 * would no longer find the kfd process or take the mm lock to recover it.
   2880	 */
   2881	atomic_inc(&p->svms.drain_pagefaults);
   2882	svm_range_drain_retry_fault(&p->svms);
   2883
   2884	list_for_each_entry_safe(prange, next, &p->svms.list, list) {
   2885		svm_range_unlink(prange);
   2886		svm_range_remove_notifier(prange);
   2887		svm_range_free(prange);
   2888	}
   2889
   2890	mutex_destroy(&p->svms.lock);
   2891
   2892	pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
   2893}
   2894
   2895int svm_range_list_init(struct kfd_process *p)
   2896{
   2897	struct svm_range_list *svms = &p->svms;
   2898	int i;
   2899
   2900	svms->objects = RB_ROOT_CACHED;
   2901	mutex_init(&svms->lock);
   2902	INIT_LIST_HEAD(&svms->list);
   2903	atomic_set(&svms->evicted_ranges, 0);
   2904	atomic_set(&svms->drain_pagefaults, 0);
   2905	INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
   2906	INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
   2907	INIT_LIST_HEAD(&svms->deferred_range_list);
   2908	INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
   2909	spin_lock_init(&svms->deferred_list_lock);
   2910
   2911	for (i = 0; i < p->n_pdds; i++)
   2912		if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
   2913			bitmap_set(svms->bitmap_supported, i, 1);
   2914
   2915	return 0;
   2916}
   2917
   2918/**
   2919 * svm_range_check_vm - check if virtual address range mapped already
   2920 * @p: current kfd_process
   2921 * @start: range start address, in pages
   2922 * @last: range last address, in pages
   2923 * @bo_s: mapping start address in pages if address range already mapped
   2924 * @bo_l: mapping last address in pages if address range already mapped
   2925 *
   2926 * The purpose is to avoid virtual address ranges already allocated by
   2927 * kfd_ioctl_alloc_memory_of_gpu ioctl.
    2928 * It checks the VM of each pdd in the kfd_process.
   2929 *
   2930 * Context: Process context
   2931 *
   2932 * Return 0 - OK, if the range is not mapped.
   2933 * Otherwise error code:
   2934 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
   2935 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
   2936 * a signal. Release all buffer reservations and return to user-space.
   2937 */
   2938static int
   2939svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
   2940		   uint64_t *bo_s, uint64_t *bo_l)
   2941{
   2942	struct amdgpu_bo_va_mapping *mapping;
   2943	struct interval_tree_node *node;
   2944	uint32_t i;
   2945	int r;
   2946
   2947	for (i = 0; i < p->n_pdds; i++) {
   2948		struct amdgpu_vm *vm;
   2949
   2950		if (!p->pdds[i]->drm_priv)
   2951			continue;
   2952
   2953		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
   2954		r = amdgpu_bo_reserve(vm->root.bo, false);
   2955		if (r)
   2956			return r;
   2957
   2958		node = interval_tree_iter_first(&vm->va, start, last);
   2959		if (node) {
   2960			pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
   2961				 start, last);
   2962			mapping = container_of((struct rb_node *)node,
   2963					       struct amdgpu_bo_va_mapping, rb);
   2964			if (bo_s && bo_l) {
   2965				*bo_s = mapping->start;
   2966				*bo_l = mapping->last;
   2967			}
   2968			amdgpu_bo_unreserve(vm->root.bo);
   2969			return -EADDRINUSE;
   2970		}
   2971		amdgpu_bo_unreserve(vm->root.bo);
   2972	}
   2973
   2974	return 0;
   2975}
   2976
   2977/**
   2978 * svm_range_is_valid - check if virtual address range is valid
   2979 * @p: current kfd_process
   2980 * @start: range start address, in pages
   2981 * @size: range size, in pages
   2982 *
   2983 * Valid virtual address range means it belongs to one or more VMAs
   2984 *
   2985 * Context: Process context
   2986 *
   2987 * Return:
   2988 *  0 - OK, otherwise error code
   2989 */
   2990static int
   2991svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
   2992{
   2993	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
   2994	struct vm_area_struct *vma;
   2995	unsigned long end;
   2996	unsigned long start_unchg = start;
   2997
   2998	start <<= PAGE_SHIFT;
   2999	end = start + (size << PAGE_SHIFT);
   3000	do {
   3001		vma = find_vma(p->mm, start);
   3002		if (!vma || start < vma->vm_start ||
   3003		    (vma->vm_flags & device_vma))
   3004			return -EFAULT;
   3005		start = min(end, vma->vm_end);
   3006	} while (start < end);
   3007
   3008	return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
   3009				  NULL);
   3010}
   3011
   3012/**
   3013 * svm_range_best_prefetch_location - decide the best prefetch location
   3014 * @prange: svm range structure
   3015 *
   3016 * For xnack off:
    3017 * If the range maps to a single GPU, the best prefetch location is
    3018 * prefetch_loc, which can be CPU or GPU.
    3019 *
    3020 * If the range is ACCESS or ACCESS_IN_PLACE by mGPUs, the best prefetch
    3021 * location is the prefetch_loc GPU only if the mGPUs are connected on the same
    3022 * XGMI hive; otherwise the best prefetch location is always CPU, because a GPU
    3023 * cannot coherently map the VRAM of other GPUs even with a large-BAR PCIe link.
   3024 *
   3025 * For xnack on:
    3026 * If the range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
    3027 * prefetch_loc; other GPU accesses will generate vm faults and trigger migration.
   3028 *
   3029 * If range is ACCESS_IN_PLACE by mGPUs, only if mGPU connection on XGMI same
   3030 * hive, the best prefetch location is prefetch_loc GPU, otherwise the best
   3031 * prefetch location is always CPU.
   3032 *
   3033 * Context: Process context
   3034 *
   3035 * Return:
   3036 * 0 for CPU or GPU id
   3037 */
   3038static uint32_t
   3039svm_range_best_prefetch_location(struct svm_range *prange)
   3040{
   3041	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
   3042	uint32_t best_loc = prange->prefetch_loc;
   3043	struct kfd_process_device *pdd;
   3044	struct amdgpu_device *bo_adev;
   3045	struct kfd_process *p;
   3046	uint32_t gpuidx;
   3047
   3048	p = container_of(prange->svms, struct kfd_process, svms);
   3049
   3050	if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
   3051		goto out;
   3052
   3053	bo_adev = svm_range_get_adev_by_id(prange, best_loc);
   3054	if (!bo_adev) {
   3055		WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc);
   3056		best_loc = 0;
   3057		goto out;
   3058	}
   3059
   3060	if (p->xnack_enabled)
   3061		bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
   3062	else
   3063		bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
   3064			  MAX_GPU_INSTANCE);
   3065
   3066	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
   3067		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
   3068		if (!pdd) {
   3069			pr_debug("failed to get device by idx 0x%x\n", gpuidx);
   3070			continue;
   3071		}
   3072
   3073		if (pdd->dev->adev == bo_adev)
   3074			continue;
   3075
   3076		if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
   3077			best_loc = 0;
   3078			break;
   3079		}
   3080	}
   3081
   3082out:
   3083	pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
   3084		 p->xnack_enabled, &p->svms, prange->start, prange->last,
   3085		 best_loc);
   3086
   3087	return best_loc;
   3088}
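
/*
 * Example (illustrative only): with xnack off, a range that is ACCESS by two
 * GPUs and has prefetch_loc set to one of them keeps that GPU as the best
 * prefetch location only if the other GPU is on the same XGMI hive as the
 * prefetch_loc device; otherwise the loop above falls back to best_loc = 0
 * (CPU), since the other GPU could not map the prefetched VRAM coherently.
 */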
   3089
    3090/* FIXME: This is a workaround for a page locking bug when some pages are
   3091 * invalid during migration to VRAM
   3092 */
   3093void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
   3094			void *owner)
   3095{
   3096	struct hmm_range *hmm_range;
   3097	int r;
   3098
   3099	if (prange->validated_once)
   3100		return;
   3101
   3102	r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
   3103				       prange->start << PAGE_SHIFT,
   3104				       prange->npages, &hmm_range,
   3105				       false, true, owner);
   3106	if (!r) {
   3107		amdgpu_hmm_range_get_pages_done(hmm_range);
   3108		prange->validated_once = true;
   3109	}
   3110}
   3111
   3112/* svm_range_trigger_migration - start page migration if prefetch loc changed
   3113 * @mm: current process mm_struct
   3114 * @prange: svm range structure
   3115 * @migrated: output, true if migration is triggered
   3116 *
    3117 * If the range prefetch_loc is a GPU and the actual loc is cpu 0, migrate the
    3118 * range from ram to vram.
    3119 * If the range prefetch_loc is cpu 0 and the actual loc is a GPU, migrate the
    3120 * range from vram to ram.
    3121 *
    3122 * If GPU vm fault retry is not enabled, migration interacts with the MMU
    3123 * notifier and the restore work:
    3124 * 1. migrate_vma_setup invalidates pages, the MMU notifier callback
    3125 *    svm_range_evict stops all queues and schedules the restore work
    3126 * 2. svm_range_restore_work waits for migration to finish because
    3127 *    a. svm_range_validate_vram takes prange->migrate_mutex
    3128 *    b. svm_range_validate_ram HMM get pages waits for the CPU fault handler
    3129 * 3. the restore work updates GPU mappings and resumes all queues.
   3130 *
   3131 * Context: Process context
   3132 *
   3133 * Return:
   3134 * 0 - OK, otherwise - error code of migration
   3135 */
   3136static int
   3137svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
   3138			    bool *migrated)
   3139{
   3140	uint32_t best_loc;
   3141	int r = 0;
   3142
   3143	*migrated = false;
   3144	best_loc = svm_range_best_prefetch_location(prange);
   3145
   3146	if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
   3147	    best_loc == prange->actual_loc)
   3148		return 0;
   3149
   3150	if (!best_loc) {
   3151		r = svm_migrate_vram_to_ram(prange, mm);
   3152		*migrated = !r;
   3153		return r;
   3154	}
   3155
   3156	r = svm_migrate_to_vram(prange, best_loc, mm);
   3157	*migrated = !r;
   3158
   3159	return r;
   3160}
   3161
   3162int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
   3163{
   3164	if (!fence)
   3165		return -EINVAL;
   3166
   3167	if (dma_fence_is_signaled(&fence->base))
   3168		return 0;
   3169
   3170	if (fence->svm_bo) {
   3171		WRITE_ONCE(fence->svm_bo->evicting, 1);
   3172		schedule_work(&fence->svm_bo->eviction_work);
   3173	}
   3174
   3175	return 0;
   3176}
   3177
   3178static void svm_range_evict_svm_bo_worker(struct work_struct *work)
   3179{
   3180	struct svm_range_bo *svm_bo;
   3181	struct kfd_process *p;
   3182	struct mm_struct *mm;
   3183	int r = 0;
   3184
   3185	svm_bo = container_of(work, struct svm_range_bo, eviction_work);
   3186	if (!svm_bo_ref_unless_zero(svm_bo))
   3187		return; /* svm_bo was freed while eviction was pending */
   3188
   3189	/* svm_range_bo_release destroys this worker thread. So during
   3190	 * the lifetime of this thread, kfd_process and mm will be valid.
   3191	 */
   3192	p = container_of(svm_bo->svms, struct kfd_process, svms);
   3193	mm = p->mm;
   3194	if (!mm)
   3195		return;
   3196
   3197	mmap_read_lock(mm);
   3198	spin_lock(&svm_bo->list_lock);
   3199	while (!list_empty(&svm_bo->range_list) && !r) {
   3200		struct svm_range *prange =
   3201				list_first_entry(&svm_bo->range_list,
   3202						struct svm_range, svm_bo_list);
   3203		int retries = 3;
   3204
   3205		list_del_init(&prange->svm_bo_list);
   3206		spin_unlock(&svm_bo->list_lock);
   3207
   3208		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
   3209			 prange->start, prange->last);
   3210
   3211		mutex_lock(&prange->migrate_mutex);
   3212		do {
   3213			r = svm_migrate_vram_to_ram(prange,
   3214						svm_bo->eviction_fence->mm);
   3215		} while (!r && prange->actual_loc && --retries);
   3216
   3217		if (!r && prange->actual_loc)
   3218			pr_info_once("Migration failed during eviction");
   3219
   3220		if (!prange->actual_loc) {
   3221			mutex_lock(&prange->lock);
   3222			prange->svm_bo = NULL;
   3223			mutex_unlock(&prange->lock);
   3224		}
   3225		mutex_unlock(&prange->migrate_mutex);
   3226
   3227		spin_lock(&svm_bo->list_lock);
   3228	}
   3229	spin_unlock(&svm_bo->list_lock);
   3230	mmap_read_unlock(mm);
   3231
   3232	dma_fence_signal(&svm_bo->eviction_fence->base);
   3233
   3234	/* This is the last reference to svm_bo, after svm_range_vram_node_free
   3235	 * has been called in svm_migrate_vram_to_ram
   3236	 */
   3237	WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
   3238	svm_range_bo_unref(svm_bo);
   3239}
   3240
   3241static int
   3242svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
   3243		   uint64_t start, uint64_t size, uint32_t nattr,
   3244		   struct kfd_ioctl_svm_attribute *attrs)
   3245{
   3246	struct amdkfd_process_info *process_info = p->kgd_process_info;
   3247	struct list_head update_list;
   3248	struct list_head insert_list;
   3249	struct list_head remove_list;
   3250	struct svm_range_list *svms;
   3251	struct svm_range *prange;
   3252	struct svm_range *next;
   3253	bool update_mapping = false;
   3254	bool flush_tlb;
   3255	int r = 0;
   3256
   3257	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
   3258		 p->pasid, &p->svms, start, start + size - 1, size);
   3259
   3260	r = svm_range_check_attr(p, nattr, attrs);
   3261	if (r)
   3262		return r;
   3263
   3264	svms = &p->svms;
   3265
   3266	mutex_lock(&process_info->lock);
   3267
   3268	svm_range_list_lock_and_flush_work(svms, mm);
   3269
   3270	r = svm_range_is_valid(p, start, size);
   3271	if (r) {
   3272		pr_debug("invalid range r=%d\n", r);
   3273		mmap_write_unlock(mm);
   3274		goto out;
   3275	}
   3276
   3277	mutex_lock(&svms->lock);
   3278
   3279	/* Add new range and split existing ranges as needed */
   3280	r = svm_range_add(p, start, size, nattr, attrs, &update_list,
   3281			  &insert_list, &remove_list);
   3282	if (r) {
   3283		mutex_unlock(&svms->lock);
   3284		mmap_write_unlock(mm);
   3285		goto out;
   3286	}
   3287	/* Apply changes as a transaction */
   3288	list_for_each_entry_safe(prange, next, &insert_list, list) {
   3289		svm_range_add_to_svms(prange);
   3290		svm_range_add_notifier_locked(mm, prange);
   3291	}
   3292	list_for_each_entry(prange, &update_list, update_list) {
   3293		svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
   3294		/* TODO: unmap ranges from GPU that lost access */
   3295	}
   3296	list_for_each_entry_safe(prange, next, &remove_list, update_list) {
   3297		pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
   3298			 prange->svms, prange, prange->start,
   3299			 prange->last);
   3300		svm_range_unlink(prange);
   3301		svm_range_remove_notifier(prange);
   3302		svm_range_free(prange);
   3303	}
   3304
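        	/* The svms interval tree and notifiers are up to date; only the
        	 * mmap read lock is needed for the migration and mapping work
        	 * below.
        	 */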
   3305	mmap_write_downgrade(mm);
   3306	/* Trigger migrations and revalidate and map to GPUs as needed. If
   3307	 * this fails we may be left with partially completed actions. There
   3308	 * is no clean way of rolling back to the previous state in such a
   3309	 * case because the rollback wouldn't be guaranteed to work either.
   3310	 */
   3311	list_for_each_entry(prange, &update_list, update_list) {
   3312		bool migrated;
   3313
   3314		mutex_lock(&prange->migrate_mutex);
   3315
   3316		r = svm_range_trigger_migration(mm, prange, &migrated);
   3317		if (r)
   3318			goto out_unlock_range;
   3319
   3320		if (migrated && !p->xnack_enabled) {
   3321			pr_debug("restore_work will update mappings of GPUs\n");
   3322			mutex_unlock(&prange->migrate_mutex);
   3323			continue;
   3324		}
   3325
   3326		if (!migrated && !update_mapping) {
   3327			mutex_unlock(&prange->migrate_mutex);
   3328			continue;
   3329		}
   3330
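        		/* A GPU TLB flush is only needed when an existing GPU
        		 * mapping is being updated in place.
        		 */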
   3331		flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
   3332
   3333		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
   3334					       true, true, flush_tlb);
   3335		if (r)
   3336			pr_debug("failed %d to map svm range\n", r);
   3337
   3338out_unlock_range:
   3339		mutex_unlock(&prange->migrate_mutex);
   3340		if (r)
   3341			break;
   3342	}
   3343
   3344	svm_range_debug_dump(svms);
   3345
   3346	mutex_unlock(&svms->lock);
   3347	mmap_read_unlock(mm);
   3348out:
   3349	mutex_unlock(&process_info->lock);
   3350
   3351	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
   3352		 &p->svms, start, start + size - 1, r);
   3353
   3354	return r;
   3355}
   3356
   3357static int
   3358svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
   3359		   uint64_t start, uint64_t size, uint32_t nattr,
   3360		   struct kfd_ioctl_svm_attribute *attrs)
   3361{
   3362	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
   3363	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
   3364	bool get_preferred_loc = false;
   3365	bool get_prefetch_loc = false;
   3366	bool get_granularity = false;
   3367	bool get_accessible = false;
   3368	bool get_flags = false;
   3369	uint64_t last = start + size - 1UL;
   3370	uint8_t granularity = 0xff;
   3371	struct interval_tree_node *node;
   3372	struct svm_range_list *svms;
   3373	struct svm_range *prange;
   3374	uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
   3375	uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
   3376	uint32_t flags_and = 0xffffffff;
   3377	uint32_t flags_or = 0;
   3378	int gpuidx;
   3379	uint32_t i;
   3380	int r = 0;
   3381
   3382	pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
   3383		 start + size - 1, nattr);
   3384
   3385	/* Flush pending deferred work to avoid racing with deferred actions from
   3386	 * previous memory map changes (e.g. munmap). Concurrent memory map changes
   3387	 * can still race with get_attr because we don't hold the mmap lock. But that
   3388	 * would be a race condition in the application anyway, and undefined
   3389	 * behaviour is acceptable in that case.
   3390	 */
   3391	flush_work(&p->svms.deferred_list_work);
   3392
   3393	mmap_read_lock(mm);
   3394	r = svm_range_is_valid(p, start, size);
   3395	mmap_read_unlock(mm);
   3396	if (r) {
   3397		pr_debug("invalid range r=%d\n", r);
   3398		return r;
   3399	}
   3400
   3401	for (i = 0; i < nattr; i++) {
   3402		switch (attrs[i].type) {
   3403		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
   3404			get_preferred_loc = true;
   3405			break;
   3406		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
   3407			get_prefetch_loc = true;
   3408			break;
   3409		case KFD_IOCTL_SVM_ATTR_ACCESS:
   3410			get_accessible = true;
   3411			break;
   3412		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
   3413		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
   3414			get_flags = true;
   3415			break;
   3416		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
   3417			get_granularity = true;
   3418			break;
   3419		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
   3420		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
   3421			fallthrough;
   3422		default:
   3423			pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
   3424			return -EINVAL;
   3425		}
   3426	}
   3427
   3428	svms = &p->svms;
   3429
   3430	mutex_lock(&svms->lock);
   3431
   3432	node = interval_tree_iter_first(&svms->objects, start, last);
   3433	if (!node) {
   3434		pr_debug("range attrs not found return default values\n");
   3435		svm_range_set_default_attributes(&location, &prefetch_loc,
   3436						 &granularity, &flags_and);
   3437		flags_or = flags_and;
   3438		if (p->xnack_enabled)
   3439			bitmap_copy(bitmap_access, svms->bitmap_supported,
   3440				    MAX_GPU_INSTANCE);
   3441		else
   3442			bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
   3443		bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
   3444		goto fill_values;
   3445	}
   3446	bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
   3447	bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
   3448
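        	/* Combine the attributes of all ranges overlapping [start, last]:
        	 * locations are reported only if they are defined and identical
        	 * across ranges, access bitmaps are AND-ed, flags are AND/OR
        	 * accumulated and the minimum granularity is reported.
        	 */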
   3449	while (node) {
   3450		struct interval_tree_node *next;
   3451
   3452		prange = container_of(node, struct svm_range, it_node);
   3453		next = interval_tree_iter_next(node, start, last);
   3454
   3455		if (get_preferred_loc) {
   3456			if (prange->preferred_loc ==
   3457					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
   3458			    (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
   3459			     location != prange->preferred_loc)) {
   3460				location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
   3461				get_preferred_loc = false;
   3462			} else {
   3463				location = prange->preferred_loc;
   3464			}
   3465		}
   3466		if (get_prefetch_loc) {
   3467			if (prange->prefetch_loc ==
   3468					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
   3469			    (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
   3470			     prefetch_loc != prange->prefetch_loc)) {
   3471				prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
   3472				get_prefetch_loc = false;
   3473			} else {
   3474				prefetch_loc = prange->prefetch_loc;
   3475			}
   3476		}
   3477		if (get_accessible) {
   3478			bitmap_and(bitmap_access, bitmap_access,
   3479				   prange->bitmap_access, MAX_GPU_INSTANCE);
   3480			bitmap_and(bitmap_aip, bitmap_aip,
   3481				   prange->bitmap_aip, MAX_GPU_INSTANCE);
   3482		}
   3483		if (get_flags) {
   3484			flags_and &= prange->flags;
   3485			flags_or |= prange->flags;
   3486		}
   3487
   3488		if (get_granularity && prange->granularity < granularity)
   3489			granularity = prange->granularity;
   3490
   3491		node = next;
   3492	}
   3493fill_values:
   3494	mutex_unlock(&svms->lock);
   3495
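        	/* Write the combined results back into the caller's attrs array */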
   3496	for (i = 0; i < nattr; i++) {
   3497		switch (attrs[i].type) {
   3498		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
   3499			attrs[i].value = location;
   3500			break;
   3501		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
   3502			attrs[i].value = prefetch_loc;
   3503			break;
   3504		case KFD_IOCTL_SVM_ATTR_ACCESS:
   3505			gpuidx = kfd_process_gpuidx_from_gpuid(p,
   3506							       attrs[i].value);
   3507			if (gpuidx < 0) {
   3508				pr_debug("invalid gpuid %x\n", attrs[i].value);
   3509				return -EINVAL;
   3510			}
   3511			if (test_bit(gpuidx, bitmap_access))
   3512				attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
   3513			else if (test_bit(gpuidx, bitmap_aip))
   3514				attrs[i].type =
   3515					KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
   3516			else
   3517				attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
   3518			break;
   3519		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
   3520			attrs[i].value = flags_and;
   3521			break;
   3522		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
   3523			attrs[i].value = ~flags_or;
   3524			break;
   3525		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
   3526			attrs[i].value = (uint32_t)granularity;
   3527			break;
   3528		}
   3529	}
   3530
   3531	return 0;
   3532}
   3533
   3534int kfd_criu_resume_svm(struct kfd_process *p)
   3535{
   3536	struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
   3537	int nattr_common = 4, nattr_accessibility = 1;
   3538	struct criu_svm_metadata *criu_svm_md = NULL;
   3539	struct svm_range_list *svms = &p->svms;
   3540	struct criu_svm_metadata *next = NULL;
   3541	uint32_t set_flags = 0xffffffff;
   3542	int i, j, num_attrs, ret = 0;
   3543	uint64_t set_attr_size;
   3544	struct mm_struct *mm;
   3545
   3546	if (list_empty(&svms->criu_svm_metadata_list)) {
   3547		pr_debug("No SVM data from CRIU restore stage 2\n");
   3548		return ret;
   3549	}
   3550
   3551	mm = get_task_mm(p->lead_thread);
   3552	if (!mm) {
   3553		pr_err("failed to get mm for the target process\n");
   3554		return -ESRCH;
   3555	}
   3556
   3557	num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
   3558
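        	/* Replay each checkpointed range by feeding its saved attributes
        	 * back through svm_range_set_attr.
        	 */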
   3559	i = j = 0;
   3560	list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
   3561		pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
   3562			 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
   3563
   3564		for (j = 0; j < num_attrs; j++) {
   3565			pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
   3566				 i, j, criu_svm_md->data.attrs[j].type,
   3567				 i, j, criu_svm_md->data.attrs[j].value);
   3568			switch (criu_svm_md->data.attrs[j].type) {
    3569			/* During the checkpoint operation, the query for the
    3570			 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute may
    3571			 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if it was
    3572			 * not set for the range being checkpointed. Take
    3573			 * care not to restore such an invalid value: the
    3574			 * gpuidx derived from it would be invalid and
    3575			 * set_attr would eventually fail, so replace it with
    3576			 * a harmless dummy attribute such as
    3577			 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
    3578			 */
   3579			case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
   3580				if (criu_svm_md->data.attrs[j].value ==
   3581				    KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
   3582					criu_svm_md->data.attrs[j].type =
   3583						KFD_IOCTL_SVM_ATTR_SET_FLAGS;
   3584					criu_svm_md->data.attrs[j].value = 0;
   3585				}
   3586				break;
   3587			case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
   3588				set_flags = criu_svm_md->data.attrs[j].value;
   3589				break;
   3590			default:
   3591				break;
   3592			}
   3593		}
   3594
    3595		/* CLR_FLAGS is not available via get_attr during checkpoint,
    3596		 * but it needs to be inserted before restoring the ranges, so
    3597		 * allocate extra space for it before calling set_attr.
    3598		 */
   3599		set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
   3600						(num_attrs + 1);
   3601		set_attr_new = krealloc(set_attr, set_attr_size,
   3602					    GFP_KERNEL);
   3603		if (!set_attr_new) {
   3604			ret = -ENOMEM;
   3605			goto exit;
   3606		}
   3607		set_attr = set_attr_new;
   3608
   3609		memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
   3610					sizeof(struct kfd_ioctl_svm_attribute));
   3611		set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
   3612		set_attr[num_attrs].value = ~set_flags;
   3613
   3614		ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
   3615					 criu_svm_md->data.size, num_attrs + 1,
   3616					 set_attr);
   3617		if (ret) {
   3618			pr_err("CRIU: failed to set range attributes\n");
   3619			goto exit;
   3620		}
   3621
   3622		i++;
   3623	}
   3624exit:
   3625	kfree(set_attr);
   3626	list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
   3627		pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
   3628						criu_svm_md->data.start_addr);
   3629		kfree(criu_svm_md);
   3630	}
   3631
   3632	mmput(mm);
   3633	return ret;
   3634
   3635}
   3636
   3637int kfd_criu_restore_svm(struct kfd_process *p,
   3638			 uint8_t __user *user_priv_ptr,
   3639			 uint64_t *priv_data_offset,
   3640			 uint64_t max_priv_data_size)
   3641{
   3642	uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
   3643	int nattr_common = 4, nattr_accessibility = 1;
   3644	struct criu_svm_metadata *criu_svm_md = NULL;
   3645	struct svm_range_list *svms = &p->svms;
   3646	uint32_t num_devices;
   3647	int ret = 0;
   3648
   3649	num_devices = p->n_pdds;
    3650	/* Handle one SVM range object at a time. The number of GPUs is
    3651	 * assumed to be the same on the restore node; this must have been
    3652	 * checked earlier while evaluating the topology.
    3653	 */
   3654
   3655	svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
   3656		(nattr_common + nattr_accessibility * num_devices);
   3657	svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
   3658
   3659	svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
   3660								svm_attrs_size;
   3661
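        	/* Both sizes include room for the per-range attribute array that
        	 * follows struct kfd_criu_svm_range_priv_data: the common
        	 * attributes plus one accessibility attribute per device.
        	 */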
   3662	criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
   3663	if (!criu_svm_md) {
   3664		pr_err("failed to allocate memory to store svm metadata\n");
   3665		return -ENOMEM;
   3666	}
   3667	if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
   3668		ret = -EINVAL;
   3669		goto exit;
   3670	}
   3671
   3672	ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
   3673			     svm_priv_data_size);
   3674	if (ret) {
   3675		ret = -EFAULT;
   3676		goto exit;
   3677	}
   3678	*priv_data_offset += svm_priv_data_size;
   3679
   3680	list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
   3681
   3682	return 0;
   3683
   3684
   3685exit:
   3686	kfree(criu_svm_md);
   3687	return ret;
   3688}
   3689
   3690int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
   3691		       uint64_t *svm_priv_data_size)
   3692{
   3693	uint64_t total_size, accessibility_size, common_attr_size;
   3694	int nattr_common = 4, nattr_accessibility = 1;
   3695	int num_devices = p->n_pdds;
   3696	struct svm_range_list *svms;
   3697	struct svm_range *prange;
   3698	uint32_t count = 0;
   3699
   3700	*svm_priv_data_size = 0;
   3701
   3702	svms = &p->svms;
   3703	if (!svms)
   3704		return -EINVAL;
   3705
   3706	mutex_lock(&svms->lock);
   3707	list_for_each_entry(prange, &svms->list, list) {
   3708		pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
   3709			 prange, prange->start, prange->npages,
   3710			 prange->start + prange->npages - 1);
   3711		count++;
   3712	}
   3713	mutex_unlock(&svms->lock);
   3714
   3715	*num_svm_ranges = count;
    3716	/* Only the accessibility attributes need to be queried for each GPU
    3717	 * individually; the remaining ones apply to the entire process
    3718	 * regardless of the various gpu nodes. Of the remaining attributes,
    3719	 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
    3720	 *
    3721	 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
    3722	 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
    3723	 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
    3724	 * KFD_IOCTL_SVM_ATTR_GRANULARITY
    3725	 *
    3726	 * ** ACCESSIBILITY ATTRIBUTES **
    3727	 * (Considered as one; the type is altered during query, the value is gpuid)
    3728	 * KFD_IOCTL_SVM_ATTR_ACCESS
    3729	 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
    3730	 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
    3731	 */
   3732	if (*num_svm_ranges > 0) {
   3733		common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
   3734			nattr_common;
   3735		accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
   3736			nattr_accessibility * num_devices;
   3737
   3738		total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
   3739			common_attr_size + accessibility_size;
   3740
   3741		*svm_priv_data_size = *num_svm_ranges * total_size;
   3742	}
   3743
   3744	pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
   3745		 *svm_priv_data_size);
   3746	return 0;
   3747}
   3748
   3749int kfd_criu_checkpoint_svm(struct kfd_process *p,
   3750			    uint8_t __user *user_priv_data,
   3751			    uint64_t *priv_data_offset)
   3752{
   3753	struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
   3754	struct kfd_ioctl_svm_attribute *query_attr = NULL;
   3755	uint64_t svm_priv_data_size, query_attr_size = 0;
   3756	int index, nattr_common = 4, ret = 0;
   3757	struct svm_range_list *svms;
   3758	int num_devices = p->n_pdds;
   3759	struct svm_range *prange;
   3760	struct mm_struct *mm;
   3761
   3762	svms = &p->svms;
   3763	if (!svms)
   3764		return -EINVAL;
   3765
   3766	mm = get_task_mm(p->lead_thread);
   3767	if (!mm) {
   3768		pr_err("failed to get mm for the target process\n");
   3769		return -ESRCH;
   3770	}
   3771
   3772	query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
   3773				(nattr_common + num_devices);
   3774
   3775	query_attr = kzalloc(query_attr_size, GFP_KERNEL);
   3776	if (!query_attr) {
   3777		ret = -ENOMEM;
   3778		goto exit;
   3779	}
   3780
   3781	query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
   3782	query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
   3783	query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
   3784	query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
   3785
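        	/* One ACCESS attribute per GPU: the value carries the user GPU id
        	 * and svm_range_get_attr rewrites the type to the range's actual
        	 * access state (ACCESS, ACCESS_IN_PLACE or NO_ACCESS).
        	 */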
   3786	for (index = 0; index < num_devices; index++) {
   3787		struct kfd_process_device *pdd = p->pdds[index];
   3788
   3789		query_attr[index + nattr_common].type =
   3790			KFD_IOCTL_SVM_ATTR_ACCESS;
   3791		query_attr[index + nattr_common].value = pdd->user_gpu_id;
   3792	}
   3793
   3794	svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
   3795
   3796	svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
   3797	if (!svm_priv) {
   3798		ret = -ENOMEM;
   3799		goto exit_query;
   3800	}
   3801
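        	/* Emit one kfd_criu_svm_range_priv_data record per range, holding
        	 * the range bounds and the attributes queried via
        	 * svm_range_get_attr.
        	 */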
   3802	index = 0;
   3803	list_for_each_entry(prange, &svms->list, list) {
   3804
   3805		svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
   3806		svm_priv->start_addr = prange->start;
   3807		svm_priv->size = prange->npages;
   3808		memcpy(&svm_priv->attrs, query_attr, query_attr_size);
   3809		pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
   3810			 prange, prange->start, prange->npages,
   3811			 prange->start + prange->npages - 1,
   3812			 prange->npages * PAGE_SIZE);
   3813
   3814		ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
   3815					 svm_priv->size,
   3816					 (nattr_common + num_devices),
   3817					 svm_priv->attrs);
   3818		if (ret) {
   3819			pr_err("CRIU: failed to obtain range attributes\n");
   3820			goto exit_priv;
   3821		}
   3822
   3823		if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
   3824				 svm_priv_data_size)) {
   3825			pr_err("Failed to copy svm priv to user\n");
   3826			ret = -EFAULT;
   3827			goto exit_priv;
   3828		}
   3829
   3830		*priv_data_offset += svm_priv_data_size;
   3831
   3832	}
   3833
   3834
   3835exit_priv:
   3836	kfree(svm_priv);
   3837exit_query:
   3838	kfree(query_attr);
   3839exit:
   3840	mmput(mm);
   3841	return ret;
   3842}
   3843
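        /*
         * Called from the AMDKFD_IOC_SVM ioctl handler. As a rough usage
         * sketch (see include/uapi/linux/kfd_ioctl.h for the authoritative
         * layout), userspace fills struct kfd_ioctl_svm_args with start_addr,
         * size, op, nattr and an array of struct kfd_ioctl_svm_attribute
         * {type, value} pairs, e.g. op = KFD_IOCTL_SVM_OP_SET_ATTR with a
         * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute to request migration of
         * the range to a given GPU.
         */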
   3844int
   3845svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
   3846	  uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
   3847{
   3848	struct mm_struct *mm = current->mm;
   3849	int r;
   3850
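        	/* start and size are passed in bytes by the ioctl; the SVM code
        	 * works in pages throughout.
        	 */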
   3851	start >>= PAGE_SHIFT;
   3852	size >>= PAGE_SHIFT;
   3853
   3854	switch (op) {
   3855	case KFD_IOCTL_SVM_OP_SET_ATTR:
   3856		r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
   3857		break;
   3858	case KFD_IOCTL_SVM_OP_GET_ATTR:
   3859		r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
   3860		break;
   3861	default:
    3862		r = -EINVAL;
   3863		break;
   3864	}
   3865
   3866	return r;
   3867}