cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amdgpu_ids.c (15161B)


/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>


#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

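	/*
	 * Each pass tries to allocate from the range
	 * [1 << (bits - 1), 1 << bits), stepping the width down one bit at
	 * a time, so that small PASID values stay available for callers
	 * that request narrower widths.
	 */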
	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_simple_get(&amdgpu_pasid_ida,
				       1U << (bits - 1), 1U << bits,
				       GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}
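
/*
 * Minimal usage sketch (hypothetical caller): allocate a 16-bit wide
 * PASID, bail out on error, and free it again when the address space
 * goes away.
 *
 *	int pasid = amdgpu_pasid_alloc(16);
 *	if (pasid < 0)
 *		return pasid;
 *	...
 *	amdgpu_pasid_free(pasid);
 */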

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(u32 pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_simple_remove(&amdgpu_pasid_ida, pasid);
}

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       u32 pasid)
{
	struct amdgpu_pasid_cb *cb;
	struct dma_fence *fence;
	int r;

	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto fallback;

	if (!fence) {
		amdgpu_pasid_free(pasid);
		return;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
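		/*
		 * dma_fence_add_callback() fails with -ENOENT if the fence
		 * has already signaled; in that case run the callback
		 * directly so the PASID is freed right away.
		 */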
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete; as a last resort,
	 * block until all the fences complete.
	 */
	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
			      false, MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}

/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @idle: resulting idle VMID
 *
 * Try to find an idle VMID; if none is idle, add a fence to wait on to the
 * sync object. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_sync *sync,
				 struct amdgpu_vmid **idle)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;
	int r;

	if (!dma_fence_is_signaled(ring->vmid_wait))
		return amdgpu_sync_fence(sync, ring->vmid_wait);

	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		/* Don't use per engine and per process VMID at the same time */
		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
			NULL : ring;

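		/*
		 * A NULL fence from amdgpu_sync_peek_fence() means this VMID
		 * has no pending work left (for this ring), i.e. it is idle.
		 */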
		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		*idle = NULL;
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

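		/*
		 * On success the fence array takes ownership of the fences
		 * and of the fences buffer itself; the references taken
		 * above are consumed by it.
		 */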
		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		r = amdgpu_sync_fence(sync, &array->base);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return r;
	}
	kfree(fences);

	return 0;
}

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job that wants to use the VMID
 * @id: resulting VMID
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_sync *sync,
				     struct dma_fence *fence,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	uint64_t fence_context = adev->fence_context + ring->idx;
	bool needs_flush = vm->use_cpu_for_update;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	*id = vm->reserved_vmid[vmhub];
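	/*
	 * The reserved VMID cannot be used as-is if it was last owned by a
	 * different VM, points at another page directory, missed TLB
	 * updates, or its last flush may not have completed yet.
	 */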
	if ((*id)->owner != vm->immediate.fence_context ||
	    (*id)->pd_gpu_addr != job->vm_pd_addr ||
	    (*id)->flushed_updates < updates ||
	    !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* Don't use per engine and per process VMID at the same time */
		if (adev->vm_manager.concurrent_flush)
			ring = NULL;

		/* to prevent one context being starved by another context */
		(*id)->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
		if (tmp) {
			*id = NULL;
			return amdgpu_sync_fence(sync, tmp);
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, fence);
	if (r)
		return r;

	(*id)->flushed_updates = updates;
	job->vm_needs_flush = needs_flush;
	return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job that wants to use the VMID
 * @id: resulting VMID
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_sync *sync,
				 struct dma_fence *fence,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

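	/*
	 * Walk the LRU list from its tail, i.e. starting with the most
	 * recently used VMIDs, since those are the most likely matches.
	 */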
	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->immediate.fence_context)
			continue;

		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		if ((*id)->flushed_updates < updates)
			needs_flush = true;

		if (needs_flush && !adev->vm_manager.concurrent_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active, fence);
		if (r)
			return r;

		(*id)->flushed_updates = updates;
		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job that wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_sync *sync, struct dma_fence *fence,
		     struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->funcs->vmhub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
	if (r || !idle)
		goto error;

	if (vm->reserved_vmid[vmhub]) {
		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
		if (r)
			goto error;

		if (!id) {
			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active, fence);
			if (r)
				goto error;

			id->flushed_updates = amdgpu_vm_tlb_seq(vm);
			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	if (job->vm_needs_flush) {
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
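	/* The VMID number is simply the index of the ID in the per-hub array */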
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;
	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr;
	struct amdgpu_vmid *idle;
	int r = 0;

	id_mgr = &adev->vm_manager.id_mgr[vmhub];
	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])
		goto unlock;
	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
	    AMDGPU_VM_MAX_RESERVED_VMID) {
		DRM_ERROR("Over limit of reserved vmids\n");
		atomic_dec(&id_mgr->reserved_vmid_num);
		r = -EINVAL;
		goto unlock;
	}
	/* Reserve the first (least recently used) VMID in the LRU list */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
	mutex_unlock(&id_mgr->lock);

	return 0;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub]) {
		list_add(&vm->reserved_vmid[vmhub]->list,
			&id_mgr->ids_lru);
		vm->reserved_vmid[vmhub] = NULL;
		atomic_dec(&id_mgr->reserved_vmid_num);
	}
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub type
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force a switch on the next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

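		/* skip VMID 0, which is reserved for the system VM */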
		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		atomic_set(&id_mgr->reserved_vmid_num, 0);

		/* manage only VMIDs not used by KFD */
		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
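		/* release the sync object and fences of every VMID slot */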
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}