cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amdgpu_job.c (8749B)
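
This file is the driver's glue to the DRM GPU scheduler: job allocation and cleanup, submission into scheduler entities, dependency resolution, and the ring-timeout/recovery handler, all wired together through amdgpu_sched_ops at the bottom of the file.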


/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info ti;
	struct amdgpu_device *adev = ring->adev;
	int idx;
	int r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
		DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s",
			 __func__, s_job->sched->name);

		/* Effectively the job is aborted as the device is gone */
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	memset(&ti, 0, sizeof(struct amdgpu_task_info));

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
		goto exit;
	}

	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);
	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
		  ti.process_name, ti.tgid, ti.task_name, ti.pid);

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		r = amdgpu_device_gpu_recover_imp(ring->adev, job);
		if (r)
			DRM_ERROR("GPU Recovery Failed: %d\n", r);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	drm_dev_exit(idx);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	if (num_ibs == 0)
		return -EINVAL;

	*job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	return 0;
}

int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
		enum amdgpu_ib_pool_type pool_type,
		struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r)
		kfree(*job);

	return r;
}

void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	struct dma_fence *hw_fence;
	unsigned i;

	if (job->hw_fence.ops == NULL)
		hw_fence = job->external_hw_fence;
	else
		hw_fence = &job->hw_fence;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	/* only put the hw fence if the job has an embedded fence */
	if (job->hw_fence.ops != NULL)
		dma_fence_put(&job->hw_fence);
	else
		kfree(job);
}

void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	/* only put the hw fence if the job has an embedded fence */
	if (job->hw_fence.ops != NULL)
		dma_fence_put(&job->hw_fence);
	else
		kfree(job);
}

int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f)
{
	int r;

	if (!f)
		return -EINVAL;

	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	drm_sched_job_arm(&job->base);

	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return 0;
}

int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
	/* record external_hw_fence for direct submit */
	job->external_hw_fence = dma_fence_get(*fence);
	if (r)
		return r;

	amdgpu_job_free(job);
	dma_fence_put(*fence);

	return 0;
}

static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
	int r;

	fence = amdgpu_sync_get_fence(&job->sync);
	if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
		r = amdgpu_sync_fence(&job->sched_sync, fence);
		if (r)
			DRM_ERROR("Error adding fence (%d)\n", r);
	}

	while (fence == NULL && vm && !job->vmid) {
		r = amdgpu_vmid_grab(vm, ring, &job->sync,
				     &job->base.s_fence->finished,
				     job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}

static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
		dma_fence_set_error(finished, -ECANCELED); /* skip IB as well if VRAM lost */

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}

	if (!job->job_run_counter)
		dma_fence_get(fence);
	else if (finished->error < 0)
		dma_fence_put(&job->hw_fence);
	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

	/* Signal all jobs not yet scheduled */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		if (!rq)
			continue;

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

	/* Signal all jobs already scheduled to HW */
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};
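
The sketch below (not part of the file above) shows how callers elsewhere in amdgpu typically drive these helpers: allocate a job with one indirect buffer, fill the IB, and push it into a scheduler entity via amdgpu_job_submit(). The function name example_submit_nop, the entity argument, and the placeholder dword are hypothetical; real users emit engine-specific packets.

	/*
	 * Minimal usage sketch, assuming an initialized device and a
	 * scheduler entity. Everything named "example_" or "placeholder"
	 * is illustrative only.
	 */
	static int example_submit_nop(struct amdgpu_device *adev,
				      struct drm_sched_entity *entity)
	{
		struct amdgpu_job *job;
		struct dma_fence *fence;
		int r;

		/* allocate a job with a single small IB from the delayed pool */
		r = amdgpu_job_alloc_with_ib(adev, 64, AMDGPU_IB_POOL_DELAYED, &job);
		if (r)
			return r;

		/* fill the IB; a real caller emits engine-specific commands here */
		job->ibs[0].ptr[0] = 0;	/* placeholder dword */
		job->ibs[0].length_dw = 1;

		/* hand the job to the scheduler; *fence signals on completion */
		r = amdgpu_job_submit(job, entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
		if (r) {
			/* submit did not take ownership, so free the job ourselves */
			amdgpu_job_free(job);
			return r;
		}

		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		return 0;
	}

For code paths that run before scheduler entities exist, amdgpu_job_submit_direct() above bypasses the scheduler and pushes the IBs onto the ring immediately, recording the resulting hardware fence as external_hw_fence.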