cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

etnaviv_sched.c (3805B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"

static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

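/*
 * drm_sched backend hook: hand a queued job to the hardware. Returns the
 * fence that signals completion, or NULL when the job's scheduler fence
 * already carries an error and the submission is skipped.
 */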
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}

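/*
 * drm_sched backend hook invoked when a job exceeds its timeout. The
 * timeout is treated as spurious if the job's fence has already signaled
 * or the front-end DMA engine is still making progress; otherwise the
 * core state is dumped and the GPU is reset before the scheduler is
 * restarted.
 */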
static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
							  *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

	/* block scheduler */
	drm_sched_stop(&gpu->sched, sched_job);

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (gpu->completed_fence != gpu->hangcheck_fence ||
	    change < 0 || change > 16) {
		gpu->hangcheck_dma_addr = dma_addr;
		gpu->hangcheck_fence = gpu->completed_fence;
		goto out_no_timeout;
	}

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(gpu);

	drm_sched_resubmit_jobs(&gpu->sched);

	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
	/* restart scheduler after GPU is usable again */
	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

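/*
 * drm_sched backend hook: drop the scheduler's reference on a retired job,
 * taken in etnaviv_sched_push_job().
 */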
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	drm_sched_job_cleanup(sched_job);

	etnaviv_submit_put(submit);
}

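/* Backend hooks wired into the DRM GPU scheduler for each core. */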
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

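/*
 * Arm a prepared submit and queue it on its scheduler entity. An extra
 * reference is taken on the submit, which etnaviv_sched_free_job() drops
 * once the scheduler is done with the job.
 */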
int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
	int ret = 0;

	/*
	 * Hold the fence lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_arm.
	 */
	mutex_lock(&submit->gpu->fence_lock);

	drm_sched_job_arm(&submit->sched_job);

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
						submit->out_fence, 0,
						INT_MAX, GFP_KERNEL);
	if (submit->out_fence_id < 0) {
		drm_sched_job_cleanup(&submit->sched_job);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job);

out_unlock:
	mutex_unlock(&submit->gpu->fence_lock);

	return ret;
}

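/*
 * Create the scheduler instance for one GPU core: at most
 * etnaviv_hw_jobs_limit jobs in flight, etnaviv_job_hang_limit as the
 * karma threshold before an offending context is marked guilty, and a
 * 500ms hang-check timeout.
 */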
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			     msecs_to_jiffies(500), NULL, NULL,
			     dev_name(gpu->dev), gpu->dev);
	if (ret)
		return ret;

	return 0;
}

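/* Tear down the scheduler instance created by etnaviv_sched_init(). */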
void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}