cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

v3d_gem.c (27770B)


      1// SPDX-License-Identifier: GPL-2.0+
      2/* Copyright (C) 2014-2018 Broadcom */
      3
      4#include <linux/device.h>
      5#include <linux/dma-mapping.h>
      6#include <linux/io.h>
      7#include <linux/module.h>
      8#include <linux/platform_device.h>
      9#include <linux/pm_runtime.h>
     10#include <linux/reset.h>
     11#include <linux/sched/signal.h>
     12#include <linux/uaccess.h>
     13
     14#include <drm/drm_syncobj.h>
     15#include <uapi/drm/v3d_drm.h>
     16
     17#include "v3d_drv.h"
     18#include "v3d_regs.h"
     19#include "v3d_trace.h"
     20
     21static void
     22v3d_init_core(struct v3d_dev *v3d, int core)
     23{
     24	/* Set OVRTMUOUT, which means that the texture sampler uniform
     25	 * configuration's tmu output type field is used, instead of
     26	 * using the hardware default behavior based on the texture
     27	 * type.  If you want the default behavior, you can still put
     28	 * "2" in the indirect texture state's output_type field.
     29	 */
     30	if (v3d->ver < 40)
     31		V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
     32
     33	/* Whenever we flush the L2T cache, we always want to flush
     34	 * the whole thing.
     35	 */
     36	V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
     37	V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
     38}
     39
     40/* Sets invariant state for the HW. */
     41static void
     42v3d_init_hw_state(struct v3d_dev *v3d)
     43{
     44	v3d_init_core(v3d, 0);
     45}
     46
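        /* Asks the GMP to stop issuing new AXI transactions and waits for
         * outstanding reads, writes and config accesses to drain.
         */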
     47static void
     48v3d_idle_axi(struct v3d_dev *v3d, int core)
     49{
     50	V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);
     51
     52	if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
     53		      (V3D_GMP_STATUS_RD_COUNT_MASK |
     54		       V3D_GMP_STATUS_WR_COUNT_MASK |
     55		       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
     56		DRM_ERROR("Failed to wait for safe GMP shutdown\n");
     57	}
     58}
     59
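        /* Requests a safe shutdown of the GCA (the block providing the L3
         * cache controls, absent on V3D 4.1+) and waits for the ack.
         */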
     60static void
     61v3d_idle_gca(struct v3d_dev *v3d)
     62{
     63	if (v3d->ver >= 41)
     64		return;
     65
     66	V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);
     67
     68	if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
     69		      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
     70		     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
     71		DRM_ERROR("Failed to wait for safe GCA shutdown\n");
     72	}
     73}
     74
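        /* Resets V3D through the GR bridge when no reset controller is
         * provided, pulsing the SW_INIT bit that matches the bridge revision.
         */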
     75static void
     76v3d_reset_by_bridge(struct v3d_dev *v3d)
     77{
     78	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);
     79
     80	if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
     81		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
     82				 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
     83		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);
     84
     85		/* GFXH-1383: The SW_INIT may cause a stray write to address 0
     86		 * of the unit, so reset it to its power-on value here.
     87		 */
     88		V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
     89	} else {
     90		WARN_ON_ONCE(V3D_GET_FIELD(version,
     91					   V3D_TOP_GR_BRIDGE_MAJOR) != 7);
     92		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
     93				 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
     94		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
     95	}
     96}
     97
     98static void
     99v3d_reset_v3d(struct v3d_dev *v3d)
    100{
    101	if (v3d->reset)
    102		reset_control_reset(v3d->reset);
    103	else
    104		v3d_reset_by_bridge(v3d);
    105
    106	v3d_init_hw_state(v3d);
    107}
    108
    109void
    110v3d_reset(struct v3d_dev *v3d)
    111{
    112	struct drm_device *dev = &v3d->drm;
    113
    114	DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n");
    115	DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n",
    116		      V3D_CORE_READ(0, V3D_ERR_STAT));
    117	trace_v3d_reset_begin(dev);
    118
    119	/* XXX: only needed for safe powerdown, not reset. */
    120	if (false)
    121		v3d_idle_axi(v3d, 0);
    122
    123	v3d_idle_gca(v3d);
    124	v3d_reset_v3d(v3d);
    125
    126	v3d_mmu_set_page_table(v3d);
    127	v3d_irq_reset(v3d);
    128
    129	v3d_perfmon_stop(v3d, v3d->active_perfmon, false);
    130
    131	trace_v3d_reset_end(dev);
    132}
    133
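        /* Flushes the L3 cache through the GCA cache controls on V3D
         * versions before 4.1; later versions have no GCA and skip this.
         */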
    134static void
    135v3d_flush_l3(struct v3d_dev *v3d)
    136{
    137	if (v3d->ver < 41) {
    138		u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);
    139
    140		V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
    141			      gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);
    142
    143		if (v3d->ver < 33) {
    144			V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
    145				      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
    146		}
    147	}
    148}
    149
    150/* Invalidates the (read-only) L2C cache.  This was the L2 cache for
    151 * uniforms and instructions on V3D 3.2.
    152 */
    153static void
    154v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
    155{
    156	if (v3d->ver > 32)
    157		return;
    158
    159	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
    160		       V3D_L2CACTL_L2CCLR |
    161		       V3D_L2CACTL_L2CENA);
    162}
    163
    164/* Invalidates texture L2 cachelines */
    165static void
    166v3d_flush_l2t(struct v3d_dev *v3d, int core)
    167{
    168	/* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
    169	 * need to wait for completion before dispatching the job --
    170	 * L2T accesses will be stalled until the flush has completed.
    171	 * However, we do need to make sure we don't try to trigger a
    172	 * new flush while the L2_CLEAN queue is trying to
    173	 * synchronously clean after a job.
    174	 */
    175	mutex_lock(&v3d->cache_clean_lock);
    176	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
    177		       V3D_L2TCACTL_L2TFLS |
    178		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
    179	mutex_unlock(&v3d->cache_clean_lock);
    180}
    181
    182/* Cleans texture L1 and L2 cachelines (writing back dirty data).
    183 *
    184 * For cleaning, which happens from the CACHE_CLEAN queue after CSD has
    185 * executed, we need to make sure that the clean is done before
    186 * signaling job completion.  So, we synchronously wait before
    187 * returning, and we make sure that L2 invalidates don't happen in the
    188 * meantime to confuse our are-we-done checks.
    189 */
    190void
    191v3d_clean_caches(struct v3d_dev *v3d)
    192{
    193	struct drm_device *dev = &v3d->drm;
    194	int core = 0;
    195
    196	trace_v3d_cache_clean_begin(dev);
    197
    198	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
    199	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
    200		       V3D_L2TCACTL_TMUWCF), 100)) {
    201		DRM_ERROR("Timeout waiting for TMU write combiner flush\n");
    202	}
    203
    204	mutex_lock(&v3d->cache_clean_lock);
    205	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
    206		       V3D_L2TCACTL_L2TFLS |
    207		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM));
    208
    209	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
    210		       V3D_L2TCACTL_L2TFLS), 100)) {
    211		DRM_ERROR("Timeout waiting for L2T clean\n");
    212	}
    213
    214	mutex_unlock(&v3d->cache_clean_lock);
    215
    216	trace_v3d_cache_clean_end(dev);
    217}
    218
    219/* Invalidates the slice caches.  These are read-only caches. */
    220static void
    221v3d_invalidate_slices(struct v3d_dev *v3d, int core)
    222{
    223	V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
    224		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
    225		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
    226		       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
    227		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
    228}
    229
    230void
    231v3d_invalidate_caches(struct v3d_dev *v3d)
    232{
    233	/* Invalidate the caches from the outside in.  That way if
    234	 * another CL's concurrent use of nearby memory were to pull
    235	 * an invalidated cacheline back in, we wouldn't leave stale
    236	 * data in the inner cache.
    237	 */
    238	v3d_flush_l3(v3d);
    239	v3d_invalidate_l2c(v3d, 0);
    240	v3d_flush_l2t(v3d, 0);
    241	v3d_invalidate_slices(v3d, 0);
    242}
    243
    244/* Takes the reservation lock on all the BOs being referenced, so that
    245 * at queue submit time we can update the reservations.
    246 *
     247 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
    248 * (all of which are on exec->unref_list).  They're entirely private
    249 * to v3d, so we don't attach dma-buf fences to them.
    250 */
    251static int
    252v3d_lock_bo_reservations(struct v3d_job *job,
    253			 struct ww_acquire_ctx *acquire_ctx)
    254{
    255	int i, ret;
    256
    257	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
    258	if (ret)
    259		return ret;
    260
    261	for (i = 0; i < job->bo_count; i++) {
    262		ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
    263		if (ret)
    264			goto fail;
    265
    266		ret = drm_sched_job_add_implicit_dependencies(&job->base,
    267							      job->bo[i], true);
    268		if (ret)
    269			goto fail;
    270	}
    271
    272	return 0;
    273
    274fail:
    275	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
    276	return ret;
    277}
    278
    279/**
    280 * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
    281 * referenced by the job.
    282 * @dev: DRM device
    283 * @file_priv: DRM file for this fd
    284 * @job: V3D job being set up
    285 * @bo_handles: GEM handles
    286 * @bo_count: Number of GEM handles passed in
    287 *
    288 * The command validator needs to reference BOs by their index within
    289 * the submitted job's BO list.  This does the validation of the job's
    290 * BO list and reference counting for the lifetime of the job.
    291 *
    292 * Note that this function doesn't need to unreference the BOs on
     293 * failure, because that will happen at v3d_job_cleanup() time.
    294 */
    295static int
    296v3d_lookup_bos(struct drm_device *dev,
    297	       struct drm_file *file_priv,
    298	       struct v3d_job *job,
    299	       u64 bo_handles,
    300	       u32 bo_count)
    301{
    302	u32 *handles;
    303	int ret = 0;
    304	int i;
    305
    306	job->bo_count = bo_count;
    307
    308	if (!job->bo_count) {
    309		/* See comment on bo_index for why we have to check
    310		 * this.
    311		 */
    312		DRM_DEBUG("Rendering requires BOs\n");
    313		return -EINVAL;
    314	}
    315
    316	job->bo = kvmalloc_array(job->bo_count,
    317				 sizeof(struct drm_gem_cma_object *),
    318				 GFP_KERNEL | __GFP_ZERO);
    319	if (!job->bo) {
    320		DRM_DEBUG("Failed to allocate validated BO pointers\n");
    321		return -ENOMEM;
    322	}
    323
    324	handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL);
    325	if (!handles) {
    326		ret = -ENOMEM;
    327		DRM_DEBUG("Failed to allocate incoming GEM handles\n");
    328		goto fail;
    329	}
    330
    331	if (copy_from_user(handles,
    332			   (void __user *)(uintptr_t)bo_handles,
    333			   job->bo_count * sizeof(u32))) {
    334		ret = -EFAULT;
    335		DRM_DEBUG("Failed to copy in GEM handles\n");
    336		goto fail;
    337	}
    338
    339	spin_lock(&file_priv->table_lock);
    340	for (i = 0; i < job->bo_count; i++) {
    341		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
    342						     handles[i]);
    343		if (!bo) {
    344			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
    345				  i, handles[i]);
    346			ret = -ENOENT;
    347			spin_unlock(&file_priv->table_lock);
    348			goto fail;
    349		}
    350		drm_gem_object_get(bo);
    351		job->bo[i] = bo;
    352	}
    353	spin_unlock(&file_priv->table_lock);
    354
    355fail:
    356	kvfree(handles);
    357	return ret;
    358}
    359
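        /* Final unref of a job: drops the BO references and fences, releases
         * the runtime-PM reference and perfmon, then frees the job itself.
         */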
    360static void
    361v3d_job_free(struct kref *ref)
    362{
    363	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
    364	int i;
    365
    366	for (i = 0; i < job->bo_count; i++) {
    367		if (job->bo[i])
    368			drm_gem_object_put(job->bo[i]);
    369	}
    370	kvfree(job->bo);
    371
    372	dma_fence_put(job->irq_fence);
    373	dma_fence_put(job->done_fence);
    374
    375	pm_runtime_mark_last_busy(job->v3d->drm.dev);
    376	pm_runtime_put_autosuspend(job->v3d->drm.dev);
    377
    378	if (job->perfmon)
    379		v3d_perfmon_put(job->perfmon);
    380
    381	kfree(job);
    382}
    383
    384static void
    385v3d_render_job_free(struct kref *ref)
    386{
    387	struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
    388						  base.refcount);
    389	struct v3d_bo *bo, *save;
    390
    391	list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
    392		drm_gem_object_put(&bo->base.base);
    393	}
    394
    395	v3d_job_free(ref);
    396}
    397
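        /* Detaches a job from the scheduler and drops the submit reference;
         * used on the ioctl error paths.
         */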
    398void v3d_job_cleanup(struct v3d_job *job)
    399{
    400	if (!job)
    401		return;
    402
    403	drm_sched_job_cleanup(&job->base);
    404	v3d_job_put(job);
    405}
    406
    407void v3d_job_put(struct v3d_job *job)
    408{
    409	kref_put(&job->refcount, job->free);
    410}
    411
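        /* Waits on all fences attached to a BO's reservation object and
         * decrements the user's timeout so a restarted ioctl does not wait
         * the full time again.
         */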
    412int
    413v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
    414		  struct drm_file *file_priv)
    415{
    416	int ret;
    417	struct drm_v3d_wait_bo *args = data;
    418	ktime_t start = ktime_get();
    419	u64 delta_ns;
    420	unsigned long timeout_jiffies =
    421		nsecs_to_jiffies_timeout(args->timeout_ns);
    422
    423	if (args->pad != 0)
    424		return -EINVAL;
    425
    426	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
    427				    true, timeout_jiffies);
    428
    429	/* Decrement the user's timeout, in case we got interrupted
    430	 * such that the ioctl will be restarted.
    431	 */
    432	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
    433	if (delta_ns < args->timeout_ns)
    434		args->timeout_ns -= delta_ns;
    435	else
    436		args->timeout_ns = 0;
    437
    438	/* Asked to wait beyond the jiffie/scheduler precision? */
    439	if (ret == -ETIME && args->timeout_ns)
    440		ret = -EAGAIN;
    441
    442	return ret;
    443}
    444
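        /* Records the fence of a wait syncobj as a scheduler dependency of
         * the job.  Only -EINVAL is treated as fatal, so a zero handle (no
         * input fence) is silently skipped.
         */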
    445static int
    446v3d_job_add_deps(struct drm_file *file_priv, struct v3d_job *job,
    447		 u32 in_sync, u32 point)
    448{
    449	struct dma_fence *in_fence = NULL;
    450	int ret;
    451
    452	ret = drm_syncobj_find_fence(file_priv, in_sync, point, 0, &in_fence);
    453	if (ret == -EINVAL)
    454		return ret;
    455
    456	return drm_sched_job_add_dependency(&job->base, in_fence);
    457}
    458
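        /* Common job setup: allocates the job container, takes a runtime-PM
         * reference, initializes the scheduler job for the target queue and
         * records the input dependencies (single in_sync or multisync list).
         */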
    459static int
    460v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
    461	     void **container, size_t size, void (*free)(struct kref *ref),
    462	     u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
    463{
    464	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
    465	struct v3d_job *job;
    466	bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
    467	int ret, i;
    468
    469	*container = kcalloc(1, size, GFP_KERNEL);
    470	if (!*container) {
     471		DRM_ERROR("Cannot allocate memory for v3d job.\n");
    472		return -ENOMEM;
    473	}
    474
    475	job = *container;
    476	job->v3d = v3d;
    477	job->free = free;
    478
    479	ret = pm_runtime_get_sync(v3d->drm.dev);
    480	if (ret < 0)
    481		goto fail;
    482
    483	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
    484				 v3d_priv);
    485	if (ret)
    486		goto fail_job;
    487
    488	if (has_multisync) {
    489		if (se->in_sync_count && se->wait_stage == queue) {
    490			struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
    491
    492			for (i = 0; i < se->in_sync_count; i++) {
    493				struct drm_v3d_sem in;
    494
    495				if (copy_from_user(&in, handle++, sizeof(in))) {
    496					ret = -EFAULT;
    497					DRM_DEBUG("Failed to copy wait dep handle.\n");
    498					goto fail_deps;
    499				}
    500				ret = v3d_job_add_deps(file_priv, job, in.handle, 0);
    501				if (ret)
    502					goto fail_deps;
    503			}
    504		}
    505	} else {
    506		ret = v3d_job_add_deps(file_priv, job, in_sync, 0);
    507		if (ret)
    508			goto fail_deps;
    509	}
    510
    511	kref_init(&job->refcount);
    512
    513	return 0;
    514
    515fail_deps:
    516	drm_sched_job_cleanup(&job->base);
    517fail_job:
    518	pm_runtime_put_autosuspend(v3d->drm.dev);
    519fail:
    520	kfree(*container);
    521	*container = NULL;
    522
    523	return ret;
    524}
    525
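        /* Arms the scheduler job, grabs its "finished" fence as the job's
         * done_fence and pushes it to the entity; the extra reference taken
         * here is dropped when the scheduler completes the job.
         */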
    526static void
    527v3d_push_job(struct v3d_job *job)
    528{
    529	drm_sched_job_arm(&job->base);
    530
    531	job->done_fence = dma_fence_get(&job->base.s_fence->finished);
    532
    533	/* put by scheduler job completion */
    534	kref_get(&job->refcount);
    535
    536	drm_sched_entity_push_job(&job->base);
    537}
    538
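        /* Attaches the job's done fence to every referenced BO, drops the
         * reservation locks and installs the fence in the requested
         * out-syncobj(s).
         */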
    539static void
    540v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
    541					 struct v3d_job *job,
    542					 struct ww_acquire_ctx *acquire_ctx,
    543					 u32 out_sync,
    544					 struct v3d_submit_ext *se,
    545					 struct dma_fence *done_fence)
    546{
    547	struct drm_syncobj *sync_out;
    548	bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
    549	int i;
    550
    551	for (i = 0; i < job->bo_count; i++) {
    552		/* XXX: Use shared fences for read-only objects. */
    553		dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
    554				   DMA_RESV_USAGE_WRITE);
    555	}
    556
    557	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
    558
    559	/* Update the return sync object for the job */
     560	/* If only a single signal semaphore is supported */
    561	if (!has_multisync) {
    562		sync_out = drm_syncobj_find(file_priv, out_sync);
    563		if (sync_out) {
    564			drm_syncobj_replace_fence(sync_out, done_fence);
    565			drm_syncobj_put(sync_out);
    566		}
    567		return;
    568	}
    569
    570	/* If multiple semaphores extension is supported */
    571	if (se->out_sync_count) {
    572		for (i = 0; i < se->out_sync_count; i++) {
    573			drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
    574						  done_fence);
    575			drm_syncobj_put(se->out_syncs[i].syncobj);
    576		}
    577		kvfree(se->out_syncs);
    578	}
    579}
    580
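        /* Drops the out-syncobj references taken by
         * v3d_get_multisync_post_deps(); used on the submit error paths.
         */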
    581static void
    582v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
    583{
    584	unsigned int i;
    585
    586	if (!(se && se->out_sync_count))
    587		return;
    588
    589	for (i = 0; i < se->out_sync_count; i++)
    590		drm_syncobj_put(se->out_syncs[i].syncobj);
    591	kvfree(se->out_syncs);
    592}
    593
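        /* Copies in the user's array of post-submit semaphore handles and
         * takes a reference on each syncobj to be signaled on completion.
         */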
    594static int
    595v3d_get_multisync_post_deps(struct drm_file *file_priv,
    596			    struct v3d_submit_ext *se,
    597			    u32 count, u64 handles)
    598{
    599	struct drm_v3d_sem __user *post_deps;
    600	int i, ret;
    601
    602	if (!count)
    603		return 0;
    604
    605	se->out_syncs = (struct v3d_submit_outsync *)
    606			kvmalloc_array(count,
    607				       sizeof(struct v3d_submit_outsync),
    608				       GFP_KERNEL);
    609	if (!se->out_syncs)
    610		return -ENOMEM;
    611
    612	post_deps = u64_to_user_ptr(handles);
    613
    614	for (i = 0; i < count; i++) {
    615		struct drm_v3d_sem out;
    616
    617		if (copy_from_user(&out, post_deps++, sizeof(out))) {
    618			ret = -EFAULT;
    619			DRM_DEBUG("Failed to copy post dep handles\n");
    620			goto fail;
    621		}
    622
    623		se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
    624							    out.handle);
    625		if (!se->out_syncs[i].syncobj) {
    626			ret = -EINVAL;
    627			goto fail;
    628		}
    629	}
    630	se->out_sync_count = count;
    631
    632	return 0;
    633
    634fail:
    635	for (i--; i >= 0; i--)
    636		drm_syncobj_put(se->out_syncs[i].syncobj);
    637	kvfree(se->out_syncs);
    638
    639	return ret;
    640}
    641
     642/* Get data for synchronization with multiple binary semaphores: parse the
     643 * syncobjs to be signaled when the job completes (out_sync).
     644 */
    645static int
    646v3d_get_multisync_submit_deps(struct drm_file *file_priv,
    647			      struct drm_v3d_extension __user *ext,
    648			      void *data)
    649{
    650	struct drm_v3d_multi_sync multisync;
    651	struct v3d_submit_ext *se = data;
    652	int ret;
    653
    654	if (copy_from_user(&multisync, ext, sizeof(multisync)))
    655		return -EFAULT;
    656
    657	if (multisync.pad)
    658		return -EINVAL;
    659
    660	ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count,
    661					  multisync.out_syncs);
    662	if (ret)
    663		return ret;
    664
    665	se->in_sync_count = multisync.in_sync_count;
    666	se->in_syncs = multisync.in_syncs;
    667	se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
    668	se->wait_stage = multisync.wait_stage;
    669
    670	return 0;
    671}
    672
     673/* When userspace passes ioctl extensions, v3d_get_extensions() walks the
     674 * extension list and parses each entry according to its id.
     675 */
    676static int
    677v3d_get_extensions(struct drm_file *file_priv,
    678		   u64 ext_handles,
    679		   void *data)
    680{
    681	struct drm_v3d_extension __user *user_ext;
    682	int ret;
    683
    684	user_ext = u64_to_user_ptr(ext_handles);
    685	while (user_ext) {
    686		struct drm_v3d_extension ext;
    687
    688		if (copy_from_user(&ext, user_ext, sizeof(ext))) {
    689			DRM_DEBUG("Failed to copy submit extension\n");
    690			return -EFAULT;
    691		}
    692
    693		switch (ext.id) {
    694		case DRM_V3D_EXT_ID_MULTI_SYNC:
    695			ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data);
    696			if (ret)
    697				return ret;
    698			break;
    699		default:
    700			DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
    701			return -EINVAL;
    702		}
    703
    704		user_ext = u64_to_user_ptr(ext.next);
    705	}
    706
    707	return 0;
    708}
    709
    710/**
    711 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
    712 * @dev: DRM device
    713 * @data: ioctl argument
    714 * @file_priv: DRM file for this fd
    715 *
    716 * This is the main entrypoint for userspace to submit a 3D frame to
    717 * the GPU.  Userspace provides the binner command list (if
    718 * applicable), and the kernel sets up the render command list to draw
    719 * to the framebuffer described in the ioctl, using the command lists
    720 * that the 3D engine's binner will produce.
    721 */
    722int
    723v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
    724		    struct drm_file *file_priv)
    725{
    726	struct v3d_dev *v3d = to_v3d_dev(dev);
    727	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
    728	struct drm_v3d_submit_cl *args = data;
    729	struct v3d_submit_ext se = {0};
    730	struct v3d_bin_job *bin = NULL;
    731	struct v3d_render_job *render = NULL;
    732	struct v3d_job *clean_job = NULL;
    733	struct v3d_job *last_job;
    734	struct ww_acquire_ctx acquire_ctx;
    735	int ret = 0;
    736
    737	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
    738
    739	if (args->pad)
    740		return -EINVAL;
    741
    742	if (args->flags &&
    743	    args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
    744			    DRM_V3D_SUBMIT_EXTENSION)) {
    745		DRM_INFO("invalid flags: %d\n", args->flags);
    746		return -EINVAL;
    747	}
    748
    749	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
    750		ret = v3d_get_extensions(file_priv, args->extensions, &se);
    751		if (ret) {
    752			DRM_DEBUG("Failed to get extensions.\n");
    753			return ret;
    754		}
    755	}
    756
    757	ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render),
    758			   v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
    759	if (ret)
    760		goto fail;
    761
    762	render->start = args->rcl_start;
    763	render->end = args->rcl_end;
    764	INIT_LIST_HEAD(&render->unref_list);
    765
    766	if (args->bcl_start != args->bcl_end) {
    767		ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin),
    768				   v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
    769		if (ret)
    770			goto fail;
    771
    772		bin->start = args->bcl_start;
    773		bin->end = args->bcl_end;
    774		bin->qma = args->qma;
    775		bin->qms = args->qms;
    776		bin->qts = args->qts;
    777		bin->render = render;
    778	}
    779
    780	if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
    781		ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
    782				   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
    783		if (ret)
    784			goto fail;
    785
    786		last_job = clean_job;
    787	} else {
    788		last_job = &render->base;
    789	}
    790
    791	ret = v3d_lookup_bos(dev, file_priv, last_job,
    792			     args->bo_handles, args->bo_handle_count);
    793	if (ret)
    794		goto fail;
    795
    796	ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
    797	if (ret)
    798		goto fail;
    799
    800	if (args->perfmon_id) {
    801		render->base.perfmon = v3d_perfmon_find(v3d_priv,
    802							args->perfmon_id);
    803
    804		if (!render->base.perfmon) {
    805			ret = -ENOENT;
    806			goto fail_perfmon;
    807		}
    808	}
    809
    810	mutex_lock(&v3d->sched_lock);
    811	if (bin) {
    812		bin->base.perfmon = render->base.perfmon;
    813		v3d_perfmon_get(bin->base.perfmon);
    814		v3d_push_job(&bin->base);
    815
    816		ret = drm_sched_job_add_dependency(&render->base.base,
    817						   dma_fence_get(bin->base.done_fence));
    818		if (ret)
    819			goto fail_unreserve;
    820	}
    821
    822	v3d_push_job(&render->base);
    823
    824	if (clean_job) {
    825		struct dma_fence *render_fence =
    826			dma_fence_get(render->base.done_fence);
    827		ret = drm_sched_job_add_dependency(&clean_job->base,
    828						   render_fence);
    829		if (ret)
    830			goto fail_unreserve;
    831		clean_job->perfmon = render->base.perfmon;
    832		v3d_perfmon_get(clean_job->perfmon);
    833		v3d_push_job(clean_job);
    834	}
    835
    836	mutex_unlock(&v3d->sched_lock);
    837
    838	v3d_attach_fences_and_unlock_reservation(file_priv,
    839						 last_job,
    840						 &acquire_ctx,
    841						 args->out_sync,
    842						 &se,
    843						 last_job->done_fence);
    844
    845	if (bin)
    846		v3d_job_put(&bin->base);
    847	v3d_job_put(&render->base);
    848	if (clean_job)
    849		v3d_job_put(clean_job);
    850
    851	return 0;
    852
    853fail_unreserve:
    854	mutex_unlock(&v3d->sched_lock);
    855fail_perfmon:
    856	drm_gem_unlock_reservations(last_job->bo,
    857				    last_job->bo_count, &acquire_ctx);
    858fail:
    859	v3d_job_cleanup((void *)bin);
    860	v3d_job_cleanup((void *)render);
    861	v3d_job_cleanup(clean_job);
    862	v3d_put_multisync_post_deps(&se);
    863
    864	return ret;
    865}
    866
    867/**
    868 * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
    869 * @dev: DRM device
    870 * @data: ioctl argument
    871 * @file_priv: DRM file for this fd
    872 *
    873 * Userspace provides the register setup for the TFU, which we don't
    874 * need to validate since the TFU is behind the MMU.
    875 */
    876int
    877v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
    878		     struct drm_file *file_priv)
    879{
    880	struct v3d_dev *v3d = to_v3d_dev(dev);
    881	struct drm_v3d_submit_tfu *args = data;
    882	struct v3d_submit_ext se = {0};
    883	struct v3d_tfu_job *job = NULL;
    884	struct ww_acquire_ctx acquire_ctx;
    885	int ret = 0;
    886
    887	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
    888
    889	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
    890		DRM_DEBUG("invalid flags: %d\n", args->flags);
    891		return -EINVAL;
    892	}
    893
    894	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
    895		ret = v3d_get_extensions(file_priv, args->extensions, &se);
    896		if (ret) {
    897			DRM_DEBUG("Failed to get extensions.\n");
    898			return ret;
    899		}
    900	}
    901
    902	ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
    903			   v3d_job_free, args->in_sync, &se, V3D_TFU);
    904	if (ret)
    905		goto fail;
    906
    907	job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
    908			       sizeof(*job->base.bo), GFP_KERNEL);
    909	if (!job->base.bo) {
    910		ret = -ENOMEM;
    911		goto fail;
    912	}
    913
    914	job->args = *args;
    915
    916	spin_lock(&file_priv->table_lock);
    917	for (job->base.bo_count = 0;
    918	     job->base.bo_count < ARRAY_SIZE(args->bo_handles);
    919	     job->base.bo_count++) {
    920		struct drm_gem_object *bo;
    921
    922		if (!args->bo_handles[job->base.bo_count])
    923			break;
    924
    925		bo = idr_find(&file_priv->object_idr,
    926			      args->bo_handles[job->base.bo_count]);
    927		if (!bo) {
    928			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
    929				  job->base.bo_count,
    930				  args->bo_handles[job->base.bo_count]);
    931			ret = -ENOENT;
    932			spin_unlock(&file_priv->table_lock);
    933			goto fail;
    934		}
    935		drm_gem_object_get(bo);
    936		job->base.bo[job->base.bo_count] = bo;
    937	}
    938	spin_unlock(&file_priv->table_lock);
    939
    940	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
    941	if (ret)
    942		goto fail;
    943
    944	mutex_lock(&v3d->sched_lock);
    945	v3d_push_job(&job->base);
    946	mutex_unlock(&v3d->sched_lock);
    947
    948	v3d_attach_fences_and_unlock_reservation(file_priv,
    949						 &job->base, &acquire_ctx,
    950						 args->out_sync,
    951						 &se,
    952						 job->base.done_fence);
    953
    954	v3d_job_put(&job->base);
    955
    956	return 0;
    957
    958fail:
    959	v3d_job_cleanup((void *)job);
    960	v3d_put_multisync_post_deps(&se);
    961
    962	return ret;
    963}
    964
    965/**
     966 * v3d_submit_csd_ioctl() - Submits a CSD (compute shader dispatch) job to the V3D.
    967 * @dev: DRM device
    968 * @data: ioctl argument
    969 * @file_priv: DRM file for this fd
    970 *
    971 * Userspace provides the register setup for the CSD, which we don't
    972 * need to validate since the CSD is behind the MMU.
    973 */
    974int
    975v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
    976		     struct drm_file *file_priv)
    977{
    978	struct v3d_dev *v3d = to_v3d_dev(dev);
    979	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
    980	struct drm_v3d_submit_csd *args = data;
    981	struct v3d_submit_ext se = {0};
    982	struct v3d_csd_job *job = NULL;
    983	struct v3d_job *clean_job = NULL;
    984	struct ww_acquire_ctx acquire_ctx;
    985	int ret;
    986
    987	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
    988
    989	if (args->pad)
    990		return -EINVAL;
    991
    992	if (!v3d_has_csd(v3d)) {
    993		DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
    994		return -EINVAL;
    995	}
    996
    997	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
    998		DRM_INFO("invalid flags: %d\n", args->flags);
    999		return -EINVAL;
   1000	}
   1001
   1002	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
   1003		ret = v3d_get_extensions(file_priv, args->extensions, &se);
   1004		if (ret) {
   1005			DRM_DEBUG("Failed to get extensions.\n");
   1006			return ret;
   1007		}
   1008	}
   1009
   1010	ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
   1011			   v3d_job_free, args->in_sync, &se, V3D_CSD);
   1012	if (ret)
   1013		goto fail;
   1014
   1015	ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
   1016			   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
   1017	if (ret)
   1018		goto fail;
   1019
   1020	job->args = *args;
   1021
   1022	ret = v3d_lookup_bos(dev, file_priv, clean_job,
   1023			     args->bo_handles, args->bo_handle_count);
   1024	if (ret)
   1025		goto fail;
   1026
   1027	ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
   1028	if (ret)
   1029		goto fail;
   1030
   1031	if (args->perfmon_id) {
   1032		job->base.perfmon = v3d_perfmon_find(v3d_priv,
   1033						     args->perfmon_id);
   1034		if (!job->base.perfmon) {
   1035			ret = -ENOENT;
   1036			goto fail_perfmon;
   1037		}
   1038	}
   1039
   1040	mutex_lock(&v3d->sched_lock);
   1041	v3d_push_job(&job->base);
   1042
   1043	ret = drm_sched_job_add_dependency(&clean_job->base,
   1044					   dma_fence_get(job->base.done_fence));
   1045	if (ret)
   1046		goto fail_unreserve;
   1047
   1048	v3d_push_job(clean_job);
   1049	mutex_unlock(&v3d->sched_lock);
   1050
   1051	v3d_attach_fences_and_unlock_reservation(file_priv,
   1052						 clean_job,
   1053						 &acquire_ctx,
   1054						 args->out_sync,
   1055						 &se,
   1056						 clean_job->done_fence);
   1057
   1058	v3d_job_put(&job->base);
   1059	v3d_job_put(clean_job);
   1060
   1061	return 0;
   1062
   1063fail_unreserve:
   1064	mutex_unlock(&v3d->sched_lock);
   1065fail_perfmon:
   1066	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
   1067				    &acquire_ctx);
   1068fail:
   1069	v3d_job_cleanup((void *)job);
   1070	v3d_job_cleanup(clean_job);
   1071	v3d_put_multisync_post_deps(&se);
   1072
   1073	return ret;
   1074}
   1075
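        /* One-time GEM setup: per-queue fence contexts, locks, the 4MB MMU
         * page table and the GPU scheduler.
         */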
   1076int
   1077v3d_gem_init(struct drm_device *dev)
   1078{
   1079	struct v3d_dev *v3d = to_v3d_dev(dev);
   1080	u32 pt_size = 4096 * 1024;
   1081	int ret, i;
   1082
   1083	for (i = 0; i < V3D_MAX_QUEUES; i++)
   1084		v3d->queue[i].fence_context = dma_fence_context_alloc(1);
   1085
   1086	spin_lock_init(&v3d->mm_lock);
   1087	spin_lock_init(&v3d->job_lock);
   1088	mutex_init(&v3d->bo_lock);
   1089	mutex_init(&v3d->reset_lock);
   1090	mutex_init(&v3d->sched_lock);
   1091	mutex_init(&v3d->cache_clean_lock);
   1092
   1093	/* Note: We don't allocate address 0.  Various bits of HW
   1094	 * treat 0 as special, such as the occlusion query counters
   1095	 * where 0 means "disabled".
   1096	 */
   1097	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);
   1098
   1099	v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size,
   1100			       &v3d->pt_paddr,
   1101			       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
   1102	if (!v3d->pt) {
   1103		drm_mm_takedown(&v3d->mm);
   1104		dev_err(v3d->drm.dev,
   1105			"Failed to allocate page tables. Please ensure you have CMA enabled.\n");
   1106		return -ENOMEM;
   1107	}
   1108
   1109	v3d_init_hw_state(v3d);
   1110	v3d_mmu_set_page_table(v3d);
   1111
   1112	ret = v3d_sched_init(v3d);
   1113	if (ret) {
   1114		drm_mm_takedown(&v3d->mm);
   1115		dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
   1116				  v3d->pt_paddr);
   1117	}
   1118
    1119	return ret;
   1120}
   1121
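        /* Tears down the scheduler and the MMU page table and address space
         * created by v3d_gem_init().
         */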
   1122void
   1123v3d_gem_destroy(struct drm_device *dev)
   1124{
   1125	struct v3d_dev *v3d = to_v3d_dev(dev);
   1126
   1127	v3d_sched_fini(v3d);
   1128
   1129	/* Waiting for jobs to finish would need to be done before
   1130	 * unregistering V3D.
   1131	 */
   1132	WARN_ON(v3d->bin_job);
   1133	WARN_ON(v3d->render_job);
   1134
   1135	drm_mm_takedown(&v3d->mm);
   1136
   1137	dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
   1138			  v3d->pt_paddr);
   1139}