cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

msm_submitqueue.c (6433B)


// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"

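/*
 * sysprof levels as used below (inferred from the two switch
 * statements, not stated elsewhere in this file): 0 disables profiling
 * for this context, 1 takes a reference on the GPU's sysprof_active
 * count, and 2 additionally holds a pm_runtime reference so the device
 * stays powered while profiling.
 */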
int msm_file_private_set_sysprof(struct msm_file_private *ctx,
				 struct msm_gpu *gpu, int sysprof)
{
	/*
	 * Since pm_runtime and sysprof_active are both refcounts, we
	 * apply the new value first, and then unwind the previous
	 * value
	 */

	switch (sysprof) {
	default:
		return -EINVAL;
	case 2:
		pm_runtime_get_sync(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_inc(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	/* unwind old value: */
	switch (ctx->sysprof) {
	case 2:
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_dec(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	ctx->sysprof = sysprof;

	return 0;
}

void __msm_file_private_destroy(struct kref *kref)
{
	struct msm_file_private *ctx = container_of(kref,
		struct msm_file_private, ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
		if (!ctx->entities[i])
			continue;

		drm_sched_entity_destroy(ctx->entities[i]);
		kfree(ctx->entities[i]);
	}

	msm_gem_address_space_put(ctx->aspace);
	kfree(ctx->comm);
	kfree(ctx->cmdline);
	kfree(ctx);
}

void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	idr_destroy(&queue->fence_idr);

	msm_file_private_put(queue->ctx);

	kfree(queue);
}

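/*
 * Lookup helper: find a submitqueue by id under the context's rwlock
 * and return it with an extra reference held, so the caller is
 * expected to drop it with msm_submitqueue_put() when done.
 */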
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

void msm_submitqueue_close(struct msm_file_private *ctx)
{
	struct msm_gpu_submitqueue *entry, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock needed in close and there won't
	 * be any more user ioctls coming our way
	 */
	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
		list_del(&entry->node);
		msm_submitqueue_put(entry);
	}
}

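/*
 * Scheduler entities are kept in a flat per-context array indexed by
 * (ring_nr * NR_SCHED_PRIORITIES) + sched_prio and created lazily on
 * first use, serialized by a function-local mutex.  For illustration
 * only (the actual constants live elsewhere in the driver): with
 * NR_SCHED_PRIORITIES == 3, ring 2 at scheduler priority 1 maps to
 * idx == 7.
 */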
static struct drm_sched_entity *
get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
		 unsigned ring_nr, enum drm_sched_priority sched_prio)
{
	static DEFINE_MUTEX(entity_lock);
	unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;

	/* We should have already validated that the requested priority is
	 * valid by the time we get here.
	 */
	if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
		return ERR_PTR(-EINVAL);

	mutex_lock(&entity_lock);

	if (!ctx->entities[idx]) {
		struct drm_sched_entity *entity;
		struct drm_gpu_scheduler *sched = &ring->sched;
		int ret;

		entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
		if (!entity) {
			/* Bail out on allocation failure instead of passing
			 * NULL to drm_sched_entity_init()
			 */
			mutex_unlock(&entity_lock);
			return ERR_PTR(-ENOMEM);
		}

		ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
		if (ret) {
			mutex_unlock(&entity_lock);
			kfree(entity);
			return ERR_PTR(ret);
		}

		ctx->entities[idx] = entity;
	}

	mutex_unlock(&entity_lock);

	return ctx->entities[idx];
}

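/*
 * Create a new submitqueue for this context.  The queue takes a
 * reference on the file private, is bound to one ring/priority pair
 * via its scheduler entity, and is handed back to userspace by id.
 * In the wider driver this is presumably reached from the
 * SUBMITQUEUE_NEW ioctl path, which is not part of this file.
 */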
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;
	enum drm_sched_priority sched_prio;
	unsigned ring_nr;
	int ret;

	if (!ctx)
		return -ENODEV;

	if (!priv->gpu)
		return -ENODEV;

	ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
	if (ret)
		return ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;
	queue->ring_nr = ring_nr;

	queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
					 ring_nr, sched_prio);
	if (IS_ERR(queue->entity)) {
		ret = PTR_ERR(queue->entity);
		kfree(queue);
		return ret;
	}

	write_lock(&ctx->queuelock);

	queue->ctx = msm_file_private_get(ctx);
	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	idr_init(&queue->fence_idr);
	mutex_init(&queue->lock);

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}

/*
 * Create the default submit-queue (id==0), used for backwards compatibility
 * for userspace that pre-dates the introduction of submitqueues.
 */
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
	struct msm_drm_private *priv = drm->dev_private;
	int default_prio, max_priority;

	if (!priv->gpu)
		return -ENODEV;

	max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;

	/*
	 * Pick a medium priority level as default.  Lower numeric value is
	 * higher priority, so round-up to pick a priority that is not higher
	 * than the middle priority level.
	 */
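	/*
	 * Worked example with illustrative values (not taken from this
	 * file): a GPU with 4 rings and NR_SCHED_PRIORITIES == 3 gives
	 * max_priority == 11, so default_prio == DIV_ROUND_UP(11, 2) == 6,
	 * i.e. just below the middle in priority, since lower numbers
	 * mean higher priority.
	 */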
	default_prio = DIV_ROUND_UP(max_priority, 2);

	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
		struct drm_msm_submitqueue_query *args)
{
	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
	int ret;

	/* If a zero length was passed in, return the data size we expect */
	if (!args->len) {
		args->len = sizeof(queue->faults);
		return 0;
	}

	/* Set the length to the actual size of the data */
	args->len = size;

	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

	return ret ? -EFAULT : 0;
}

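/*
 * Dispatch a submitqueue query (presumably backing the corresponding
 * ioctl in the wider driver).  Userspace can probe the required buffer
 * size by first passing len == 0, then call again with a buffer of at
 * least that size; only MSM_SUBMITQUEUE_PARAM_FAULTS is handled here.
 */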
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args)
{
	struct msm_gpu_submitqueue *queue;
	int ret = -EINVAL;

	if (args->pad)
		return -EINVAL;

	queue = msm_submitqueue_get(ctx, args->id);
	if (!queue)
		return -ENOENT;

	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
		ret = msm_submitqueue_query_faults(queue, args);

	msm_submitqueue_put(queue);

	return ret;
}

int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}