cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

sched_fence.c (5305B)


      1/*
      2 * Copyright 2015 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 */
     23
     24#include <linux/kthread.h>
     25#include <linux/module.h>
     26#include <linux/sched.h>
     27#include <linux/slab.h>
     28#include <linux/wait.h>
     29
     30#include <drm/gpu_scheduler.h>
     31
/* Slab cache backing all drm_sched_fence allocations; created at module init. */
static struct kmem_cache *sched_fence_slab;
     33
     34static int __init drm_sched_fence_slab_init(void)
     35{
     36	sched_fence_slab = kmem_cache_create(
     37		"drm_sched_fence", sizeof(struct drm_sched_fence), 0,
     38		SLAB_HWCACHE_ALIGN, NULL);
     39	if (!sched_fence_slab)
     40		return -ENOMEM;
     41
     42	return 0;
     43}
     44
/**
 * drm_sched_fence_slab_fini - tear down the scheduler fence slab cache
 *
 * Called at module exit time. Fences are freed back into the cache from an
 * RCU callback (drm_sched_fence_free_rcu), so wait for all outstanding RCU
 * callbacks to run before destroying the cache.
 */
static void __exit drm_sched_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(sched_fence_slab);
}
     50
/* Signal the "scheduled" stage fence: the job has been picked to run. */
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
	dma_fence_signal(&fence->scheduled);
}
     55
/* Signal the "finished" stage fence: the job has completed execution. */
void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
	dma_fence_signal(&fence->finished);
}
     60
/* dma_fence_ops callback: fixed driver name reported for all sched fences. */
static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
	return "drm_sched";
}
     65
     66static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
     67{
     68	struct drm_sched_fence *fence = to_drm_sched_fence(f);
     69	return (const char *)fence->sched->name;
     70}
     71
     72static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
     73{
     74	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
     75	struct drm_sched_fence *fence = to_drm_sched_fence(f);
     76
     77	if (!WARN_ON_ONCE(!fence))
     78		kmem_cache_free(sched_fence_slab, fence);
     79}
     80
     81/**
     82 * drm_sched_fence_free - free up an uninitialized fence
     83 *
     84 * @fence: fence to free
     85 *
     86 * Free up the fence memory. Should only be used if drm_sched_fence_init()
     87 * has not been called yet.
     88 */
     89void drm_sched_fence_free(struct drm_sched_fence *fence)
     90{
     91	/* This function should not be called if the fence has been initialized. */
     92	if (!WARN_ON_ONCE(fence->sched))
     93		kmem_cache_free(sched_fence_slab, fence);
     94}
     95
/**
 * drm_sched_fence_release_scheduled - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	/* Drop our reference on the parent fence (dma_fence_put tolerates NULL). */
	dma_fence_put(fence->parent);
	/* Defer the actual free past an RCU grace period; see free_rcu above. */
	call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
}
    111
/**
 * drm_sched_fence_release_finished - drop extra reference
 *
 * @f: fence
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 * The actual memory release happens through the scheduled fence's
 * release path (drm_sched_fence_release_scheduled).
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(&fence->scheduled);
}
    125
/* Ops for the embedded "scheduled" fence; its release frees the whole object. */
static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_scheduled,
};
    131
/* Ops for the embedded "finished" fence; its release drops a scheduled ref. */
static const struct dma_fence_ops drm_sched_fence_ops_finished = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_finished,
};
    137
    138struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
    139{
    140	if (f->ops == &drm_sched_fence_ops_scheduled)
    141		return container_of(f, struct drm_sched_fence, scheduled);
    142
    143	if (f->ops == &drm_sched_fence_ops_finished)
    144		return container_of(f, struct drm_sched_fence, finished);
    145
    146	return NULL;
    147}
    148EXPORT_SYMBOL(to_drm_sched_fence);
    149
    150struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
    151					      void *owner)
    152{
    153	struct drm_sched_fence *fence = NULL;
    154
    155	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
    156	if (fence == NULL)
    157		return NULL;
    158
    159	fence->owner = owner;
    160	spin_lock_init(&fence->lock);
    161
    162	return fence;
    163}
    164
/**
 * drm_sched_fence_init - initialize a fence previously allocated with
 * drm_sched_fence_alloc()
 *
 * @fence: fence to initialize
 * @entity: entity the fence belongs to; supplies the scheduler, the seqno
 *          counter and the fence context pair
 *
 * Both embedded fences share one seqno; the scheduled fence uses the
 * entity's fence context and the finished fence uses context + 1.
 */
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity)
{
	unsigned seq;

	fence->sched = entity->rq->sched;
	seq = atomic_inc_return(&entity->fence_seq);
	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
		       &fence->lock, entity->fence_context, seq);
	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
		       &fence->lock, entity->fence_context + 1, seq);
}
    177
/* Module hooks: create/destroy the fence slab cache with module lifetime. */
module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");