cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

fence.c (4085B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Syncpoint dma_fence implementation
 *
 * Copyright (c) 2020, NVIDIA Corporation.
 */

#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sync_file.h>

#include "fence.h"
#include "intr.h"
#include "syncpt.h"

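/* A single lock shared by all syncpoint fences, used as their dma_fence lock. */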
static DEFINE_SPINLOCK(lock);

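/*
 * A dma_fence that signals when a host1x syncpoint reaches a threshold
 * value, or is reaped by the timeout worker below.
 */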
struct host1x_syncpt_fence {
	struct dma_fence base;

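	/* Claimed via atomic_xchg() by whichever of the signal and timeout paths runs first. */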
	atomic_t signaling;

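	/* Syncpoint and threshold value at which the fence signals. */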
	struct host1x_syncpt *sp;
	u32 threshold;

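	/*
	 * Waiter preallocated at fence creation time; the intr framework
	 * takes ownership of it once signaling is enabled.
	 */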
	struct host1x_waitlist *waiter;
	void *waiter_ref;

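	/* Reaps the fence if it has not been signalled after 30 seconds. */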
	struct delayed_work timeout_work;
};

static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
{
	return "host1x";
}

static const char *host1x_syncpt_fence_get_timeline_name(struct dma_fence *f)
{
	return "syncpoint";
}

static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
{
	return container_of(f, struct host1x_syncpt_fence, base);
}

static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);
	int err;

	if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
		return false;

	dma_fence_get(f);

	/*
	 * The dma_fence framework requires the fence driver to keep a
	 * reference to any fences for which 'enable_signaling' has been
	 * called (and that have not been signalled).
	 *
	 * We provide a userspace API to create arbitrary syncpoint fences,
	 * so we cannot normally guarantee that all fences get signalled.
	 * As such, set up a timeout, so that long-lasting fences will get
	 * reaped eventually.
	 */
	schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));

	err = host1x_intr_add_action(sf->sp->host, sf->sp, sf->threshold,
				     HOST1X_INTR_ACTION_SIGNAL_FENCE, f,
				     sf->waiter, &sf->waiter_ref);
	if (err) {
		cancel_delayed_work_sync(&sf->timeout_work);
		dma_fence_put(f);
		return false;
	}

	/* The intr framework takes ownership of the waiter. */
	sf->waiter = NULL;

	/*
	 * The fence may get signalled at any time after the above call,
	 * so all state used by the signalling path must be initialized
	 * before it.
	 */

	return true;
}

static void host1x_syncpt_fence_release(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	kfree(sf->waiter);

	dma_fence_free(f);
}

const struct dma_fence_ops host1x_syncpt_fence_ops = {
	.get_driver_name = host1x_syncpt_fence_get_driver_name,
	.get_timeline_name = host1x_syncpt_fence_get_timeline_name,
	.enable_signaling = host1x_syncpt_fence_enable_signaling,
	.release = host1x_syncpt_fence_release,
};

void host1x_fence_signal(struct host1x_syncpt_fence *f)
{
	if (atomic_xchg(&f->signaling, 1))
		return;

	/*
	 * Cancel pending timeout work - if it races with us, it will
	 * lose the 'f->signaling' exchange above and return early.
	 */
	cancel_delayed_work_sync(&f->timeout_work);

	host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, false);

	dma_fence_signal(&f->base);
	dma_fence_put(&f->base);
}

static void do_fence_timeout(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct host1x_syncpt_fence *f =
		container_of(dwork, struct host1x_syncpt_fence, timeout_work);

	if (atomic_xchg(&f->signaling, 1))
		return;

	/*
	 * Remove our interrupt waiter. Passing 'true' waits out any
	 * concurrently running handler, so it is safe to signal the
	 * fence below.
	 */
	host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, true);

	dma_fence_set_error(&f->base, -ETIMEDOUT);
	dma_fence_signal(&f->base);
	dma_fence_put(&f->base);
}

struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
{
	struct host1x_syncpt_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL);
	if (!fence->waiter) {
		kfree(fence);
		return ERR_PTR(-ENOMEM);
	}

	fence->sp = sp;
	fence->threshold = threshold;

	dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &lock,
		       dma_fence_context_alloc(1), 0);

	INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);

	return &fence->base;
}
EXPORT_SYMBOL(host1x_fence_create);
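
For reference, a typical consumer of host1x_fence_create() wraps the returned
fence in a sync_file so userspace can poll or wait on it. The sketch below is
not part of this file and the helper name is invented; it only uses standard
kernel APIs (sync_file_create(), get_unused_fd_flags(), fd_install()) that the
includes at the top of fence.c already cover.

/*
 * Hypothetical example, not part of the driver: export a fence for
 * syncpoint 'sp' reaching 'threshold' to userspace as a sync_file fd.
 */
static int host1x_export_fence_fd(struct host1x_syncpt *sp, u32 threshold)
{
	struct dma_fence *fence;
	struct sync_file *file;
	int fd;

	fence = host1x_fence_create(sp, threshold);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		dma_fence_put(fence);
		return fd;
	}

	/* sync_file_create() takes its own reference on the fence. */
	file = sync_file_create(fence);
	dma_fence_put(fence);
	if (!file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fd_install(fd, file->file);
	return fd;
}

Note that the creator's fence reference is dropped unconditionally once
sync_file_create() has taken its own, and on the failure path the reserved fd
is released with put_unused_fd() since it was never installed.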