cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

mock_engine.c (10593B)


      1// SPDX-License-Identifier: MIT
      2/*
      3 * Copyright © 2016 Intel Corporation
      4 */
      5
      6#include "gem/i915_gem_context.h"
      7#include "gt/intel_ring.h"
      8
      9#include "i915_drv.h"
     10#include "intel_context.h"
     11#include "intel_engine_pm.h"
     12
     13#include "mock_engine.h"
     14#include "selftests/mock_request.h"
     15
     16static int mock_timeline_pin(struct intel_timeline *tl)
     17{
     18	int err;
     19
     20	if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj, NULL)))
     21		return -EBUSY;
     22
     23	err = intel_timeline_pin_map(tl);
     24	i915_gem_object_unlock(tl->hwsp_ggtt->obj);
     25	if (err)
     26		return err;
     27
     28	atomic_inc(&tl->pin_count);
     29	return 0;
     30}
     31
/*
 * Drop one software pin reference taken by mock_timeline_pin().
 * Underflowing the pin count is a bug.
 */
static void mock_timeline_unpin(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	atomic_dec(&tl->pin_count);
}
     37
     38static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
     39{
     40	struct i915_address_space *vm = &ggtt->vm;
     41	struct drm_i915_private *i915 = vm->i915;
     42	struct drm_i915_gem_object *obj;
     43	struct i915_vma *vma;
     44
     45	obj = i915_gem_object_create_internal(i915, size);
     46	if (IS_ERR(obj))
     47		return ERR_CAST(obj);
     48
     49	vma = i915_vma_instance(obj, vm, NULL);
     50	if (IS_ERR(vma))
     51		goto err;
     52
     53	return vma;
     54
     55err:
     56	i915_gem_object_put(obj);
     57	return vma;
     58}
     59
     60static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
     61{
     62	const unsigned long sz = PAGE_SIZE;
     63	struct intel_ring *ring;
     64
     65	ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
     66	if (!ring)
     67		return NULL;
     68
     69	kref_init(&ring->ref);
     70	ring->size = sz;
     71	ring->effective_size = sz;
     72	ring->vaddr = (void *)(ring + 1);
     73	atomic_set(&ring->pin_count, 1);
     74
     75	ring->vma = create_ring_vma(engine->gt->ggtt, PAGE_SIZE);
     76	if (IS_ERR(ring->vma)) {
     77		kfree(ring);
     78		return NULL;
     79	}
     80
     81	intel_ring_update_space(ring);
     82
     83	return ring;
     84}
     85
/* Release the vma reference held by a mock ring, then the ring itself. */
static void mock_ring_free(struct intel_ring *ring)
{
	i915_vma_put(ring->vma);

	kfree(ring);
}
     92
/*
 * Peek at the oldest request on the fake hw queue, or NULL if empty.
 * All callers in this file hold engine->hw_lock around the lookup.
 */
static struct i915_request *first_request(struct mock_engine *engine)
{
	return list_first_entry_or_null(&engine->hw_queue,
					struct i915_request,
					mock.link);
}
     99
/*
 * "Complete" a request on the fake hardware: unlink it from the hw
 * queue, mark its fence complete and wake any breadcrumb waiters.
 */
static void advance(struct i915_request *request)
{
	list_del_init(&request->mock.link);
	i915_request_mark_complete(request);
	GEM_BUG_ON(!i915_request_completed(request));

	intel_engine_signal_breadcrumbs(request->engine);
}
    108
    109static void hw_delay_complete(struct timer_list *t)
    110{
    111	struct mock_engine *engine = from_timer(engine, t, hw_delay);
    112	struct i915_request *request;
    113	unsigned long flags;
    114
    115	spin_lock_irqsave(&engine->hw_lock, flags);
    116
    117	/* Timer fired, first request is complete */
    118	request = first_request(engine);
    119	if (request)
    120		advance(request);
    121
    122	/*
    123	 * Also immediately signal any subsequent 0-delay requests, but
    124	 * requeue the timer for the next delayed request.
    125	 */
    126	while ((request = first_request(engine))) {
    127		if (request->mock.delay) {
    128			mod_timer(&engine->hw_delay,
    129				  jiffies + request->mock.delay);
    130			break;
    131		}
    132
    133		advance(request);
    134	}
    135
    136	spin_unlock_irqrestore(&engine->hw_lock, flags);
    137}
    138
/* No per-pin state; the ring vma is unpinned in mock_context_post_unpin(). */
static void mock_context_unpin(struct intel_context *ce)
{
}
    142
/* Undo the GGTT pin taken on the ring vma in mock_context_pre_pin(). */
static void mock_context_post_unpin(struct intel_context *ce)
{
	i915_vma_unpin(ce->ring->vma);
}
    147
/*
 * Final release of a mock context (kref release callback).
 *
 * The ring and pinned timeline only exist once ->alloc has run;
 * CONTEXT_ALLOC_BIT appears to record that (set outside this file),
 * so only tear them down when it is set.
 */
static void mock_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		mock_ring_free(ce->ring);
		mock_timeline_unpin(ce->timeline);
	}

	intel_context_fini(ce);
	intel_context_free(ce);
}
    162
    163static int mock_context_alloc(struct intel_context *ce)
    164{
    165	int err;
    166
    167	ce->ring = mock_ring(ce->engine);
    168	if (!ce->ring)
    169		return -ENOMEM;
    170
    171	ce->timeline = intel_timeline_create(ce->engine->gt);
    172	if (IS_ERR(ce->timeline)) {
    173		kfree(ce->engine);
    174		return PTR_ERR(ce->timeline);
    175	}
    176
    177	err = mock_timeline_pin(ce->timeline);
    178	if (err) {
    179		intel_timeline_put(ce->timeline);
    180		ce->timeline = NULL;
    181		return err;
    182	}
    183
    184	return 0;
    185}
    186
/* Pin the ring vma into the GGTT; released in mock_context_post_unpin(). */
static int mock_context_pre_pin(struct intel_context *ce,
				struct i915_gem_ww_ctx *ww, void **unused)
{
	return i915_vma_pin_ww(ce->ring->vma, ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
}
    192
/* All pinning work is done in pre_pin; nothing further required. */
static int mock_context_pin(struct intel_context *ce, void *unused)
{
	return 0;
}
    197
/* Mock contexts carry no hardware state to restore after a reset. */
static void mock_context_reset(struct intel_context *ce)
{
}
    201
/* Context operations shared by every context on a mock engine. */
static const struct intel_context_ops mock_context_ops = {
	.alloc = mock_context_alloc,

	.pre_pin = mock_context_pre_pin,
	.pin = mock_context_pin,
	.unpin = mock_context_unpin,
	.post_unpin = mock_context_post_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = mock_context_reset,
	.destroy = mock_context_destroy,
};
    216
/* Initialise the mock-specific fields of a freshly allocated request. */
static int mock_request_alloc(struct i915_request *request)
{
	INIT_LIST_HEAD(&request->mock.link);
	request->mock.delay = 0;

	return 0;
}
    224
/* No hardware behind the mock engine, so flushes are a no-op. */
static int mock_emit_flush(struct i915_request *request,
			   unsigned int flags)
{
	return 0;
}
    230
/* Emit no commands; return the command-stream pointer unadvanced. */
static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs)
{
	return cs;
}
    235
    236static void mock_submit_request(struct i915_request *request)
    237{
    238	struct mock_engine *engine =
    239		container_of(request->engine, typeof(*engine), base);
    240	unsigned long flags;
    241
    242	i915_request_submit(request);
    243
    244	spin_lock_irqsave(&engine->hw_lock, flags);
    245	list_add_tail(&request->mock.link, &engine->hw_queue);
    246	if (list_is_first(&request->mock.link, &engine->hw_queue)) {
    247		if (request->mock.delay)
    248			mod_timer(&engine->hw_delay,
    249				  jiffies + request->mock.delay);
    250		else
    251			advance(request);
    252	}
    253	spin_unlock_irqrestore(&engine->hw_lock, flags);
    254}
    255
/* Track an active request on its engine's scheduler request list. */
static void mock_add_to_engine(struct i915_request *rq)
{
	lockdep_assert_held(&rq->engine->sched_engine->lock);
	list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
}
    261
/* Unlink a request from its engine's scheduler list, chasing rq->engine. */
static void mock_remove_from_engine(struct i915_request *rq)
{
	struct intel_engine_cs *engine, *locked;

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */

	locked = READ_ONCE(rq->engine);
	spin_lock_irq(&locked->sched_engine->lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
		spin_unlock(&locked->sched_engine->lock);
		spin_lock(&engine->sched_engine->lock);
		locked = engine;
	}
	list_del_init(&rq->sched.link);
	spin_unlock_irq(&locked->sched_engine->lock);
}
    283
/* Nothing to quiesce before resetting a mock engine. */
static void mock_reset_prepare(struct intel_engine_cs *engine)
{
}
    287
/*
 * No state to rewind; a stalled reset is never expected on the
 * mock engine, so assert against it.
 */
static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	GEM_BUG_ON(stalled);
}
    292
/*
 * Cancel all outstanding work as part of engine reset: stop the fake
 * completion timer, fail every submitted request with -EIO, then fail
 * and flush everything still waiting on the fake hw queue.
 */
static void mock_reset_cancel(struct intel_engine_cs *engine)
{
	struct mock_engine *mock =
		container_of(engine, typeof(*mock), base);
	struct i915_request *rq;
	unsigned long flags;

	/* Ensure the timer callback cannot race with the cancel below. */
	del_timer_sync(&mock->hw_delay);

	spin_lock_irqsave(&engine->sched_engine->lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
		i915_request_put(i915_request_mark_eio(rq));
	intel_engine_signal_breadcrumbs(engine);

	/* Cancel and submit all pending requests. */
	list_for_each_entry(rq, &mock->hw_queue, mock.link) {
		if (i915_request_mark_eio(rq)) {
			__i915_request_submit(rq);
			i915_request_put(rq);
		}
	}
	INIT_LIST_HEAD(&mock->hw_queue);

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
    320
/* No post-reset fixup required for the mock engine. */
static void mock_reset_finish(struct intel_engine_cs *engine)
{
}
    324
/*
 * engine->release callback: tear down everything set up in
 * mock_engine_init(). The delay timer must already be idle by the
 * time the engine is released.
 */
static void mock_engine_release(struct intel_engine_cs *engine)
{
	struct mock_engine *mock =
		container_of(engine, typeof(*mock), base);

	GEM_BUG_ON(timer_pending(&mock->hw_delay));

	i915_sched_engine_put(engine->sched_engine);
	intel_breadcrumbs_put(engine->breadcrumbs);

	intel_context_unpin(engine->kernel_context);
	intel_context_put(engine->kernel_context);

	intel_engine_fini_retire(engine);
}
    340
/*
 * Allocate and minimally initialise a mock engine: just enough state
 * to submit requests through the fake hw queue, with no real hardware
 * access. Returns the embedded intel_engine_cs, or NULL on allocation
 * failure. Completed by a subsequent call to mock_engine_init().
 */
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
				    const char *name,
				    int id)
{
	struct mock_engine *engine;

	GEM_BUG_ON(id >= I915_NUM_ENGINES);
	GEM_BUG_ON(!to_gt(i915)->uncore);

	/* Extra PAGE_SIZE after the struct backs the status page below. */
	engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
	if (!engine)
		return NULL;

	/* minimal engine setup for requests */
	engine->base.i915 = i915;
	engine->base.gt = to_gt(i915);
	engine->base.uncore = to_gt(i915)->uncore;
	snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
	engine->base.id = id;
	engine->base.mask = BIT(id);
	engine->base.legacy_idx = INVALID_ENGINE;
	engine->base.instance = id;
	engine->base.status_page.addr = (void *)(engine + 1);

	engine->base.cops = &mock_context_ops;
	engine->base.request_alloc = mock_request_alloc;
	engine->base.emit_flush = mock_emit_flush;
	engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
	engine->base.submit_request = mock_submit_request;
	engine->base.add_active_request = mock_add_to_engine;
	engine->base.remove_active_request = mock_remove_from_engine;

	engine->base.reset.prepare = mock_reset_prepare;
	engine->base.reset.rewind = mock_reset_rewind;
	engine->base.reset.cancel = mock_reset_cancel;
	engine->base.reset.finish = mock_reset_finish;

	engine->base.release = mock_engine_release;

	to_gt(i915)->engine[id] = &engine->base;
	to_gt(i915)->engine_class[0][id] = &engine->base;

	/* fake hw queue */
	spin_lock_init(&engine->hw_lock);
	timer_setup(&engine->hw_delay, hw_delay_complete, 0);
	INIT_LIST_HEAD(&engine->hw_queue);

	intel_engine_add_user(&engine->base);

	return &engine->base;
}
    392
    393int mock_engine_init(struct intel_engine_cs *engine)
    394{
    395	struct intel_context *ce;
    396
    397	INIT_LIST_HEAD(&engine->pinned_contexts_list);
    398
    399	engine->sched_engine = i915_sched_engine_create(ENGINE_MOCK);
    400	if (!engine->sched_engine)
    401		return -ENOMEM;
    402	engine->sched_engine->private_data = engine;
    403
    404	intel_engine_init_execlists(engine);
    405	intel_engine_init__pm(engine);
    406	intel_engine_init_retire(engine);
    407
    408	engine->breadcrumbs = intel_breadcrumbs_create(NULL);
    409	if (!engine->breadcrumbs)
    410		goto err_schedule;
    411
    412	ce = create_kernel_context(engine);
    413	if (IS_ERR(ce))
    414		goto err_breadcrumbs;
    415
    416	/* We insist the kernel context is using the status_page */
    417	engine->status_page.vma = ce->timeline->hwsp_ggtt;
    418
    419	engine->kernel_context = ce;
    420	return 0;
    421
    422err_breadcrumbs:
    423	intel_breadcrumbs_put(engine->breadcrumbs);
    424err_schedule:
    425	i915_sched_engine_put(engine->sched_engine);
    426	return -ENOMEM;
    427}
    428
    429void mock_engine_flush(struct intel_engine_cs *engine)
    430{
    431	struct mock_engine *mock =
    432		container_of(engine, typeof(*mock), base);
    433	struct i915_request *request, *rn;
    434
    435	del_timer_sync(&mock->hw_delay);
    436
    437	spin_lock_irq(&mock->hw_lock);
    438	list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
    439		advance(request);
    440	spin_unlock_irq(&mock->hw_lock);
    441}
    442
/* Nothing to do: the mock engine has no hardware state to reset. */
void mock_engine_reset(struct intel_engine_cs *engine)
{
}