cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i915_gem_clflush.c (3652B)


/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "display/intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

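/*
 * A deferred clflush request: dma_fence_work that flushes the CPU caches
 * for the pages of a single GEM object, plus a reference to that object.
 * The embedded fence is published in the object's reservation object so
 * other users can order against the flush.
 */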
struct clflush {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};

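/*
 * Flush the CPU cache for every page backing @obj and signal frontbuffer
 * tracking that the CPU writes have been flushed.
 */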
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}

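/* Work callback: performs the actual cache flush for the queued request. */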
static void clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	__do_clflush(clflush->obj);
}

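/*
 * Release callback: drop the page pin and object reference taken in
 * clflush_work_create() once the fence work is destroyed.
 */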
static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	i915_gem_object_unpin_pages(clflush->obj);
	i915_gem_object_put(clflush->obj);
}

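/* Fence work callbacks for the asynchronous clflush path. */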
static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};

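/*
 * Allocate and initialise fence work to flush @obj: pins the object's
 * pages and takes an extra object reference, both released again by
 * clflush_release(). Returns NULL if the allocation fails or the pages
 * cannot be acquired.
 */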
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	if (__i915_gem_object_get_pages(obj) < 0) {
		kfree(clflush);
		return NULL;
	}

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

	return clflush;
}

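/**
 * i915_gem_clflush_object - flush the CPU cache of a GEM object if required
 * @obj: the object to flush; the caller must hold the object lock
 * @flags: I915_CLFLUSH_FORCE forces a flush even for objects coherent for
 *	reads; I915_CLFLUSH_SYNC forbids the asynchronous fence-work path
 *
 * Returns true if a flush was performed or queued, false if the object is
 * already coherent and nothing needed to be done.
 */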
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct clflush *clflush;

	assert_object_held(obj);

	if (IS_DGFX(i915)) {
		WARN_ON_ONCE(obj->cache_dirty);
		return false;
	}

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * we consider anything not backed by physical memory to be always
	 * coherent and not to need clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/*
	 * If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

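	/*
	 * Prefer flushing asynchronously: queue the clflush as fence work
	 * behind the object's existing fences so the flush happens in order,
	 * without blocking here.  Fall back to an immediate, synchronous
	 * flush if I915_CLFLUSH_SYNC was requested or no fence work could
	 * be set up.
	 */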
	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC) &&
	    dma_resv_reserve_fences(obj->base.resv, 1) == 0)
		clflush = clflush_work_create(obj);
	if (clflush) {
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, NULL, true,
						i915_fence_timeout(i915),
						I915_FENCE_GFP);
		dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_work_commit(&clflush->base);
		/*
		 * We must have successfully populated the pages (since we are
		 * holding a pin on the pages as per the flush worker) to reach
		 * this point, which must mean we have already done the required
		 * flush-on-acquire, hence resetting cache_dirty here should be
		 * safe.
		 */
		obj->cache_dirty = false;
	} else if (obj->mm.pages) {
		__do_clflush(obj);
		obj->cache_dirty = false;
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	return true;
}
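
/*
 * Usage sketch (hypothetical caller, not part of this file): a caller that
 * wants the CPU caches flushed before GPU access might, under the object
 * lock, do
 *
 *	i915_gem_object_lock(obj, NULL);
 *	if (obj->cache_dirty)
 *		i915_gem_clflush_object(obj, 0);
 *	i915_gem_object_unlock(obj);
 *
 * The real callers (e.g. the domain-tracking paths in i915) combine this
 * with their own read/write domain bookkeeping.
 */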