cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i915_vma_resource.h (7672B)


/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __I915_VMA_RESOURCE_H__
#define __I915_VMA_RESOURCE_H__

#include <linux/dma-fence.h>
#include <linux/refcount.h>

#include "i915_gem.h"
#include "i915_scatterlist.h"
#include "i915_sw_fence.h"
#include "intel_runtime_pm.h"

struct intel_memory_region;

struct i915_page_sizes {
	/**
	 * The sg mask of the pages' sg_table, i.e. the mask of the
	 * lengths of each sg entry.
	 */
	unsigned int phys;

	/**
	 * The gtt page sizes we are allowed to use given the
	 * sg mask and the supported page sizes. This will
	 * express the smallest unit we can use for the whole
	 * object, as well as the larger sizes we may be able
	 * to use opportunistically.
	 */
	unsigned int sg;
};
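
/*
 * Illustrative sketch (not part of the original header): the "mask of
 * the lengths" above is typically built by OR-ing together the length
 * of every sg entry, so the set bits record which chunk sizes occur.
 * example_sg_mask() is a made-up name; the i915 scatterlist helpers
 * compute the real masks.
 */
static inline unsigned int example_sg_mask(struct sg_table *st)
{
	struct scatterlist *sg;
	unsigned int mask = 0, i;

	for_each_sg(st->sgl, sg, st->nents, i)
		mask |= sg->length;

	return mask;
}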

/**
 * struct i915_vma_resource - Snapshotted unbind information.
 * @unbind_fence: Fence to mark unbinding complete. Note that this fence
 * is not considered published until the unbind is scheduled, and as such
 * it is illegal to access this fence before the scheduled unbind, other
 * than for refcounting.
 * @lock: The @unbind_fence lock.
 * @hold_count: Number of holders blocking the fence from finishing.
 * The vma itself is keeping a hold, which is released when unbind
 * is scheduled.
 * @work: Work struct for deferred unbind work.
 * @chain: Pointer to struct i915_sw_fence used to await dependencies.
 * @rb: Rb node for the vm's pending unbind interval tree.
 * @__subtree_last: Interval tree private member.
 * @vm: Non-refcounted pointer to the vm. This is for internal use only and
 * this member is cleared after vm_resource unbind.
 * @mr: The memory region of the object pointed to by the vma.
 * @ops: Pointer to the backend i915_vma_ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start.
 * @node_size: Size of the allocated range manager node.
 * @vma_size: Bind size.
 * @page_sizes_gtt: Resulting page sizes from the bind operation.
 * @bound_flags: Flags indicating binding status.
 * @allocated: Backend private data. TODO: Should move into @private.
 * @immediate_unbind: Unbind can be done immediately and doesn't need to be
 * deferred to a work item awaiting unsignaled fences. This is a hack.
 * (dma_fence_work uses a fence flag for this, but this seems slightly
 * cleaner).
 * @needs_wakeref: Whether a wakeref is needed during unbind. Since we can't
 * take a wakeref in the dma-fence signalling critical path, it needs to be
 * taken when the unbind is scheduled.
 * @wakeref: The wakeref obtained when the unbind was scheduled, if
 * @needs_wakeref is set.
 * @skip_pte_rewrite: During ggtt suspend and vm takedown, pte rewriting
 * needs to be skipped for unbind.
 *
 * The lifetime of a struct i915_vma_resource is from a binding request
 * until the actual, possibly asynchronous, unbind has completed.
 */
struct i915_vma_resource {
	struct dma_fence unbind_fence;
	/* See above for description of the lock. */
	spinlock_t lock;
	refcount_t hold_count;
	struct work_struct work;
	struct i915_sw_fence chain;
	struct rb_node rb;
	u64 __subtree_last;
	struct i915_address_space *vm;
	intel_wakeref_t wakeref;

	/**
	 * struct i915_vma_bindinfo - Information needed for async bind
	 * only but that can be dropped after the bind has taken place.
	 * Consider making this a separate argument to the bind_vma
	 * op, coalescing with other arguments like vm, stash, cache_level
	 * and flags.
	 * @pages: The pages sg-table.
	 * @page_sizes: Page sizes of the pages.
	 * @pages_rsgt: Refcounted sg-table when delayed object destruction
	 * is supported. May be NULL.
	 * @readonly: Whether the vma should be bound read-only.
	 * @lmem: Whether the vma points to lmem.
	 */
	struct i915_vma_bindinfo {
		struct sg_table *pages;
		struct i915_page_sizes page_sizes;
		struct i915_refct_sgt *pages_rsgt;
		bool readonly:1;
		bool lmem:1;
	} bi;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	struct intel_memory_region *mr;
#endif
	const struct i915_vma_ops *ops;
	void *private;
	u64 start;
	u64 node_size;
	u64 vma_size;
	u32 page_sizes_gtt;

	u32 bound_flags;
	bool allocated:1;
	bool immediate_unbind:1;
	bool needs_wakeref:1;
	bool skip_pte_rewrite:1;
};

bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
			    bool *lockdep_cookie);

void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
			      bool lockdep_cookie);
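
/*
 * Illustrative sketch (not part of the original header): pairing
 * i915_vma_resource_hold() with i915_vma_resource_unhold() around
 * access to the resource while an async unbind may be pending. A
 * successful hold blocks the unbind fence from signalling until the
 * matching unhold. example_peek_pages() is a made-up name.
 */
static inline void example_peek_pages(struct i915_vma_resource *vma_res)
{
	bool lockdep_cookie;

	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
		/* Safe to use vma_res->bi.pages until the unhold below. */
		i915_vma_resource_unhold(vma_res, lockdep_cookie);
	}
}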

struct i915_vma_resource *i915_vma_resource_alloc(void);

void i915_vma_resource_free(struct i915_vma_resource *vma_res);

struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res);
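
/*
 * Illustrative sketch (not part of the original header): scheduling an
 * unbind and waiting for it synchronously. The returned fence is the
 * embedded @unbind_fence, which only becomes published by this call;
 * in typical use, putting the fence releases the reference the caller
 * held on the resource. example_unbind_sync() is a made-up name.
 */
static inline long example_unbind_sync(struct i915_vma_resource *vma_res)
{
	struct dma_fence *fence = i915_vma_resource_unbind(vma_res);
	long ret;

	/* Returns 0 once the unbind has completed, or a negative error. */
	ret = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return ret;
}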

void __i915_vma_resource_init(struct i915_vma_resource *vma_res);

/**
 * i915_vma_resource_get - Take a reference on a vma resource
 * @vma_res: The vma resource on which to take a reference.
 *
 * Return: The @vma_res pointer
 */
static inline struct i915_vma_resource
*i915_vma_resource_get(struct i915_vma_resource *vma_res)
{
	dma_fence_get(&vma_res->unbind_fence);
	return vma_res;
}

/**
 * i915_vma_resource_put - Release a reference to a struct i915_vma_resource
 * @vma_res: The resource
 */
static inline void i915_vma_resource_put(struct i915_vma_resource *vma_res)
{
	dma_fence_put(&vma_res->unbind_fence);
}
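
/*
 * Illustrative sketch (not part of the original header): keeping a vma
 * resource alive while it is inspected outside the caller's locking.
 * Since the refcount lives in the embedded unbind fence, get/put simply
 * forward to dma_fence_get()/dma_fence_put(). example_inspect() is a
 * made-up name.
 */
static inline u64 example_inspect(struct i915_vma_resource *vma_res)
{
	u64 start;

	vma_res = i915_vma_resource_get(vma_res);
	start = vma_res->start;
	i915_vma_resource_put(vma_res);

	return start;
}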

/**
 * i915_vma_resource_init - Initialize a vma resource.
 * @vma_res: The vma resource to initialize
 * @vm: Pointer to the vm.
 * @pages: The pages sg-table.
 * @page_sizes: Page sizes of the pages.
 * @pages_rsgt: Pointer to a struct i915_refct_sgt of an object with
 * delayed destruction.
 * @readonly: Whether the vma should be bound read-only.
 * @lmem: Whether the vma points to lmem.
 * @mr: The memory region of the object the vma points to.
 * @ops: The backend ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start.
 * @node_size: Size of the allocated range manager node.
 * @size: Bind size.
 *
 * Initializes a vma resource allocated using i915_vma_resource_alloc().
 * The reason for having separate allocate and initialize functions is that
 * initialization may need to be performed from under a lock where
 * allocation is not allowed.
 */
static inline void i915_vma_resource_init(struct i915_vma_resource *vma_res,
					  struct i915_address_space *vm,
					  struct sg_table *pages,
					  const struct i915_page_sizes *page_sizes,
					  struct i915_refct_sgt *pages_rsgt,
					  bool readonly,
					  bool lmem,
					  struct intel_memory_region *mr,
					  const struct i915_vma_ops *ops,
					  void *private,
					  u64 start,
					  u64 node_size,
					  u64 size)
{
	__i915_vma_resource_init(vma_res);
	vma_res->vm = vm;
	vma_res->bi.pages = pages;
	vma_res->bi.page_sizes = *page_sizes;
	if (pages_rsgt)
		vma_res->bi.pages_rsgt = i915_refct_sgt_get(pages_rsgt);
	vma_res->bi.readonly = readonly;
	vma_res->bi.lmem = lmem;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	vma_res->mr = mr;
#endif
	vma_res->ops = ops;
	vma_res->private = private;
	vma_res->start = start;
	vma_res->node_size = node_size;
	vma_res->vma_size = size;
}
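
/*
 * Illustrative sketch (not part of the original header): allocating a
 * resource outside any lock and initializing it later, which is the
 * split the comment above motivates. The NULL/false arguments stand in
 * for backend-specific values, and the allocator is assumed to return
 * an ERR_PTR on failure, as the i915 implementation does.
 * example_prepare() is a made-up name.
 */
static inline struct i915_vma_resource *
example_prepare(struct i915_address_space *vm, struct sg_table *pages,
		const struct i915_page_sizes *page_sizes, u64 start, u64 size)
{
	struct i915_vma_resource *vma_res;

	/* May sleep; done before taking any bind locks. */
	vma_res = i915_vma_resource_alloc();
	if (IS_ERR(vma_res))
		return vma_res;

	/* Safe under a lock, as no allocation takes place here. */
	i915_vma_resource_init(vma_res, vm, pages, page_sizes,
			       NULL, false, false, NULL, NULL, NULL,
			       start, size, size);
	return vma_res;
}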

static inline void i915_vma_resource_fini(struct i915_vma_resource *vma_res)
{
	GEM_BUG_ON(refcount_read(&vma_res->hold_count) != 1);
	if (vma_res->bi.pages_rsgt)
		i915_refct_sgt_put(vma_res->bi.pages_rsgt);
	i915_sw_fence_fini(&vma_res->chain);
}
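
/*
 * Illustrative sketch (not part of the original header): tearing down a
 * resource whose unbind fence was never published, e.g. on a failed
 * bind. Resources that did publish their fence are instead released
 * through the fence refcount via i915_vma_resource_put().
 * example_discard() is a made-up name.
 */
static inline void example_discard(struct i915_vma_resource *vma_res)
{
	i915_vma_resource_fini(vma_res);
	i915_vma_resource_free(vma_res);
}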

int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
				    u64 first,
				    u64 last,
				    bool intr);

int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
				     struct i915_sw_fence *sw_fence,
				     u64 first,
				     u64 last,
				     bool intr,
				     gfp_t gfp);

void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm);

void i915_vma_resource_module_exit(void);

int i915_vma_resource_module_init(void);

#endif