cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i915_vma.h (13049B)


/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_VMA_H__
#define __I915_VMA_H__

#include <linux/io-mapping.h>
#include <linux/rbtree.h>

#include <drm/drm_mm.h>

#include "gt/intel_ggtt_fencing.h"
#include "gem/i915_gem_object.h"

#include "i915_gem_gtt.h"

#include "i915_active.h"
#include "i915_request.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"

struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view);

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
#define I915_VMA_RELEASE_MAP BIT(0)

static inline bool i915_vma_is_active(const struct i915_vma *vma)
{
	return !i915_active_is_idle(&vma->active);
}

/* do not reserve memory to prevent deadlocks */
#define __EXEC_OBJECT_NO_RESERVE BIT(31)

int __must_check _i915_vma_move_to_active(struct i915_vma *vma,
					  struct i915_request *rq,
					  struct dma_fence *fence,
					  unsigned int flags);
static inline int __must_check
i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq,
			unsigned int flags)
{
	return _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
}

#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)

static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_is_dpt(const struct i915_vma *vma)
{
	return i915_is_dpt(vma->vm);
}

static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	set_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_unset_ggtt_write(struct i915_vma *vma)
{
	return test_and_clear_bit(I915_VMA_GGTT_WRITE_BIT,
				  __i915_vma_flags(vma));
}

void i915_vma_flush_writes(struct i915_vma *vma);

static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	return test_and_set_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_unset_userfault(struct i915_vma *vma)
{
	clear_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}

static inline bool i915_vma_is_closed(const struct i915_vma *vma)
{
	return !list_empty(&vma->closed_link);
}

static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(upper_32_bits(vma->node.start));
	GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
	return lower_32_bits(vma->node.start);
}

static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
{
	return i915_vm_to_ggtt(vma->vm)->pin_bias;
}

static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
	i915_gem_object_get(vma->obj);
	return vma;
}

static inline struct i915_vma *i915_vma_tryget(struct i915_vma *vma)
{
	if (likely(kref_get_unless_zero(&vma->obj->base.refcount)))
		return vma;

	return NULL;
}

static inline void i915_vma_put(struct i915_vma *vma)
{
	i915_gem_object_put(vma->obj);
}

static inline long
i915_vma_compare(struct i915_vma *vma,
		 struct i915_address_space *vm,
		 const struct i915_ggtt_view *view)
{
	ptrdiff_t cmp;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));

	cmp = ptrdiff(vma->vm, vm);
	if (cmp)
		return cmp;

	BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL != 0);
	cmp = vma->ggtt_view.type;
	if (!view)
		return cmp;

	cmp -= view->type;
	if (cmp)
		return cmp;

	assert_i915_gem_gtt_types();

	/* ggtt_view.type also encodes its size so that we both distinguish
	 * different views using it as a "type" and also use a compact (no
	 * accessing of uninitialised padding bytes) memcmp without storing
	 * an extra parameter or adding more code.
	 *
	 * To ensure that the memcmp is valid for all branches of the union,
	 * even though the code looks like it is just comparing one branch,
	 * we assert above that all branches have the same address, and that
	 * each branch has a unique type/size.
	 */
	BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
	BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
	BUILD_BUG_ON(I915_GGTT_VIEW_ROTATED >= I915_GGTT_VIEW_REMAPPED);
	BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
		     offsetof(typeof(*view), partial));
	BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
		     offsetof(typeof(*view), remapped));
	return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
}
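
/*
 * A minimal lookup sketch (hypothetical caller; a lookup of this shape
 * lives in i915_vma.c): i915_vma_compare() orders an object's VMA
 * rb-tree, so a caller holding the appropriate lock could search it
 * like this.
 *
 *	struct rb_node *rb = obj->vma.tree.rb_node;
 *
 *	while (rb) {
 *		struct i915_vma *pos =
 *			rb_entry(rb, struct i915_vma, obj_node);
 *		long cmp = i915_vma_compare(pos, vm, view);
 *
 *		if (cmp == 0)
 *			return pos;
 *		rb = cmp < 0 ? rb->rb_right : rb->rb_left;
 *	}
 *	return NULL;
 */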

struct i915_vma_work *i915_vma_work(void);
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work,
		  struct i915_vma_resource *vma_res);

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async);
int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm);
int __must_check i915_vma_unbind_unlocked(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);

void i915_vma_destroy_locked(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);

#define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv)

static inline void i915_vma_lock(struct i915_vma *vma)
{
	dma_resv_lock(vma->obj->base.resv, NULL);
}

static inline void i915_vma_unlock(struct i915_vma *vma)
{
	dma_resv_unlock(vma->obj->base.resv);
}

int __must_check
i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		u64 size, u64 alignment, u64 flags);

static inline int __must_check
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct i915_gem_ww_ctx ww;
	int err;

	/* Standard ww transaction: take the object lock, try to pin,
	 * and on -EDEADLK back off (dropping contended locks) before
	 * retrying the whole sequence.
	 */
	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (!err)
		err = i915_vma_pin_ww(vma, &ww, size, alignment, flags);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return err;
}
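
/*
 * Typical caller pattern (a sketch, not taken from this header;
 * PIN_GLOBAL comes from i915_gem_gtt.h): pin the vma, use its GGTT
 * offset, then drop the pin.
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 *	offset = i915_ggtt_offset(vma);
 *	...
 *	i915_vma_unpin(vma);
 */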

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags);

static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
	return atomic_read(&vma->flags) & I915_VMA_PIN_MASK;
}

static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
{
	return i915_vma_pin_count(vma);
}

static inline void __i915_vma_pin(struct i915_vma *vma)
{
	atomic_inc(&vma->flags);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
}

static inline void __i915_vma_unpin(struct i915_vma *vma)
{
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	atomic_dec(&vma->flags);
}

static inline void i915_vma_unpin(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	__i915_vma_unpin(vma);
}

static inline bool i915_vma_is_bound(const struct i915_vma *vma,
				     unsigned int where)
{
	return atomic_read(&vma->flags) & where;
}

static inline bool i915_node_color_differs(const struct drm_mm_node *node,
					   unsigned long color)
{
	return drm_mm_node_allocated(node) && node->color != color;
}

/**
 * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
 * @vma: VMA to iomap
 *
 * The passed-in VMA has to be pinned in the global GTT mappable region.
 * An extra pinning of the VMA is acquired for the returned iomapping;
 * the caller must call i915_vma_unpin_iomap() to relinquish that pinning
 * once the iomapping is no longer required.
 *
 * Returns a valid iomapped pointer or ERR_PTR.
 */
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
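
/*
 * A minimal usage sketch (hypothetical caller; assumes @vma is already
 * pinned in the mappable GGTT, and that value/offset are the caller's):
 *
 *	void __iomem *map;
 *
 *	map = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	writel(value, map + offset);
 *
 *	i915_vma_unpin_iomap(vma);
 */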

/**
 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap
 * @vma: VMA to unpin
 *
 * Unpins the mapping previously acquired with i915_vma_pin_iomap().
 * This function is only valid to be called on a VMA that was iomapped
 * by the caller via i915_vma_pin_iomap().
 */
void i915_vma_unpin_iomap(struct i915_vma *vma);

/**
 * i915_vma_pin_fence - pin fencing state
 * @vma: vma to pin fencing for
 *
 * This pins the fencing state (whether tiled or untiled) to make sure the
 * vma (and its object) is ready to be used as a scanout target. Fencing
 * status must be synchronized first by calling i915_vma_get_fence().
 *
 * The resulting fence pin reference must be released again with
 * i915_vma_unpin_fence().
 *
 * Returns:
 *
 * 0 on success, a negative error code on failure.
 */
int __must_check i915_vma_pin_fence(struct i915_vma *vma);
void i915_vma_revoke_fence(struct i915_vma *vma);
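
/*
 * Pairing sketch (hypothetical caller, under the appropriate locks):
 *
 *	err = i915_vma_pin_fence(vma);
 *	if (err)
 *		return err;
 *
 *	if (vma->fence)
 *		... program fenced (tiled) access through the aperture ...
 *
 *	i915_vma_unpin_fence(vma);
 */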

int __i915_vma_pin_fence(struct i915_vma *vma);

static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
	GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
	atomic_dec(&vma->fence->pin_count);
}

/**
 * i915_vma_unpin_fence - unpin fencing state
 * @vma: vma to unpin fencing for
 *
 * This releases the fence pin reference acquired through
 * i915_vma_pin_fence. It will handle both objects with and without an
 * attached fence correctly; callers do not need to distinguish the two.
 */
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
	if (vma->fence)
		__i915_vma_unpin_fence(vma);
}

void i915_vma_parked(struct intel_gt *gt);

static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
{
	return test_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_mark_scanout(struct i915_vma *vma)
{
	set_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
}

static inline void i915_vma_clear_scanout(struct i915_vma *vma)
{
	clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma));
}

#define for_each_until(cond) if (cond) break; else

/**
 * for_each_ggtt_vma - Iterate over the GGTT VMAs belonging to an object.
 * @V: the #i915_vma iterator
 * @OBJ: the #drm_i915_gem_object
 *
 * GGTT VMAs are placed at the beginning of the object's vma_list, see
 * vma_create(), so we can stop our walk as soon as we see a ppgtt VMA,
 * or, of course, when the list is empty.
 */
#define for_each_ggtt_vma(V, OBJ) \
	list_for_each_entry(V, &(OBJ)->vma.list, obj_link)		\
		for_each_until(!i915_vma_is_ggtt(V))
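
/*
 * Usage sketch (hypothetical; i915_vma_flush_writes() is declared
 * above): flush pending GGTT writes on every GGTT binding of an object.
 *
 *	struct i915_vma *vma;
 *
 *	for_each_ggtt_vma(vma, obj)
 *		i915_vma_flush_writes(vma);
 */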

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
void i915_vma_make_shrinkable(struct i915_vma *vma);
void i915_vma_make_purgeable(struct i915_vma *vma);

int i915_vma_wait_for_bind(struct i915_vma *vma);

static inline int i915_vma_sync(struct i915_vma *vma)
{
	/* Wait for the asynchronous bindings and pending GPU reads */
	return i915_active_wait(&vma->active);
}

/**
 * i915_vma_get_current_resource - Get the current resource of the vma
 * @vma: The vma to get the current resource from.
 *
 * It's illegal to call this function if the vma is not bound.
 *
 * Return: A refcounted pointer to the current vma resource
 * of the vma, assuming the vma is bound.
 */
static inline struct i915_vma_resource *
i915_vma_get_current_resource(struct i915_vma *vma)
{
	return i915_vma_resource_get(vma->resource);
}
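
/*
 * Sketch (hypothetical caller; assumes i915_vma_resource_put() from
 * i915_vma_resource.h): take a reference on the bound vma's current
 * resource, use it, then drop the reference.
 *
 *	struct i915_vma_resource *vma_res;
 *
 *	vma_res = i915_vma_get_current_resource(vma);
 *	...
 *	i915_vma_resource_put(vma_res);
 */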

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
void i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
				     struct i915_vma *vma);
#endif

void i915_vma_module_exit(void);
int i915_vma_module_init(void);

I915_SELFTEST_DECLARE(int i915_vma_get_pages(struct i915_vma *vma));
I915_SELFTEST_DECLARE(void i915_vma_put_pages(struct i915_vma *vma));

#endif