cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i915_gem_object.h (19118B)


/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "intel_memory_region.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ww.h"
#include "i915_vma_types.h"

enum intel_region_id;

/*
 * XXX: There is a prevalence of the assumption that we fit the
 * object's page count inside a 32bit _signed_ variable. Let's document
 * this and catch if we ever need to fix it. In the meantime, if you do
 * spot such a local variable, please consider fixing!
 *
 * Aside from our own locals (for which we have no excuse!):
 * - sg_table embeds unsigned int for num_pages
 * - get_user_pages*() mixed ints with longs
 */
#define GEM_CHECK_SIZE_OVERFLOW(sz) \
	GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX)

static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (GEM_CHECK_SIZE_OVERFLOW(size))
		return true;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}

void i915_gem_init__objects(struct drm_i915_private *i915);

void i915_objects_module_exit(void);
int i915_objects_module_init(void);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key,
			  unsigned alloc_flags);

void __i915_gem_object_fini(struct drm_i915_gem_object *obj);

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, resource_size_t size);
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;

void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}
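
/*
 * Example (a minimal sketch, not taken from this tree): translating a
 * userspace handle into a reference-counted object and dropping the
 * reference when done. The ioctl context (file, args->handle) and error
 * handling are assumed.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... use obj; the lookup took a reference via kref_get_unless_zero() ...
 *
 *	i915_gem_object_put(obj);
 */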

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

/*
 * If more than one potential simultaneous locker, assert held.
 */
static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
	/*
	 * Note mm list lookup is protected by
	 * kref_get_unless_zero().
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}

static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	if (!ret && ww) {
		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_link, &ww->obj_list);
	}
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK) {
		i915_gem_object_get(obj);
		ww->contended = obj;
	}

	return ret;
}

static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
				       struct i915_gem_ww_ctx *ww)
{
	return __i915_gem_object_lock(obj, ww, ww && ww->intr);
}

static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
						     struct i915_gem_ww_ctx *ww)
{
	WARN_ON(ww && !ww->intr);
	return __i915_gem_object_lock(obj, ww, true);
}

static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj,
					   struct i915_gem_ww_ctx *ww)
{
	if (!ww)
		return dma_resv_trylock(obj->base.resv);
	else
		return ww_mutex_trylock(&obj->base.resv->lock, &ww->ctx);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	if (obj->ops->adjust_lru)
		obj->ops->adjust_lru(obj);

	dma_resv_unlock(obj->base.resv);
}
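
/*
 * Example (a minimal sketch of the usual ww transaction, assuming an
 * interruptible context): -EDEADLK from i915_gem_object_lock() means the
 * whole transaction must back off and retry, while -EALREADY is already
 * folded into success by __i915_gem_object_lock() above. The ww context
 * unlocks and unreferences all locked objects in i915_gem_ww_ctx_fini().
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err) {
 *		... operate on obj while it is locked ...
 *	}
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */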

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_READONLY;
}

static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

static inline bool
i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
{
	return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}

static inline bool
i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_PROTECTED;
}

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);

bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}

static inline bool
i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST);
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}

static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset, bool dma);

static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, false);
}

static inline struct scatterlist *
i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
			   unsigned int n,
			   unsigned int *offset)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, true);
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}
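
/*
 * Example (a minimal sketch): keeping the backing pages resident while
 * walking them. i915_gem_object_pin_pages() requires the object lock to be
 * held, whereas i915_gem_object_pin_pages_unlocked() takes and drops the
 * lock internally.
 *
 *	int err;
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *
 *	... e.g. i915_gem_object_get_page(obj, n) for pages in range ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */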

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
						    enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
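
/*
 * Example (a minimal sketch, assuming the object lock is already held and
 * that data/len are supplied by the caller): copy CPU data into an object
 * through a kernel mapping, then flush and release the mapping.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */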

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
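
/*
 * Example (a minimal sketch of CPU access through the backing pages,
 * assuming the object lock is held): prepare_write() pins the pages and
 * reports via the CLFLUSH_* bits whether cachelines must be flushed around
 * the access; finish_access() drops the page pin again.
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	... write the pages, honouring CLFLUSH_BEFORE / CLFLUSH_AFTER ...
 *
 *	i915_gem_object_finish_access(obj);
 */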

int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr);

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     struct i915_gem_ww_ctx *ww,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (i915_gem_cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr);

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);

int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id);

bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id);

int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
				   unsigned int flags);

bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type);

int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
			 size_t size, struct intel_memory_region *mr,
			 struct address_space *mapping,
			 unsigned int max_segment);
void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
			 bool dirty, bool backup);
void __shmem_writeback(size_t size, struct address_space *mapping);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
{
	return obj->userptr.notifier.mm;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
#else
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }

#endif

#endif