cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

msm_gem.h (12757B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__

#include <linux/kref.h>
#include <linux/dma-resv.h>
#include "drm/gpu_scheduler.h"
#include "msm_drv.h"

/* Make all GEM-related WARN_ON()s ratelimited: when things go wrong they
 * tend to go wrong 1000s of times in a short timespan.
 */
#define GEM_WARN_ON(x)  WARN_RATELIMIT(x, "%s", __stringify(x))

/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
#define MSM_BO_MAP_PRIV      0x20000000    /* use IOMMU_PRIV when mapping */

struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
	spinlock_t lock; /* Protects drm_mm node allocation/removal */
	struct msm_mmu *mmu;
	struct kref kref;

	/* For address spaces associated with a specific process, this
	 * will be non-NULL:
	 */
	struct pid *pid;

	/* @faults: the number of GPU hangs associated with this address space */
	int faults;

	/** @va_start: lowest possible address to allocate */
	uint64_t va_start;

	/** @va_size: the size of the address space (in bytes) */
	uint64_t va_size;
};

struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace);

void msm_gem_address_space_put(struct msm_gem_address_space *aspace);

struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size);

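/*
 * Usage sketch (hypothetical caller; the va_start/size values are
 * illustrative only): an address space is created once around an
 * msm_mmu and then refcounted:
 *
 *	struct msm_gem_address_space *aspace;
 *
 *	aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
 *			0x100000000ULL - SZ_16M);
 *	if (IS_ERR(aspace))
 *		return PTR_ERR(aspace);
 *	...
 *	msm_gem_address_space_put(aspace);
 *
 * msm_gem_address_space_get() takes an additional reference for anything
 * that outlives the creator; each get must be paired with a put.
 */
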
struct msm_fence_context;

struct msm_gem_vma {
	struct drm_mm_node node;
	uint64_t iova;
	struct msm_gem_address_space *aspace;
	struct list_head list;    /* node in msm_gem_object::vmas */
	bool mapped;
	int inuse;
	uint32_t fence_mask;
	uint32_t fence[MSM_GPU_MAX_RINGS];
	struct msm_fence_context *fctx[MSM_GPU_MAX_RINGS];
};

int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end);
bool msm_gem_vma_inuse(struct msm_gem_vma *vma);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
void msm_gem_unpin_vma(struct msm_gem_vma *vma);
void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int size);
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);

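/*
 * VMA lifecycle sketch (hypothetical caller, error handling elided):
 * an iova range is reserved out of the address space, the MMU is
 * programmed with the buffer's sg_table, and teardown happens in
 * roughly the reverse order:
 *
 *	msm_gem_init_vma(aspace, vma, size, 0, U64_MAX);   (reserve iova range)
 *	msm_gem_map_vma(aspace, vma, prot, sgt, size);     (program the MMU)
 *	...
 *	msm_gem_unpin_vma(vma);                            (drop inuse count)
 *	msm_gem_purge_vma(aspace, vma);                    (unmap from the MMU)
 *	msm_gem_close_vma(aspace, vma);                    (release iova range)
 */
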
struct msm_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	uint8_t madv;

	/**
	 * Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)?
	 */
	bool dontneed : 1;

	/**
	 * Is object evictable (ie. counted in priv->evictable_count)?
	 */
	bool evictable : 1;

	/**
	 * count of active vmap'ing
	 */
	uint8_t vmap_count;

	/**
	 * Node in list of all objects (mainly for debugfs, protected by
	 * priv->obj_lock)
	 */
	struct list_head node;

	/**
	 * An object is either:
	 *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
	 *     (depending on purgeability status)
	 *  active   - on one of the gpu's active_lists..  well, at least
	 *     for now we don't have (I don't think) hw sync between 2d and
	 *     3d on devices which have both, meaning we need to block on
	 *     submit if a bo is already on the other ring
	 */
	struct list_head mm_list;

	struct page **pages;
	struct sg_table *sgt;
	void *vaddr;

	struct list_head vmas;    /* list of msm_gem_vma */

	/* For physically contiguous buffers.  Used when we don't have
	 * an IOMMU.  Also used for stolen/splashscreen buffer.
	 */
	struct drm_mm_node *vram_node;

	char name[32]; /* Identifier to print for the debugfs files */

	int active_count;
	int pin_count;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
void msm_gem_unpin_locked(struct drm_gem_object *obj);
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
					   struct msm_gem_address_space *aspace);
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_set_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t iova);
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end);
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace);
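
/*
 * Pinning sketch (hypothetical caller): msm_gem_get_and_pin_iova()
 * resolves (or creates) the vma for @aspace, pins the backing pages
 * and returns the GPU address; every successful pin is balanced by an
 * unpin once the GPU no longer needs the mapping:
 *
 *	uint64_t iova;
 *	int ret;
 *
 *	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	...hand iova to the GPU...
 *	msm_gem_unpin_iova(obj, aspace);
 */
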
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset);
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
void msm_gem_active_put(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags);
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova);
void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace);
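
/*
 * Kernel-internal allocation sketch (hypothetical caller; MSM_BO_WC is
 * the uapi write-combine flag): msm_gem_kernel_new() allocates a BO,
 * pins it into @aspace and maps it for the CPU, returning the CPU
 * address, with the GEM object and GPU address returned through @bo
 * and @iova; teardown is a single kernel_put:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr;
 *
 *	vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	...CPU writes via vaddr, GPU reads via iova...
 *	msm_gem_kernel_put(bo, aspace);
 */
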
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt);
__printf(2, 3)
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);

#ifdef CONFIG_DEBUG_FS
struct msm_gem_stats {
	struct {
		unsigned count;
		size_t size;
	} all, active, resident, purgeable, purged;
};

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
#endif

static inline void
msm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}

static inline bool __must_check
msm_gem_trylock(struct drm_gem_object *obj)
{
	return dma_resv_trylock(obj->resv);
}

static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->resv, NULL);
}

static inline void
msm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}

static inline bool
msm_gem_is_locked(struct drm_gem_object *obj)
{
	return dma_resv_is_locked(obj->resv);
}

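/*
 * Locking sketch (hypothetical caller): the helpers above are thin
 * wrappers around the object's dma_resv lock, so the usual patterns
 * apply:
 *
 *	msm_gem_lock(obj);
 *	...mutate object state (pages, vmas, vmap_count, ...)...
 *	msm_gem_unlock(obj);
 *
 * or, when blocking must remain interruptible:
 *
 *	ret = msm_gem_lock_interruptible(obj);
 *	if (ret)
 *		return ret;
 */
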
static inline bool is_active(struct msm_gem_object *msm_obj)
{
	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
	return msm_obj->active_count;
}

/* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
	return msm_obj->base.import_attach || msm_obj->pin_count;
}

static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
			!is_unpurgeable(msm_obj);
}

static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}

static inline void mark_purgeable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unpurgeable(msm_obj))
		return;

	if (GEM_WARN_ON(msm_obj->dontneed))
		return;

	priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
	msm_obj->dontneed = true;
}

static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unpurgeable(msm_obj))
		return;

	if (GEM_WARN_ON(!msm_obj->dontneed))
		return;

	priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
	GEM_WARN_ON(priv->shrinkable_count < 0);
	msm_obj->dontneed = false;
}
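
/*
 * Editorial note on the accounting above: shrinkable_count is kept in
 * pages, so e.g. a BO with base.size of 64KiB contributes 16 pages on
 * a 4KiB-page system (64KiB >> PAGE_SHIFT).  mark_purgeable() and
 * mark_unpurgeable() must run in pairs under priv->mm_lock, otherwise
 * the shrinker's view of reclaimable memory drifts (which the
 * GEM_WARN_ON()s above are there to catch).
 */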

static inline bool is_unevictable(struct msm_gem_object *msm_obj)
{
	return is_unpurgeable(msm_obj) || msm_obj->vaddr;
}

static inline void mark_evictable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unevictable(msm_obj))
		return;

	if (WARN_ON(msm_obj->evictable))
		return;

	priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT;
	msm_obj->evictable = true;
}

static inline void mark_unevictable(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	WARN_ON(!mutex_is_locked(&priv->mm_lock));

	if (is_unevictable(msm_obj))
		return;

	if (WARN_ON(!msm_obj->evictable))
		return;

	priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT;
	WARN_ON(priv->evictable_count < 0);
	msm_obj->evictable = false;
}

void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_evict(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);

/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
 * make it easier to unwind when things go wrong, etc).
 */
struct msm_gem_submit {
	struct drm_sched_job base;
	struct kref ref;
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct msm_gem_address_space *aspace;
	struct list_head node;   /* node in ring submit list */
	struct ww_acquire_ctx ticket;
	uint32_t seqno;		/* Sequence number of the submit on the ring */

	/* Hw fence, which is created when the scheduler executes the job, and
	 * is signaled when the hw finishes (via seqno write from cmdstream)
	 */
	struct dma_fence *hw_fence;

	/* Userspace visible fence, which is signaled by the scheduler after
	 * the hw_fence is signaled.
	 */
	struct dma_fence *user_fence;

	int fence_id;       /* key into queue->fence_idr */
	struct msm_gpu_submitqueue *queue;
	struct pid *pid;    /* submitting process */
	bool fault_dumped;  /* Limit devcoredump dumping to one per submit */
	bool valid;         /* true if no cmdstream patching needed */
	bool in_rb;         /* "sudo" mode, copy cmds into RB */
	struct msm_ringbuffer *ring;
	unsigned int nr_cmds;
	unsigned int nr_bos;
	u32 ident;	   /* An identifier for the submit, for logging */
	struct {
		uint32_t type;
		uint32_t size;  /* in dwords */
		uint64_t iova;
		uint32_t offset;/* in dwords */
		uint32_t idx;   /* cmdstream buffer idx in bos[] */
		uint32_t nr_relocs;
		struct drm_msm_gem_submit_reloc *relocs;
	} *cmd;  /* array of size nr_cmds */
	struct {
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID	0x8000	/* is current addr in cmdstream correct/valid? */
#define BO_LOCKED	0x4000	/* obj lock is held */
#define BO_ACTIVE	0x2000	/* active refcnt is held */
#define BO_OBJ_PINNED	0x1000	/* obj (pages) is pinned and on active list */
#define BO_VMA_PINNED	0x0800	/* vma (virtual address) is pinned */
		uint32_t flags;
		union {
			struct msm_gem_object *obj;
			uint32_t handle;
		};
		uint64_t iova;
		struct msm_gem_vma *vma;
	} bos[];
};
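
/*
 * Allocation sketch (hypothetical; the real allocation lives in
 * msm_gem_submit.c): since bos[] is a flexible array member, a submit
 * tracking nr_bos buffers would be sized with struct_size() from
 * <linux/overflow.h>:
 *
 *	submit = kzalloc(struct_size(submit, bos, nr_bos), GFP_KERNEL);
 */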

static inline struct msm_gem_submit *to_msm_submit(struct drm_sched_job *job)
{
	return container_of(job, struct msm_gem_submit, base);
}

void __msm_gem_submit_destroy(struct kref *kref);

static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
{
	kref_get(&submit->ref);
}

static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
{
	kref_put(&submit->ref, __msm_gem_submit_destroy);
}
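
/*
 * Refcounting sketch (hypothetical caller): anything that keeps a
 * submit alive beyond the submit ioctl (the ring's submit list, fence
 * callbacks, retire workers, ...) takes its own reference:
 *
 *	msm_gem_submit_get(submit);
 *	...stash the pointer...
 *	msm_gem_submit_put(submit);   (last put frees via __msm_gem_submit_destroy)
 */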

void msm_submit_retire(struct msm_gem_submit *submit);

/* helper to determine if a buffer in submit should be dumped, used for both
 * devcoredump and debugfs cmdstream dumping:
 */
static inline bool
should_dump(struct msm_gem_submit *submit, int idx)
{
	extern bool rd_full;
	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
}
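
/*
 * Typical use (a sketch; rd_full is a knob defined elsewhere in the
 * driver that forces full dumps):
 *
 *	for (i = 0; i < submit->nr_bos; i++) {
 *		if (should_dump(submit, i))
 *			...snapshot the contents of submit->bos[i].obj...
 *	}
 */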

#endif /* __MSM_GEM_H__ */