cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i915_drm.h (117122B)


      1/*
      2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
      3 * All Rights Reserved.
      4 *
      5 * Permission is hereby granted, free of charge, to any person obtaining a
      6 * copy of this software and associated documentation files (the
      7 * "Software"), to deal in the Software without restriction, including
      8 * without limitation the rights to use, copy, modify, merge, publish,
      9 * distribute, sub license, and/or sell copies of the Software, and to
     10 * permit persons to whom the Software is furnished to do so, subject to
     11 * the following conditions:
     12 *
     13 * The above copyright notice and this permission notice (including the
     14 * next paragraph) shall be included in all copies or substantial portions
     15 * of the Software.
     16 *
     17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
     18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
     20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
     21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
     22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
     23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     24 *
     25 */
     26
     27#ifndef _UAPI_I915_DRM_H_
     28#define _UAPI_I915_DRM_H_
     29
     30#include "drm.h"
     31
     32#if defined(__cplusplus)
     33extern "C" {
     34#endif
     35
     36/* Please note that modifications to all structs defined here are
     37 * subject to backwards-compatibility constraints.
     38 */
     39
     40/**
      41 * DOC: uevents generated by i915 on its device node
     42 *
     43 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
     44 *	event from the gpu l3 cache. Additional information supplied is ROW,
      45 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
      46 *	track of these events, and if a specific cache-line seems to have a
      47 *	persistent error, remap it with the l3 remapping tool supplied in
     48 *	intel-gpu-tools.  The value supplied with the event is always 1.
     49 *
     50 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
     51 *	hangcheck. The error detection event is a good indicator of when things
     52 *	began to go badly. The value supplied with the event is a 1 upon error
     53 *	detection, and a 0 upon reset completion, signifying no more error
     54 *	exists. NOTE: Disabling hangcheck or reset via module parameter will
     55 *	cause the related events to not be seen.
     56 *
     57 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
      58 *	GPU. The value supplied with the event is always 1. NOTE: Disabling
      59 *	reset via module parameter will cause this event to not be seen.
     60 */
     61#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
     62#define I915_ERROR_UEVENT		"ERROR"
     63#define I915_RESET_UEVENT		"RESET"
     64
     65/**
     66 * struct i915_user_extension - Base class for defining a chain of extensions
     67 *
     68 * Many interfaces need to grow over time. In most cases we can simply
     69 * extend the struct and have userspace pass in more data. Another option,
     70 * as demonstrated by Vulkan's approach to providing extensions for forward
     71 * and backward compatibility, is to use a list of optional structs to
     72 * provide those extra details.
     73 *
     74 * The key advantage to using an extension chain is that it allows us to
     75 * redefine the interface more easily than an ever growing struct of
     76 * increasing complexity, and for large parts of that interface to be
     77 * entirely optional. The downside is more pointer chasing; chasing across
     78 * the __user boundary with pointers encapsulated inside u64.
     79 *
     80 * Example chaining:
     81 *
     82 * .. code-block:: C
     83 *
     84 *	struct i915_user_extension ext3 {
     85 *		.next_extension = 0, // end
     86 *		.name = ...,
     87 *	};
     88 *	struct i915_user_extension ext2 {
     89 *		.next_extension = (uintptr_t)&ext3,
     90 *		.name = ...,
     91 *	};
     92 *	struct i915_user_extension ext1 {
     93 *		.next_extension = (uintptr_t)&ext2,
     94 *		.name = ...,
     95 *	};
     96 *
     97 * Typically the struct i915_user_extension would be embedded in some uAPI
      98 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
     99 * which would then apply all of the above extensions.
    100 *
    101 */
    102struct i915_user_extension {
    103	/**
    104	 * @next_extension:
    105	 *
    106	 * Pointer to the next struct i915_user_extension, or zero if the end.
    107	 */
    108	__u64 next_extension;
    109	/**
    110	 * @name: Name of the extension.
    111	 *
    112	 * Note that the name here is just some integer.
    113	 *
    114	 * Also note that the name space for this is not global for the whole
    115	 * driver, but rather its scope/meaning is limited to the specific piece
    116	 * of uAPI which has embedded the struct i915_user_extension.
    117	 */
    118	__u32 name;
    119	/**
    120	 * @flags: MBZ
    121	 *
    122	 * All undefined bits must be zero.
    123	 */
    124	__u32 flags;
    125	/**
    126	 * @rsvd: MBZ
    127	 *
    128	 * Reserved for future use; must be zero.
    129	 */
    130	__u32 rsvd[4];
    131};
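
/*
 * A minimal usage sketch (not part of this uAPI): SOME_UAPI_EXTENSION and
 * struct some_uapi_args below are hypothetical placeholders for a real
 * extension name and for whichever uAPI struct embeds a __u64 @extensions
 * field (see e.g. struct drm_i915_gem_mmap_offset further down).
 *
 * .. code-block:: C
 *
 *	struct i915_user_extension ext = {
 *		.next_extension = 0,	// end of the chain
 *		.name = SOME_UAPI_EXTENSION,
 *	};
 *	struct some_uapi_args args = {
 *		.extensions = (uintptr_t)&ext,	// head of the chain
 *	};
 */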
    132
    133/*
    134 * MOCS indexes used for GPU surfaces, defining the cacheability of the
    135 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
    136 */
    137enum i915_mocs_table_index {
    138	/*
    139	 * Not cached anywhere, coherency between CPU and GPU accesses is
    140	 * guaranteed.
    141	 */
    142	I915_MOCS_UNCACHED,
    143	/*
    144	 * Cacheability and coherency controlled by the kernel automatically
    145	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
    146	 * usage of the surface (used for display scanout or not).
    147	 */
    148	I915_MOCS_PTE,
    149	/*
    150	 * Cached in all GPU caches available on the platform.
    151	 * Coherency between CPU and GPU accesses to the surface is not
    152	 * guaranteed without extra synchronization.
    153	 */
    154	I915_MOCS_CACHED,
    155};
    156
    157/**
    158 * enum drm_i915_gem_engine_class - uapi engine type enumeration
    159 *
    160 * Different engines serve different roles, and there may be more than one
    161 * engine serving each role.  This enum provides a classification of the role
    162 * of the engine, which may be used when requesting operations to be performed
    163 * on a certain subset of engines, or for providing information about that
    164 * group.
    165 */
    166enum drm_i915_gem_engine_class {
    167	/**
    168	 * @I915_ENGINE_CLASS_RENDER:
    169	 *
    170	 * Render engines support instructions used for 3D, Compute (GPGPU),
    171	 * and programmable media workloads.  These instructions fetch data and
    172	 * dispatch individual work items to threads that operate in parallel.
    173	 * The threads run small programs (called "kernels" or "shaders") on
    174	 * the GPU's execution units (EUs).
    175	 */
    176	I915_ENGINE_CLASS_RENDER	= 0,
    177
    178	/**
    179	 * @I915_ENGINE_CLASS_COPY:
    180	 *
    181	 * Copy engines (also referred to as "blitters") support instructions
    182	 * that move blocks of data from one location in memory to another,
    183	 * or that fill a specified location of memory with fixed data.
    184	 * Copy engines can perform pre-defined logical or bitwise operations
    185	 * on the source, destination, or pattern data.
    186	 */
    187	I915_ENGINE_CLASS_COPY		= 1,
    188
    189	/**
    190	 * @I915_ENGINE_CLASS_VIDEO:
    191	 *
    192	 * Video engines (also referred to as "bit stream decode" (BSD) or
    193	 * "vdbox") support instructions that perform fixed-function media
    194	 * decode and encode.
    195	 */
    196	I915_ENGINE_CLASS_VIDEO		= 2,
    197
    198	/**
    199	 * @I915_ENGINE_CLASS_VIDEO_ENHANCE:
    200	 *
    201	 * Video enhancement engines (also referred to as "vebox") support
    202	 * instructions related to image enhancement.
    203	 */
    204	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,
    205
    206	/**
    207	 * @I915_ENGINE_CLASS_COMPUTE:
    208	 *
    209	 * Compute engines support a subset of the instructions available
    210	 * on render engines:  compute engines support Compute (GPGPU) and
    211	 * programmable media workloads, but do not support the 3D pipeline.
    212	 */
    213	I915_ENGINE_CLASS_COMPUTE	= 4,
    214
    215	/* Values in this enum should be kept compact. */
    216
    217	/**
    218	 * @I915_ENGINE_CLASS_INVALID:
    219	 *
    220	 * Placeholder value to represent an invalid engine class assignment.
    221	 */
    222	I915_ENGINE_CLASS_INVALID	= -1
    223};
    224
    225/**
    226 * struct i915_engine_class_instance - Engine class/instance identifier
    227 *
    228 * There may be more than one engine fulfilling any role within the system.
    229 * Each engine of a class is given a unique instance number and therefore
     230 * any engine can be specified by its class:instance tuple. APIs that allow
    231 * access to any engine in the system will use struct i915_engine_class_instance
    232 * for this identification.
    233 */
    234struct i915_engine_class_instance {
    235	/**
    236	 * @engine_class:
    237	 *
    238	 * Engine class from enum drm_i915_gem_engine_class
    239	 */
    240	__u16 engine_class;
    241#define I915_ENGINE_CLASS_INVALID_NONE -1
    242#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
    243
    244	/**
    245	 * @engine_instance:
    246	 *
    247	 * Engine instance.
    248	 */
    249	__u16 engine_instance;
    250};
    251
    252/**
    253 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
    254 *
    255 */
    256
    257enum drm_i915_pmu_engine_sample {
    258	I915_SAMPLE_BUSY = 0,
    259	I915_SAMPLE_WAIT = 1,
    260	I915_SAMPLE_SEMA = 2
    261};
    262
    263#define I915_PMU_SAMPLE_BITS (4)
    264#define I915_PMU_SAMPLE_MASK (0xf)
    265#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
    266#define I915_PMU_CLASS_SHIFT \
    267	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
    268
    269#define __I915_PMU_ENGINE(class, instance, sample) \
    270	((class) << I915_PMU_CLASS_SHIFT | \
    271	(instance) << I915_PMU_SAMPLE_BITS | \
    272	(sample))
    273
    274#define I915_PMU_ENGINE_BUSY(class, instance) \
    275	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
    276
    277#define I915_PMU_ENGINE_WAIT(class, instance) \
    278	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
    279
    280#define I915_PMU_ENGINE_SEMA(class, instance) \
    281	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
    282
    283#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
    284
    285#define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
    286#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
    287#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
    288#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
    289#define I915_PMU_SOFTWARE_GT_AWAKE_TIME	__I915_PMU_OTHER(4)
    290
    291#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
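
/*
 * A minimal userspace sketch (not part of this uAPI) of sampling the render
 * engine busyness counter through perf. It assumes the dynamic PMU type id has
 * already been read from sysfs into i915_pmu_type, that <linux/perf_event.h>,
 * <unistd.h> and <sys/syscall.h> are included, and it omits error handling.
 * i915 PMU events are system-wide, hence pid = -1 and an explicit CPU.
 *
 * .. code-block:: C
 *
 *	struct perf_event_attr attr = {
 *		.type = i915_pmu_type,
 *		.size = sizeof(attr),
 *		.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0),
 *	};
 *	int pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	__u64 busy_ns;
 *	read(pfd, &busy_ns, sizeof(busy_ns));	// accumulated busy time
 */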
    292
    293/* Each region is a minimum of 16k, and there are at most 255 of them.
    294 */
    295#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
    296				 * of chars for next/prev indices */
    297#define I915_LOG_MIN_TEX_REGION_SIZE 14
    298
    299typedef struct _drm_i915_init {
    300	enum {
    301		I915_INIT_DMA = 0x01,
    302		I915_CLEANUP_DMA = 0x02,
    303		I915_RESUME_DMA = 0x03
    304	} func;
    305	unsigned int mmio_offset;
    306	int sarea_priv_offset;
    307	unsigned int ring_start;
    308	unsigned int ring_end;
    309	unsigned int ring_size;
    310	unsigned int front_offset;
    311	unsigned int back_offset;
    312	unsigned int depth_offset;
    313	unsigned int w;
    314	unsigned int h;
    315	unsigned int pitch;
    316	unsigned int pitch_bits;
    317	unsigned int back_pitch;
    318	unsigned int depth_pitch;
    319	unsigned int cpp;
    320	unsigned int chipset;
    321} drm_i915_init_t;
    322
    323typedef struct _drm_i915_sarea {
    324	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
    325	int last_upload;	/* last time texture was uploaded */
    326	int last_enqueue;	/* last time a buffer was enqueued */
    327	int last_dispatch;	/* age of the most recently dispatched buffer */
    328	int ctxOwner;		/* last context to upload state */
    329	int texAge;
    330	int pf_enabled;		/* is pageflipping allowed? */
    331	int pf_active;
    332	int pf_current_page;	/* which buffer is being displayed? */
    333	int perf_boxes;		/* performance boxes to be displayed */
    334	int width, height;      /* screen size in pixels */
    335
    336	drm_handle_t front_handle;
    337	int front_offset;
    338	int front_size;
    339
    340	drm_handle_t back_handle;
    341	int back_offset;
    342	int back_size;
    343
    344	drm_handle_t depth_handle;
    345	int depth_offset;
    346	int depth_size;
    347
    348	drm_handle_t tex_handle;
    349	int tex_offset;
    350	int tex_size;
    351	int log_tex_granularity;
    352	int pitch;
    353	int rotation;           /* 0, 90, 180 or 270 */
    354	int rotated_offset;
    355	int rotated_size;
    356	int rotated_pitch;
    357	int virtualX, virtualY;
    358
    359	unsigned int front_tiled;
    360	unsigned int back_tiled;
    361	unsigned int depth_tiled;
    362	unsigned int rotated_tiled;
    363	unsigned int rotated2_tiled;
    364
    365	int pipeA_x;
    366	int pipeA_y;
    367	int pipeA_w;
    368	int pipeA_h;
    369	int pipeB_x;
    370	int pipeB_y;
    371	int pipeB_w;
    372	int pipeB_h;
    373
    374	/* fill out some space for old userspace triple buffer */
    375	drm_handle_t unused_handle;
    376	__u32 unused1, unused2, unused3;
    377
    378	/* buffer object handles for static buffers. May change
    379	 * over the lifetime of the client.
    380	 */
    381	__u32 front_bo_handle;
    382	__u32 back_bo_handle;
    383	__u32 unused_bo_handle;
    384	__u32 depth_bo_handle;
    385
    386} drm_i915_sarea_t;
    387
    388/* due to userspace building against these headers we need some compat here */
    389#define planeA_x pipeA_x
    390#define planeA_y pipeA_y
    391#define planeA_w pipeA_w
    392#define planeA_h pipeA_h
    393#define planeB_x pipeB_x
    394#define planeB_y pipeB_y
    395#define planeB_w pipeB_w
    396#define planeB_h pipeB_h
    397
    398/* Flags for perf_boxes
    399 */
    400#define I915_BOX_RING_EMPTY    0x1
    401#define I915_BOX_FLIP          0x2
    402#define I915_BOX_WAIT          0x4
    403#define I915_BOX_TEXTURE_LOAD  0x8
    404#define I915_BOX_LOST_CONTEXT  0x10
    405
    406/*
    407 * i915 specific ioctls.
    408 *
     409 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
     410 * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
     411 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
    412 */
    413#define DRM_I915_INIT		0x00
    414#define DRM_I915_FLUSH		0x01
    415#define DRM_I915_FLIP		0x02
    416#define DRM_I915_BATCHBUFFER	0x03
    417#define DRM_I915_IRQ_EMIT	0x04
    418#define DRM_I915_IRQ_WAIT	0x05
    419#define DRM_I915_GETPARAM	0x06
    420#define DRM_I915_SETPARAM	0x07
    421#define DRM_I915_ALLOC		0x08
    422#define DRM_I915_FREE		0x09
    423#define DRM_I915_INIT_HEAP	0x0a
    424#define DRM_I915_CMDBUFFER	0x0b
    425#define DRM_I915_DESTROY_HEAP	0x0c
    426#define DRM_I915_SET_VBLANK_PIPE	0x0d
    427#define DRM_I915_GET_VBLANK_PIPE	0x0e
    428#define DRM_I915_VBLANK_SWAP	0x0f
    429#define DRM_I915_HWS_ADDR	0x11
    430#define DRM_I915_GEM_INIT	0x13
    431#define DRM_I915_GEM_EXECBUFFER	0x14
    432#define DRM_I915_GEM_PIN	0x15
    433#define DRM_I915_GEM_UNPIN	0x16
    434#define DRM_I915_GEM_BUSY	0x17
    435#define DRM_I915_GEM_THROTTLE	0x18
    436#define DRM_I915_GEM_ENTERVT	0x19
    437#define DRM_I915_GEM_LEAVEVT	0x1a
    438#define DRM_I915_GEM_CREATE	0x1b
    439#define DRM_I915_GEM_PREAD	0x1c
    440#define DRM_I915_GEM_PWRITE	0x1d
    441#define DRM_I915_GEM_MMAP	0x1e
    442#define DRM_I915_GEM_SET_DOMAIN	0x1f
    443#define DRM_I915_GEM_SW_FINISH	0x20
    444#define DRM_I915_GEM_SET_TILING	0x21
    445#define DRM_I915_GEM_GET_TILING	0x22
    446#define DRM_I915_GEM_GET_APERTURE 0x23
    447#define DRM_I915_GEM_MMAP_GTT	0x24
    448#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
    449#define DRM_I915_GEM_MADVISE	0x26
    450#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
    451#define DRM_I915_OVERLAY_ATTRS	0x28
    452#define DRM_I915_GEM_EXECBUFFER2	0x29
    453#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
    454#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
    455#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
    456#define DRM_I915_GEM_WAIT	0x2c
    457#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
    458#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
    459#define DRM_I915_GEM_SET_CACHING	0x2f
    460#define DRM_I915_GEM_GET_CACHING	0x30
    461#define DRM_I915_REG_READ		0x31
    462#define DRM_I915_GET_RESET_STATS	0x32
    463#define DRM_I915_GEM_USERPTR		0x33
    464#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
    465#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
    466#define DRM_I915_PERF_OPEN		0x36
    467#define DRM_I915_PERF_ADD_CONFIG	0x37
    468#define DRM_I915_PERF_REMOVE_CONFIG	0x38
    469#define DRM_I915_QUERY			0x39
    470#define DRM_I915_GEM_VM_CREATE		0x3a
    471#define DRM_I915_GEM_VM_DESTROY		0x3b
    472#define DRM_I915_GEM_CREATE_EXT		0x3c
    473/* Must be kept compact -- no holes */
    474
    475#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
    476#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
    477#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
    478#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
    479#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
    480#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
    481#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
    482#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
    483#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
    484#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
    485#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
    486#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
    487#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
    488#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
    489#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
    490#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
    491#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
    492#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
    493#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
    494#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
    495#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
    496#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
    497#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
    498#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
    499#define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
    500#define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
    501#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
    502#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
    503#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
    504#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
    505#define DRM_IOCTL_I915_GEM_CREATE_EXT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
    506#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
    507#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
    508#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
    509#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
    510#define DRM_IOCTL_I915_GEM_MMAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
    511#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
    512#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
    513#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
    514#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
    515#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
    516#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
    517#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
    518#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
    519#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
    520#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
    521#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
    522#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
    523#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
    524#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
    525#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
    526#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
    527#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
    528#define DRM_IOCTL_I915_GEM_USERPTR			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
    529#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
    530#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
    531#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
    532#define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
    533#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
    534#define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
    535#define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
    536#define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
    537
    538/* Allow drivers to submit batchbuffers directly to hardware, relying
    539 * on the security mechanisms provided by hardware.
    540 */
    541typedef struct drm_i915_batchbuffer {
    542	int start;		/* agp offset */
    543	int used;		/* nr bytes in use */
    544	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
    545	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
     546	int num_cliprects;	/* multipass with multiple cliprects? */
    547	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
    548} drm_i915_batchbuffer_t;
    549
     550/* As above, but pass a pointer to a userspace buffer which can be
    551 * validated by the kernel prior to sending to hardware.
    552 */
    553typedef struct _drm_i915_cmdbuffer {
    554	char __user *buf;	/* pointer to userspace command buffer */
    555	int sz;			/* nr bytes in buf */
    556	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
    557	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
     558	int num_cliprects;	/* multipass with multiple cliprects? */
    559	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
    560} drm_i915_cmdbuffer_t;
    561
     562/* Userspace can request & wait on IRQs:
    563 */
    564typedef struct drm_i915_irq_emit {
    565	int __user *irq_seq;
    566} drm_i915_irq_emit_t;
    567
    568typedef struct drm_i915_irq_wait {
    569	int irq_seq;
    570} drm_i915_irq_wait_t;
    571
    572/*
    573 * Different modes of per-process Graphics Translation Table,
    574 * see I915_PARAM_HAS_ALIASING_PPGTT
    575 */
    576#define I915_GEM_PPGTT_NONE	0
    577#define I915_GEM_PPGTT_ALIASING	1
    578#define I915_GEM_PPGTT_FULL	2
    579
    580/* Ioctl to query kernel params:
    581 */
    582#define I915_PARAM_IRQ_ACTIVE            1
    583#define I915_PARAM_ALLOW_BATCHBUFFER     2
    584#define I915_PARAM_LAST_DISPATCH         3
    585#define I915_PARAM_CHIPSET_ID            4
    586#define I915_PARAM_HAS_GEM               5
    587#define I915_PARAM_NUM_FENCES_AVAIL      6
    588#define I915_PARAM_HAS_OVERLAY           7
    589#define I915_PARAM_HAS_PAGEFLIPPING	 8
    590#define I915_PARAM_HAS_EXECBUF2          9
    591#define I915_PARAM_HAS_BSD		 10
    592#define I915_PARAM_HAS_BLT		 11
    593#define I915_PARAM_HAS_RELAXED_FENCING	 12
    594#define I915_PARAM_HAS_COHERENT_RINGS	 13
    595#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
    596#define I915_PARAM_HAS_RELAXED_DELTA	 15
    597#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
    598#define I915_PARAM_HAS_LLC     	 	 17
    599#define I915_PARAM_HAS_ALIASING_PPGTT	 18
    600#define I915_PARAM_HAS_WAIT_TIMEOUT	 19
    601#define I915_PARAM_HAS_SEMAPHORES	 20
    602#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
    603#define I915_PARAM_HAS_VEBOX		 22
    604#define I915_PARAM_HAS_SECURE_BATCHES	 23
    605#define I915_PARAM_HAS_PINNED_BATCHES	 24
    606#define I915_PARAM_HAS_EXEC_NO_RELOC	 25
    607#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
    608#define I915_PARAM_HAS_WT     	 	 27
    609#define I915_PARAM_CMD_PARSER_VERSION	 28
    610#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
    611#define I915_PARAM_MMAP_VERSION          30
    612#define I915_PARAM_HAS_BSD2		 31
    613#define I915_PARAM_REVISION              32
    614#define I915_PARAM_SUBSLICE_TOTAL	 33
    615#define I915_PARAM_EU_TOTAL		 34
    616#define I915_PARAM_HAS_GPU_RESET	 35
    617#define I915_PARAM_HAS_RESOURCE_STREAMER 36
    618#define I915_PARAM_HAS_EXEC_SOFTPIN	 37
    619#define I915_PARAM_HAS_POOLED_EU	 38
    620#define I915_PARAM_MIN_EU_IN_POOL	 39
    621#define I915_PARAM_MMAP_GTT_VERSION	 40
    622
    623/*
    624 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
    625 * priorities and the driver will attempt to execute batches in priority order.
     626 * The param returns a capability bitmask; a nonzero value implies that the
     627 * scheduler is enabled, with different features present according to the mask.
    628 *
    629 * The initial priority for each batch is supplied by the context and is
    630 * controlled via I915_CONTEXT_PARAM_PRIORITY.
    631 */
    632#define I915_PARAM_HAS_SCHEDULER	 41
    633#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
    634#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
    635#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
    636#define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
    637#define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
    638/*
    639 * Indicates the 2k user priority levels are statically mapped into 3 buckets as
    640 * follows:
    641 *
    642 * -1k to -1	Low priority
    643 * 0		Normal priority
    644 * 1 to 1k	Highest priority
    645 */
    646#define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 5)
    647
    648#define I915_PARAM_HUC_STATUS		 42
    649
    650/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
    651 * synchronisation with implicit fencing on individual objects.
    652 * See EXEC_OBJECT_ASYNC.
    653 */
    654#define I915_PARAM_HAS_EXEC_ASYNC	 43
    655
    656/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
    657 * both being able to pass in a sync_file fd to wait upon before executing,
    658 * and being able to return a new sync_file fd that is signaled when the
    659 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
    660 */
    661#define I915_PARAM_HAS_EXEC_FENCE	 44
    662
    663/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
     664 * user specified buffers for post-mortem debugging of GPU hangs. See
    665 * EXEC_OBJECT_CAPTURE.
    666 */
    667#define I915_PARAM_HAS_EXEC_CAPTURE	 45
    668
    669#define I915_PARAM_SLICE_MASK		 46
    670
    671/* Assuming it's uniform for each slice, this queries the mask of subslices
    672 * per-slice for this system.
    673 */
    674#define I915_PARAM_SUBSLICE_MASK	 47
    675
    676/*
    677 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
    678 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
    679 */
    680#define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48
    681
    682/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
    683 * drm_i915_gem_exec_fence structures.  See I915_EXEC_FENCE_ARRAY.
    684 */
    685#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49
    686
    687/*
    688 * Query whether every context (both per-file default and user created) is
    689 * isolated (insofar as HW supports). If this parameter is not true, then
    690 * freshly created contexts may inherit values from an existing context,
    691 * rather than default HW values. If true, it also ensures (insofar as HW
    692 * supports) that all state set by this context will not leak to any other
    693 * context.
    694 *
     695 * As not every engine across every gen supports contexts, the returned
    696 * value reports the support of context isolation for individual engines by
    697 * returning a bitmask of each engine class set to true if that class supports
    698 * isolation.
    699 */
    700#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
    701
    702/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
    703 * registers. This used to be fixed per platform but from CNL onwards, this
    704 * might vary depending on the parts.
    705 */
    706#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
    707
    708/*
    709 * Once upon a time we supposed that writes through the GGTT would be
    710 * immediately in physical memory (once flushed out of the CPU path). However,
    711 * on a few different processors and chipsets, this is not necessarily the case
    712 * as the writes appear to be buffered internally. Thus a read of the backing
    713 * storage (physical memory) via a different path (with different physical tags
    714 * to the indirect write via the GGTT) will see stale values from before
    715 * the GGTT write. Inside the kernel, we can for the most part keep track of
    716 * the different read/write domains in use (e.g. set-domain), but the assumption
    717 * of coherency is baked into the ABI, hence reporting its true state in this
    718 * parameter.
    719 *
    720 * Reports true when writes via mmap_gtt are immediately visible following an
    721 * lfence to flush the WCB.
    722 *
     723 * Reports false when writes via mmap_gtt are indeterminately delayed in an
     724 * internal buffer and are _not_ immediately visible to third parties accessing
     725 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
     726 * communications channel when reporting false is strongly discouraged.
    727 */
    728#define I915_PARAM_MMAP_GTT_COHERENT	52
    729
    730/*
    731 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
    732 * execution through use of explicit fence support.
    733 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
    734 */
    735#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
    736
    737/*
    738 * Revision of the i915-perf uAPI. The value returned helps determine what
    739 * i915-perf features are available. See drm_i915_perf_property_id.
    740 */
    741#define I915_PARAM_PERF_REVISION	54
    742
    743/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
    744 * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
    745 * I915_EXEC_USE_EXTENSIONS.
    746 */
    747#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55
    748
    749/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
    750#define I915_PARAM_HAS_USERPTR_PROBE 56
    751
    752/* Must be kept compact -- no holes and well documented */
    753
    754typedef struct drm_i915_getparam {
    755	__s32 param;
    756	/*
    757	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
    758	 * compat32 code. Don't repeat this mistake.
    759	 */
    760	int __user *value;
    761} drm_i915_getparam_t;
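
/*
 * A minimal sketch (not part of this uAPI) of querying a parameter, here the
 * scheduler capabilities. It assumes fd is an open i915 DRM file descriptor
 * and <sys/ioctl.h> is included; error handling is omitted.
 *
 * .. code-block:: C
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_HAS_SCHEDULER,
 *		.value = &value,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *	if (value & I915_SCHEDULER_CAP_ENABLED)
 *		; // scheduler present, inspect the remaining cap bits
 */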
    762
    763/* Ioctl to set kernel params:
    764 */
    765#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
    766#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
    767#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
    768#define I915_SETPARAM_NUM_USED_FENCES                     4
    769/* Must be kept compact -- no holes */
    770
    771typedef struct drm_i915_setparam {
    772	int param;
    773	int value;
    774} drm_i915_setparam_t;
    775
    776/* A memory manager for regions of shared memory:
    777 */
    778#define I915_MEM_REGION_AGP 1
    779
    780typedef struct drm_i915_mem_alloc {
    781	int region;
    782	int alignment;
    783	int size;
    784	int __user *region_offset;	/* offset from start of fb or agp */
    785} drm_i915_mem_alloc_t;
    786
    787typedef struct drm_i915_mem_free {
    788	int region;
    789	int region_offset;
    790} drm_i915_mem_free_t;
    791
    792typedef struct drm_i915_mem_init_heap {
    793	int region;
    794	int size;
    795	int start;
    796} drm_i915_mem_init_heap_t;
    797
    798/* Allow memory manager to be torn down and re-initialized (eg on
    799 * rotate):
    800 */
    801typedef struct drm_i915_mem_destroy_heap {
    802	int region;
    803} drm_i915_mem_destroy_heap_t;
    804
    805/* Allow X server to configure which pipes to monitor for vblank signals
    806 */
    807#define	DRM_I915_VBLANK_PIPE_A	1
    808#define	DRM_I915_VBLANK_PIPE_B	2
    809
    810typedef struct drm_i915_vblank_pipe {
    811	int pipe;
    812} drm_i915_vblank_pipe_t;
    813
    814/* Schedule buffer swap at given vertical blank:
    815 */
    816typedef struct drm_i915_vblank_swap {
    817	drm_drawable_t drawable;
    818	enum drm_vblank_seq_type seqtype;
    819	unsigned int sequence;
    820} drm_i915_vblank_swap_t;
    821
    822typedef struct drm_i915_hws_addr {
    823	__u64 addr;
    824} drm_i915_hws_addr_t;
    825
    826struct drm_i915_gem_init {
    827	/**
    828	 * Beginning offset in the GTT to be managed by the DRM memory
    829	 * manager.
    830	 */
    831	__u64 gtt_start;
    832	/**
    833	 * Ending offset in the GTT to be managed by the DRM memory
    834	 * manager.
    835	 */
    836	__u64 gtt_end;
    837};
    838
    839struct drm_i915_gem_create {
    840	/**
    841	 * Requested size for the object.
    842	 *
    843	 * The (page-aligned) allocated size for the object will be returned.
    844	 */
    845	__u64 size;
    846	/**
    847	 * Returned handle for the object.
    848	 *
    849	 * Object handles are nonzero.
    850	 */
    851	__u32 handle;
    852	__u32 pad;
    853};
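
/*
 * A minimal sketch (not part of this uAPI) of creating a 4 KiB buffer object.
 * It assumes fd is an open i915 DRM file descriptor; error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_create create = {
 *		.size = 4096,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	// create.handle now names the object, create.size the page-aligned size
 */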
    854
    855struct drm_i915_gem_pread {
    856	/** Handle for the object being read. */
    857	__u32 handle;
    858	__u32 pad;
    859	/** Offset into the object to read from */
    860	__u64 offset;
    861	/** Length of data to read */
    862	__u64 size;
    863	/**
    864	 * Pointer to write the data into.
    865	 *
    866	 * This is a fixed-size type for 32/64 compatibility.
    867	 */
    868	__u64 data_ptr;
    869};
    870
    871struct drm_i915_gem_pwrite {
    872	/** Handle for the object being written to. */
    873	__u32 handle;
    874	__u32 pad;
    875	/** Offset into the object to write to */
    876	__u64 offset;
    877	/** Length of data to write */
    878	__u64 size;
    879	/**
    880	 * Pointer to read the data from.
    881	 *
    882	 * This is a fixed-size type for 32/64 compatibility.
    883	 */
    884	__u64 data_ptr;
    885};
    886
    887struct drm_i915_gem_mmap {
    888	/** Handle for the object being mapped. */
    889	__u32 handle;
    890	__u32 pad;
    891	/** Offset in the object to map. */
    892	__u64 offset;
    893	/**
    894	 * Length of data to map.
    895	 *
    896	 * The value will be page-aligned.
    897	 */
    898	__u64 size;
    899	/**
    900	 * Returned pointer the data was mapped at.
    901	 *
    902	 * This is a fixed-size type for 32/64 compatibility.
    903	 */
    904	__u64 addr_ptr;
    905
    906	/**
    907	 * Flags for extended behaviour.
    908	 *
    909	 * Added in version 2.
    910	 */
    911	__u64 flags;
    912#define I915_MMAP_WC 0x1
    913};
    914
    915struct drm_i915_gem_mmap_gtt {
    916	/** Handle for the object being mapped. */
    917	__u32 handle;
    918	__u32 pad;
    919	/**
    920	 * Fake offset to use for subsequent mmap call
    921	 *
    922	 * This is a fixed-size type for 32/64 compatibility.
    923	 */
    924	__u64 offset;
    925};
    926
    927/**
    928 * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object.
    929 *
     930 * This struct is passed as an argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl,
    931 * and is used to retrieve the fake offset to mmap an object specified by &handle.
    932 *
    933 * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+.
    934 * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave
    935 * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`.
    936 */
    937struct drm_i915_gem_mmap_offset {
    938	/** @handle: Handle for the object being mapped. */
    939	__u32 handle;
    940	/** @pad: Must be zero */
    941	__u32 pad;
    942	/**
    943	 * @offset: The fake offset to use for subsequent mmap call
    944	 *
    945	 * This is a fixed-size type for 32/64 compatibility.
    946	 */
    947	__u64 offset;
    948
    949	/**
    950	 * @flags: Flags for extended behaviour.
    951	 *
     952	 * It is mandatory that one of the `MMAP_OFFSET` types
     953	 * be included:
    954	 *
    955	 * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined)
    956	 * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching.
    957	 * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
    958	 * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
    959	 *
    960	 * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
    961	 * type. On devices without local memory, this caching mode is invalid.
    962	 *
     963	 * When specifying `I915_MMAP_OFFSET_FIXED`, either WC or WB will be used as
     964	 * the caching mode, depending on the object placement at creation. WB will be
     965	 * used when the object can only exist in system memory, WC otherwise.
    966	 */
    967	__u64 flags;
    968
    969#define I915_MMAP_OFFSET_GTT	0
    970#define I915_MMAP_OFFSET_WC	1
    971#define I915_MMAP_OFFSET_WB	2
    972#define I915_MMAP_OFFSET_UC	3
    973#define I915_MMAP_OFFSET_FIXED	4
    974
    975	/**
    976	 * @extensions: Zero-terminated chain of extensions.
    977	 *
    978	 * No current extensions defined; mbz.
    979	 */
    980	__u64 extensions;
    981};
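
/*
 * A minimal sketch (not part of this uAPI) of the two-step mapping flow: fetch
 * the fake offset, then hand it to mmap() on the DRM fd. It assumes fd is an
 * open i915 DRM file descriptor, handle a valid object handle and size its
 * size; error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_mmap_offset mmo = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WB,	// pick a supported caching mode
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmo);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, mmo.offset);
 */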
    982
    983/**
     984 * struct drm_i915_gem_set_domain - Adjust the object's write or read domain, in
    985 * preparation for accessing the pages via some CPU domain.
    986 *
    987 * Specifying a new write or read domain will flush the object out of the
     988 * previous domain (if required), before then updating the object's domain
    989 * tracking with the new domain.
    990 *
    991 * Note this might involve waiting for the object first if it is still active on
    992 * the GPU.
    993 *
    994 * Supported values for @read_domains and @write_domain:
    995 *
    996 *	- I915_GEM_DOMAIN_WC: Uncached write-combined domain
    997 *	- I915_GEM_DOMAIN_CPU: CPU cache domain
    998 *	- I915_GEM_DOMAIN_GTT: Mappable aperture domain
    999 *
   1000 * All other domains are rejected.
   1001 *
   1002 * Note that for discrete, starting from DG1, this is no longer supported, and
   1003 * is instead rejected. On such platforms the CPU domain is effectively static,
   1004 * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
   1005 * which can't be set explicitly and instead depends on the object placements,
   1006 * as per the below.
   1007 *
   1008 * Implicit caching rules, starting from DG1:
   1009 *
   1010 *	- If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
   1011 *	  contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
   1012 *	  mapped as write-combined only.
   1013 *
   1014 *	- Everything else is always allocated and mapped as write-back, with the
   1015 *	  guarantee that everything is also coherent with the GPU.
   1016 *
   1017 * Note that this is likely to change in the future again, where we might need
   1018 * more flexibility on future devices, so making this all explicit as part of a
   1019 * new &drm_i915_gem_create_ext extension is probable.
   1020 */
   1021struct drm_i915_gem_set_domain {
   1022	/** @handle: Handle for the object. */
   1023	__u32 handle;
   1024
   1025	/** @read_domains: New read domains. */
   1026	__u32 read_domains;
   1027
   1028	/**
   1029	 * @write_domain: New write domain.
   1030	 *
   1031	 * Note that having something in the write domain implies it's in the
   1032	 * read domain, and only that read domain.
   1033	 */
   1034	__u32 write_domain;
   1035};
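
/*
 * A minimal sketch (not part of this uAPI) of moving an object into the CPU
 * domain before writing to it through a CPU mapping. It assumes fd and handle
 * as in the earlier sketches; error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */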
   1036
   1037struct drm_i915_gem_sw_finish {
   1038	/** Handle for the object */
   1039	__u32 handle;
   1040};
   1041
   1042struct drm_i915_gem_relocation_entry {
   1043	/**
   1044	 * Handle of the buffer being pointed to by this relocation entry.
   1045	 *
   1046	 * It's appealing to make this be an index into the mm_validate_entry
   1047	 * list to refer to the buffer, but this allows the driver to create
   1048	 * a relocation list for state buffers and not re-write it per
   1049	 * exec using the buffer.
   1050	 */
   1051	__u32 target_handle;
   1052
   1053	/**
   1054	 * Value to be added to the offset of the target buffer to make up
   1055	 * the relocation entry.
   1056	 */
   1057	__u32 delta;
   1058
   1059	/** Offset in the buffer the relocation entry will be written into */
   1060	__u64 offset;
   1061
   1062	/**
   1063	 * Offset value of the target buffer that the relocation entry was last
   1064	 * written as.
   1065	 *
   1066	 * If the buffer has the same offset as last time, we can skip syncing
   1067	 * and writing the relocation.  This value is written back out by
   1068	 * the execbuffer ioctl when the relocation is written.
   1069	 */
   1070	__u64 presumed_offset;
   1071
   1072	/**
   1073	 * Target memory domains read by this operation.
   1074	 */
   1075	__u32 read_domains;
   1076
   1077	/**
   1078	 * Target memory domains written by this operation.
   1079	 *
   1080	 * Note that only one domain may be written by the whole
   1081	 * execbuffer operation, so that where there are conflicts,
   1082	 * the application will get -EINVAL back.
   1083	 */
   1084	__u32 write_domain;
   1085};
   1086
   1087/** @{
   1088 * Intel memory domains
   1089 *
   1090 * Most of these just align with the various caches in
   1091 * the system and are used to flush and invalidate as
   1092 * objects end up cached in different domains.
   1093 */
   1094/** CPU cache */
   1095#define I915_GEM_DOMAIN_CPU		0x00000001
   1096/** Render cache, used by 2D and 3D drawing */
   1097#define I915_GEM_DOMAIN_RENDER		0x00000002
   1098/** Sampler cache, used by texture engine */
   1099#define I915_GEM_DOMAIN_SAMPLER		0x00000004
   1100/** Command queue, used to load batch buffers */
   1101#define I915_GEM_DOMAIN_COMMAND		0x00000008
   1102/** Instruction cache, used by shader programs */
   1103#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
   1104/** Vertex address cache */
   1105#define I915_GEM_DOMAIN_VERTEX		0x00000020
   1106/** GTT domain - aperture and scanout */
   1107#define I915_GEM_DOMAIN_GTT		0x00000040
   1108/** WC domain - uncached access */
   1109#define I915_GEM_DOMAIN_WC		0x00000080
   1110/** @} */
   1111
   1112struct drm_i915_gem_exec_object {
   1113	/**
   1114	 * User's handle for a buffer to be bound into the GTT for this
   1115	 * operation.
   1116	 */
   1117	__u32 handle;
   1118
   1119	/** Number of relocations to be performed on this buffer */
   1120	__u32 relocation_count;
   1121	/**
   1122	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
   1123	 * the relocations to be performed in this buffer.
   1124	 */
   1125	__u64 relocs_ptr;
   1126
   1127	/** Required alignment in graphics aperture */
   1128	__u64 alignment;
   1129
   1130	/**
   1131	 * Returned value of the updated offset of the object, for future
   1132	 * presumed_offset writes.
   1133	 */
   1134	__u64 offset;
   1135};
   1136
   1137/* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
   1138struct drm_i915_gem_execbuffer {
   1139	/**
   1140	 * List of buffers to be validated with their relocations to be
    1141	 * performed on them.
   1142	 *
   1143	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
   1144	 *
   1145	 * These buffers must be listed in an order such that all relocations
   1146	 * a buffer is performing refer to buffers that have already appeared
   1147	 * in the validate list.
   1148	 */
   1149	__u64 buffers_ptr;
   1150	__u32 buffer_count;
   1151
   1152	/** Offset in the batchbuffer to start execution from. */
   1153	__u32 batch_start_offset;
   1154	/** Bytes used in batchbuffer from batch_start_offset */
   1155	__u32 batch_len;
   1156	__u32 DR1;
   1157	__u32 DR4;
   1158	__u32 num_cliprects;
   1159	/** This is a struct drm_clip_rect *cliprects */
   1160	__u64 cliprects_ptr;
   1161};
   1162
   1163struct drm_i915_gem_exec_object2 {
   1164	/**
   1165	 * User's handle for a buffer to be bound into the GTT for this
   1166	 * operation.
   1167	 */
   1168	__u32 handle;
   1169
   1170	/** Number of relocations to be performed on this buffer */
   1171	__u32 relocation_count;
   1172	/**
   1173	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
   1174	 * the relocations to be performed in this buffer.
   1175	 */
   1176	__u64 relocs_ptr;
   1177
   1178	/** Required alignment in graphics aperture */
   1179	__u64 alignment;
   1180
   1181	/**
   1182	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
   1183	 * the user with the GTT offset at which this object will be pinned.
   1184	 *
   1185	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
   1186	 * presumed_offset of the object.
   1187	 *
   1188	 * During execbuffer2 the kernel populates it with the value of the
   1189	 * current GTT offset of the object, for future presumed_offset writes.
   1190	 *
   1191	 * See struct drm_i915_gem_create_ext for the rules when dealing with
   1192	 * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with
   1193	 * minimum page sizes, like DG2.
   1194	 */
   1195	__u64 offset;
   1196
   1197#define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
   1198#define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
   1199#define EXEC_OBJECT_WRITE		 (1<<2)
   1200#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
   1201#define EXEC_OBJECT_PINNED		 (1<<4)
   1202#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
   1203/* The kernel implicitly tracks GPU activity on all GEM objects, and
   1204 * synchronises operations with outstanding rendering. This includes
   1205 * rendering on other devices if exported via dma-buf. However, sometimes
   1206 * this tracking is too coarse and the user knows better. For example,
   1207 * if the object is split into non-overlapping ranges shared between different
   1208 * clients or engines (i.e. suballocating objects), the implicit tracking
   1209 * by kernel assumes that each operation affects the whole object rather
   1210 * than an individual range, causing needless synchronisation between clients.
   1211 * The kernel will also forgo any CPU cache flushes prior to rendering from
   1212 * the object as the client is expected to be also handling such domain
   1213 * tracking.
   1214 *
   1215 * The kernel maintains the implicit tracking in order to manage resources
   1216 * used by the GPU - this flag only disables the synchronisation prior to
   1217 * rendering with this object in this execbuf.
   1218 *
    1219 * Opting out of implicit synchronisation requires the user to do its own
   1220 * explicit tracking to avoid rendering corruption. See, for example,
   1221 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
   1222 */
   1223#define EXEC_OBJECT_ASYNC		(1<<6)
   1224/* Request that the contents of this execobject be copied into the error
   1225 * state upon a GPU hang involving this batch for post-mortem debugging.
   1226 * These buffers are recorded in no particular order as "user" in
   1227 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
   1228 * if the kernel supports this flag.
   1229 */
   1230#define EXEC_OBJECT_CAPTURE		(1<<7)
   1231/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
   1232#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
   1233	__u64 flags;
   1234
   1235	union {
   1236		__u64 rsvd1;
   1237		__u64 pad_to_size;
   1238	};
   1239	__u64 rsvd2;
   1240};
   1241
   1242struct drm_i915_gem_exec_fence {
   1243	/**
   1244	 * User's handle for a drm_syncobj to wait on or signal.
   1245	 */
   1246	__u32 handle;
   1247
   1248#define I915_EXEC_FENCE_WAIT            (1<<0)
   1249#define I915_EXEC_FENCE_SIGNAL          (1<<1)
   1250#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
   1251	__u32 flags;
   1252};
   1253
   1254/*
   1255 * See drm_i915_gem_execbuffer_ext_timeline_fences.
   1256 */
   1257#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
   1258
   1259/*
   1260 * This structure describes an array of drm_syncobj and associated points for
   1261 * timeline variants of drm_syncobj. It is invalid to append this structure to
   1262 * the execbuf if I915_EXEC_FENCE_ARRAY is set.
   1263 */
   1264struct drm_i915_gem_execbuffer_ext_timeline_fences {
   1265	struct i915_user_extension base;
   1266
   1267	/**
    1268	 * Number of elements in the handles_ptr & values_ptr arrays.
   1269	 */
   1270	__u64 fence_count;
   1271
   1272	/**
   1273	 * Pointer to an array of struct drm_i915_gem_exec_fence of length
   1274	 * fence_count.
   1275	 */
   1276	__u64 handles_ptr;
   1277
   1278	/**
   1279	 * Pointer to an array of u64 values of length fence_count. Values
   1280	 * must be 0 for a binary drm_syncobj. A Value of 0 for a timeline
    1281	 * must be 0 for a binary drm_syncobj. A value of 0 for a timeline
   1282	 */
   1283	__u64 values_ptr;
   1284};
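
/*
 * A minimal sketch (not part of this uAPI) of attaching this extension to an
 * execbuf: the extension chain goes through @cliprects_ptr with
 * I915_EXEC_USE_EXTENSIONS set in @flags (see struct drm_i915_gem_execbuffer2
 * below). It assumes syncobj is a valid drm_syncobj handle; error handling is
 * omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_exec_fence fence = {
 *		.handle = syncobj,
 *		.flags = I915_EXEC_FENCE_SIGNAL,
 *	};
 *	__u64 point = 1;	// timeline point to signal
 *	struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
 *		.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
 *		.fence_count = 1,
 *		.handles_ptr = (uintptr_t)&fence,
 *		.values_ptr = (uintptr_t)&point,
 *	};
 *	// then: execbuf.cliprects_ptr = (uintptr_t)&ext;
 *	//       execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
 */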
   1285
   1286struct drm_i915_gem_execbuffer2 {
   1287	/**
   1288	 * List of gem_exec_object2 structs
   1289	 */
   1290	__u64 buffers_ptr;
   1291	__u32 buffer_count;
   1292
   1293	/** Offset in the batchbuffer to start execution from. */
   1294	__u32 batch_start_offset;
   1295	/** Bytes used in batchbuffer from batch_start_offset */
   1296	__u32 batch_len;
   1297	__u32 DR1;
   1298	__u32 DR4;
   1299	__u32 num_cliprects;
   1300	/**
   1301	 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
   1302	 * & I915_EXEC_USE_EXTENSIONS are not set.
   1303	 *
   1304	 * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
   1305	 * of struct drm_i915_gem_exec_fence and num_cliprects is the length
   1306	 * of the array.
   1307	 *
   1308	 * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
   1309	 * single struct i915_user_extension and num_cliprects is 0.
   1310	 */
   1311	__u64 cliprects_ptr;
   1312#define I915_EXEC_RING_MASK              (0x3f)
   1313#define I915_EXEC_DEFAULT                (0<<0)
   1314#define I915_EXEC_RENDER                 (1<<0)
   1315#define I915_EXEC_BSD                    (2<<0)
   1316#define I915_EXEC_BLT                    (3<<0)
   1317#define I915_EXEC_VEBOX                  (4<<0)
   1318
   1319/* Used for switching the constants addressing mode on gen4+ RENDER ring.
   1320 * Gen6+ only supports relative addressing to dynamic state (default) and
   1321 * absolute addressing.
   1322 *
   1323 * These flags are ignored for the BSD and BLT rings.
   1324 */
   1325#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
   1326#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
   1327#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
   1328#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
   1329	__u64 flags;
   1330	__u64 rsvd1; /* now used for context info */
   1331	__u64 rsvd2;
   1332};
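
/*
 * A minimal sketch (not part of this uAPI) of submitting a single softpinned
 * batch on the render engine. It assumes fd is an open i915 DRM file
 * descriptor, batch_handle a GEM object ending in MI_BATCH_BUFFER_END,
 * batch_size its used length and ctx_id a context handle; error handling is
 * omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = batch_handle,
 *		.offset = 0x100000,	// presumed GTT address
 *		.flags = EXEC_OBJECT_PINNED,
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.batch_len = batch_size,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
 *		.rsvd1 = ctx_id,	// context id
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */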
   1333
   1334/** Resets the SO write offset registers for transform feedback on gen7. */
   1335#define I915_EXEC_GEN7_SOL_RESET	(1<<8)
   1336
   1337/** Request a privileged ("secure") batch buffer. Note only available for
   1338 * DRM_ROOT_ONLY | DRM_MASTER processes.
   1339 */
   1340#define I915_EXEC_SECURE		(1<<9)
   1341
   1342/** Inform the kernel that the batch is and will always be pinned. This
   1343 * negates the requirement for a workaround to be performed to avoid
   1344 * an incoherent CS (such as can be found on 830/845). If this flag is
   1345 * not passed, the kernel will endeavour to make sure the batch is
   1346 * coherent with the CS before execution. If this flag is passed,
   1347 * userspace assumes the responsibility for ensuring the same.
   1348 */
   1349#define I915_EXEC_IS_PINNED		(1<<10)
   1350
   1351/** Provide a hint to the kernel that the command stream and auxiliary
   1352 * state buffers already hold the correct presumed addresses and so the
   1353 * relocation process may be skipped if no buffers need to be moved in
   1354 * preparation for the execbuffer.
   1355 */
   1356#define I915_EXEC_NO_RELOC		(1<<11)
   1357
   1358/** Use the reloc.handle as an index into the exec object array rather
   1359 * than as the per-file handle.
   1360 */
   1361#define I915_EXEC_HANDLE_LUT		(1<<12)
   1362
   1363/** Used for switching BSD rings on the platforms with two BSD rings */
   1364#define I915_EXEC_BSD_SHIFT	 (13)
   1365#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
   1366/* default ping-pong mode */
   1367#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
   1368#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
   1369#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)
   1370
   1371/** Tell the kernel that the batchbuffer is processed by
   1372 *  the resource streamer.
   1373 */
   1374#define I915_EXEC_RESOURCE_STREAMER     (1<<15)
   1375
   1376/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
   1377 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
   1378 * the batch.
   1379 *
   1380 * Returns -EINVAL if the sync_file fd cannot be found.
   1381 */
   1382#define I915_EXEC_FENCE_IN		(1<<16)
   1383
   1384/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
   1385 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
   1386 * to the caller, and it should be closed after use. (The fd is a regular
   1387 * file descriptor and will be cleaned up on process termination. It holds
   1388 * a reference to the request, but nothing else.)
   1389 *
   1390 * The sync_file fd can be combined with other sync_file and passed either
   1391 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
   1392 * will only occur after this request completes), or to other devices.
   1393 *
   1394 * Using I915_EXEC_FENCE_OUT requires use of
   1395 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
   1396 * back to userspace. Failure to do so will cause the out-fence to always
   1397 * be reported as zero, and the real fence fd to be leaked.
   1398 */
   1399#define I915_EXEC_FENCE_OUT		(1<<17)
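
/*
 * A minimal sketch of retrieving an out-fence, assuming an open DRM fd
 * (drm_fd) and an otherwise populated execbuf; error handling is omitted.
 *
 * .. code-block:: C
 *
 *	execbuf.flags |= I915_EXEC_FENCE_OUT;
 *
 *	// Note: must use the _WR variant so that rsvd2 is copied back.
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &execbuf) == 0) {
 *		int out_fence_fd = execbuf.rsvd2 >> 32; // upper 32 bits
 *
 *		// pass to another execbuf as an in-fence, to KMS, etc.
 *		close(out_fence_fd);
 *	}
 */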
   1400
   1401/*
   1402 * Traditionally the execbuf ioctl has only considered the final element in
   1403 * the execobject[] to be the executable batch. Often though, the client
   1404 * will know the batch object prior to construction and being able to place
   1405 * it into the execobject[] array first can simplify the relocation tracking.
   1406 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
   1407 * execobject[] as the batch instead (the default is to use the last
   1408 * element).
   1409 */
   1410#define I915_EXEC_BATCH_FIRST		(1<<18)
   1411
   1412/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
   1413 * define an array of drm_i915_gem_exec_fence structures which specify a set of
   1414 * dma fences to wait upon or signal.
   1415 */
   1416#define I915_EXEC_FENCE_ARRAY   (1<<19)
   1417
   1418/*
   1419 * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
   1420 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
   1421 * the batch.
   1422 *
   1423 * Returns -EINVAL if the sync_file fd cannot be found.
   1424 */
   1425#define I915_EXEC_FENCE_SUBMIT		(1 << 20)
   1426
   1427/*
   1428 * Setting I915_EXEC_USE_EXTENSIONS implies that
   1429 * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
   1430 * list of i915_user_extension. Each i915_user_extension node is the base of a
   1431 * larger structure. The list of supported structures are listed in the
   1432 * drm_i915_gem_execbuffer_ext enum.
   1433 */
   1434#define I915_EXEC_USE_EXTENSIONS	(1 << 21)
   1435
   1436#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
   1437
   1438#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
   1439#define i915_execbuffer2_set_context_id(eb2, context) \
   1440	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
   1441#define i915_execbuffer2_get_context_id(eb2) \
   1442	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
   1443
   1444struct drm_i915_gem_pin {
   1445	/** Handle of the buffer to be pinned. */
   1446	__u32 handle;
   1447	__u32 pad;
   1448
   1449	/** alignment required within the aperture */
   1450	__u64 alignment;
   1451
   1452	/** Returned GTT offset of the buffer. */
   1453	__u64 offset;
   1454};
   1455
   1456struct drm_i915_gem_unpin {
   1457	/** Handle of the buffer to be unpinned. */
   1458	__u32 handle;
   1459	__u32 pad;
   1460};
   1461
   1462struct drm_i915_gem_busy {
   1463	/** Handle of the buffer to check for busy */
   1464	__u32 handle;
   1465
   1466	/** Return busy status
   1467	 *
   1468	 * A return of 0 implies that the object is idle (after
   1469	 * having flushed any pending activity), and a non-zero return that
   1470	 * the object is still in-flight on the GPU. (The GPU has not yet
   1471	 * signaled completion for all pending requests that reference the
   1472	 * object.) An object is guaranteed to become idle eventually (so
   1473	 * long as no new GPU commands are executed upon it). Due to the
   1474	 * asynchronous nature of the hardware, an object reported
   1475	 * as busy may become idle before the ioctl is completed.
   1476	 *
   1477	 * Furthermore, if the object is busy, which engine is busy is only
   1478	 * provided as a guide and only indirectly by reporting its class
   1479	 * (there may be more than one engine in each class). There are race
   1480	 * conditions which prevent the report of which engines are busy from
   1481	 * being always accurate.  However, the converse is not true. If the
   1482	 * object is idle, the result of the ioctl, that all engines are idle,
   1483	 * is accurate.
   1484	 *
   1485	 * The returned dword is split into two fields to indicate both
   1486	 * the engine classes on which the object is being read, and the
   1487	 * engine class on which it is currently being written (if any).
   1488	 *
   1489	 * The low word (bits 0:15) indicates if the object is being written
   1490	 * to by any engine (there can only be one, as the GEM implicit
   1491	 * synchronisation rules force writes to be serialised). Only the
   1492	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
   1493	 * 1 not 0 etc) for the last write is reported.
   1494	 *
   1495	 * The high word (bits 16:31) is a bitmask of which engine classes
   1496	 * are currently reading from the object. Multiple engines may be
   1497	 * reading from the object simultaneously.
   1498	 *
   1499	 * The value of each engine class is the same as specified in the
   1500	 * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
   1501	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
   1502	 * Some hardware may have parallel execution engines, e.g. multiple
   1503	 * media engines, which are mapped to the same class identifier and so
   1504	 * are not separately reported for busyness.
   1505	 *
   1506	 * Caveat emptor:
   1507	 * Only the boolean result of this query is reliable; that is whether
   1508	 * the object is idle or busy. The report of which engines are busy
   1509	 * should be only used as a heuristic.
   1510	 */
   1511	__u32 busy;
   1512};
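
/*
 * A minimal sketch of decoding the busy field, assuming an open DRM fd
 * (drm_fd) and a GEM handle (bo_handle); error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_busy busy = { .handle = bo_handle };
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && busy.busy) {
 *		__u16 write_class = busy.busy & 0xffff; // engine class + 1, 0 == no writer
 *		__u16 read_classes = busy.busy >> 16;   // bitmask of reading engine classes
 *
 *		if (write_class)
 *			printf("written by engine class %u\n", write_class - 1);
 *		if (read_classes & (1u << I915_ENGINE_CLASS_RENDER))
 *			printf("being read by the render class\n");
 *	}
 */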
   1513
   1514/**
   1515 * struct drm_i915_gem_caching - Set or get the caching for given object
   1516 * handle.
   1517 *
   1518 * Allow userspace to control the GTT caching bits for a given object when the
   1519 * object is later mapped through the ppGTT(or GGTT on older platforms lacking
   1520 * object is later mapped through the ppGTT (or GGTT on older platforms lacking
   1521 * require unbinding the object from the GTT first, if its current caching value
   1522 * doesn't match.
   1523 *
   1524 * Note that this all changes on discrete platforms: starting from DG1, the
   1525 * set/get caching is no longer supported, and is now rejected.  Instead the CPU
   1526 * caching attributes (WB vs WC) will become an immutable creation time property
   1527 * for the object, along with the GTT caching level. For now we don't expose any
   1528 * new uAPI for this; instead on DG1 this is all implicit, although this largely
   1529 * shouldn't matter since DG1 is coherent by default (without any way of
   1530 * controlling it).
   1531 *
   1532 * Implicit caching rules, starting from DG1:
   1533 *
   1534 *     - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
   1535 *       contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
   1536 *       mapped as write-combined only.
   1537 *
   1538 *     - Everything else is always allocated and mapped as write-back, with the
   1539 *       guarantee that everything is also coherent with the GPU.
   1540 *
   1541 * Note that this is likely to change in the future again, where we might need
   1542 * more flexibility on future devices, so making this all explicit as part of a
   1543 * new &drm_i915_gem_create_ext extension is probable.
   1544 *
   1545 * Side note: Part of the reason for this is that changing the at-allocation-time CPU
   1546 * caching attributes for the pages might be required (and is expensive) if we
   1547 * need to then CPU map the pages later with different caching attributes. This
   1548 * inconsistent caching behaviour, while supported on x86, is not universally
   1549 * supported on other architectures. So for simplicity we opt for setting
   1550 * everything at creation time, whilst also making it immutable, on discrete
   1551 * platforms.
   1552 */
   1553struct drm_i915_gem_caching {
   1554	/**
   1555	 * @handle: Handle of the buffer to set/get the caching level.
   1556	 */
   1557	__u32 handle;
   1558
   1559	/**
   1560	 * @caching: The GTT caching level to apply or possible return value.
   1561	 *
   1562	 * The supported @caching values:
   1563	 *
   1564	 * I915_CACHING_NONE:
   1565	 *
   1566	 * GPU access is not coherent with CPU caches.  Default for machines
   1567	 * without an LLC. This means manual flushing might be needed, if we
   1568	 * want GPU access to be coherent.
   1569	 *
   1570	 * I915_CACHING_CACHED:
   1571	 *
   1572	 * GPU access is coherent with CPU caches and furthermore the data is
   1573	 * cached in last-level caches shared between CPU cores and the GPU GT.
   1574	 *
   1575	 * I915_CACHING_DISPLAY:
   1576	 *
   1577	 * Special GPU caching mode which is coherent with the scanout engines.
   1578	 * Transparently falls back to I915_CACHING_NONE on platforms where no
   1579	 * special cache mode (like write-through or gfdt flushing) is
   1580	 * available. The kernel automatically sets this mode when using a
   1581	 * buffer as a scanout target.  Userspace can manually set this mode to
   1582	 * avoid a costly stall and clflush in the hotpath of drawing the first
   1583	 * frame.
   1584	 */
   1585#define I915_CACHING_NONE		0
   1586#define I915_CACHING_CACHED		1
   1587#define I915_CACHING_DISPLAY		2
   1588	__u32 caching;
   1589};
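
/*
 * A minimal sketch of requesting LLC caching for an object, assuming an open
 * DRM fd (drm_fd) and a GEM handle (bo_handle); error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = bo_handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	// Rejected on discrete platforms such as DG1, see above.
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */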
   1590
   1591#define I915_TILING_NONE	0
   1592#define I915_TILING_X		1
   1593#define I915_TILING_Y		2
   1594/*
   1595 * Do not add new tiling types here.  The I915_TILING_* values are for
   1596 * de-tiling fence registers that no longer exist on modern platforms.  Although
   1597 * the hardware may support new types of tiling in general (e.g., Tile4), we
   1598 * do not need to add them to the uapi that is specific to now-defunct ioctls.
   1599 */
   1600#define I915_TILING_LAST	I915_TILING_Y
   1601
   1602#define I915_BIT_6_SWIZZLE_NONE		0
   1603#define I915_BIT_6_SWIZZLE_9		1
   1604#define I915_BIT_6_SWIZZLE_9_10		2
   1605#define I915_BIT_6_SWIZZLE_9_11		3
   1606#define I915_BIT_6_SWIZZLE_9_10_11	4
   1607/* Not seen by userland */
   1608#define I915_BIT_6_SWIZZLE_UNKNOWN	5
   1609/* Seen by userland. */
   1610#define I915_BIT_6_SWIZZLE_9_17		6
   1611#define I915_BIT_6_SWIZZLE_9_10_17	7
   1612
   1613struct drm_i915_gem_set_tiling {
   1614	/** Handle of the buffer to have its tiling state updated */
   1615	__u32 handle;
   1616
   1617	/**
   1618	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
   1619	 * I915_TILING_Y).
   1620	 *
   1621	 * This value is to be set on request, and will be updated by the
   1622	 * kernel on successful return with the actual chosen tiling layout.
   1623	 *
   1624	 * The tiling mode may be demoted to I915_TILING_NONE when the system
   1625	 * has bit 6 swizzling that can't be managed correctly by GEM.
   1626	 *
   1627	 * Buffer contents become undefined when changing tiling_mode.
   1628	 */
   1629	__u32 tiling_mode;
   1630
   1631	/**
   1632	 * Stride in bytes for the object when in I915_TILING_X or
   1633	 * I915_TILING_Y.
   1634	 */
   1635	__u32 stride;
   1636
   1637	/**
   1638	 * Returned address bit 6 swizzling required for CPU access through
   1639	 * mmap mapping.
   1640	 */
   1641	__u32 swizzle_mode;
   1642};
   1643
   1644struct drm_i915_gem_get_tiling {
   1645	/** Handle of the buffer to get tiling state for. */
   1646	__u32 handle;
   1647
   1648	/**
   1649	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
   1650	 * I915_TILING_Y).
   1651	 */
   1652	__u32 tiling_mode;
   1653
   1654	/**
   1655	 * Returned address bit 6 swizzling required for CPU access through
   1656	 * mmap mapping.
   1657	 */
   1658	__u32 swizzle_mode;
   1659
   1660	/**
   1661	 * Returned address bit 6 swizzling required for CPU access through
   1662	 * mmap mapping whilst bound.
   1663	 */
   1664	__u32 phys_swizzle_mode;
   1665};
   1666
   1667struct drm_i915_gem_get_aperture {
   1668	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
   1669	__u64 aper_size;
   1670
   1671	/**
   1672	 * Available space in the aperture used by i915_gem_execbuffer, in
   1673	 * bytes
   1674	 */
   1675	__u64 aper_available_size;
   1676};
   1677
   1678struct drm_i915_get_pipe_from_crtc_id {
   1679	/** ID of CRTC being requested **/
   1680	__u32 crtc_id;
   1681
   1682	/** pipe of requested CRTC **/
   1683	__u32 pipe;
   1684};
   1685
   1686#define I915_MADV_WILLNEED 0
   1687#define I915_MADV_DONTNEED 1
   1688#define __I915_MADV_PURGED 2 /* internal state */
   1689
   1690struct drm_i915_gem_madvise {
   1691	/** Handle of the buffer to change the backing store advice */
   1692	__u32 handle;
   1693
   1694	/* Advice: either the buffer will be needed again in the near future,
   1695	 *         or won't be and could be discarded under memory pressure.
   1696	 */
   1697	__u32 madv;
   1698
   1699	/** Whether the backing store still exists. */
   1700	__u32 retained;
   1701};
   1702
   1703/* flags */
   1704#define I915_OVERLAY_TYPE_MASK 		0xff
   1705#define I915_OVERLAY_YUV_PLANAR 	0x01
   1706#define I915_OVERLAY_YUV_PACKED 	0x02
   1707#define I915_OVERLAY_RGB		0x03
   1708
   1709#define I915_OVERLAY_DEPTH_MASK		0xff00
   1710#define I915_OVERLAY_RGB24		0x1000
   1711#define I915_OVERLAY_RGB16		0x2000
   1712#define I915_OVERLAY_RGB15		0x3000
   1713#define I915_OVERLAY_YUV422		0x0100
   1714#define I915_OVERLAY_YUV411		0x0200
   1715#define I915_OVERLAY_YUV420		0x0300
   1716#define I915_OVERLAY_YUV410		0x0400
   1717
   1718#define I915_OVERLAY_SWAP_MASK		0xff0000
   1719#define I915_OVERLAY_NO_SWAP		0x000000
   1720#define I915_OVERLAY_UV_SWAP		0x010000
   1721#define I915_OVERLAY_Y_SWAP		0x020000
   1722#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
   1723
   1724#define I915_OVERLAY_FLAGS_MASK		0xff000000
   1725#define I915_OVERLAY_ENABLE		0x01000000
   1726
   1727struct drm_intel_overlay_put_image {
   1728	/* various flags and src format description */
   1729	__u32 flags;
   1730	/* source picture description */
   1731	__u32 bo_handle;
   1732	/* stride values and offsets are in bytes, buffer relative */
   1733	__u16 stride_Y; /* stride for packed formats */
   1734	__u16 stride_UV;
   1735	__u32 offset_Y; /* offset for packed formats */
   1736	__u32 offset_U;
   1737	__u32 offset_V;
   1738	/* in pixels */
   1739	__u16 src_width;
   1740	__u16 src_height;
   1741	/* to compensate the scaling factors for partially covered surfaces */
   1742	__u16 src_scan_width;
   1743	__u16 src_scan_height;
   1744	/* output crtc description */
   1745	__u32 crtc_id;
   1746	__u16 dst_x;
   1747	__u16 dst_y;
   1748	__u16 dst_width;
   1749	__u16 dst_height;
   1750};
   1751
   1752/* flags */
   1753#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
   1754#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
   1755#define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
   1756struct drm_intel_overlay_attrs {
   1757	__u32 flags;
   1758	__u32 color_key;
   1759	__s32 brightness;
   1760	__u32 contrast;
   1761	__u32 saturation;
   1762	__u32 gamma0;
   1763	__u32 gamma1;
   1764	__u32 gamma2;
   1765	__u32 gamma3;
   1766	__u32 gamma4;
   1767	__u32 gamma5;
   1768};
   1769
   1770/*
   1771 * Intel sprite handling
   1772 *
   1773 * Color keying works with a min/mask/max tuple.  Both source and destination
   1774 * color keying is allowed.
   1775 *
   1776 * Source keying:
   1777 * Sprite pixels within the min & max values, masked against the color channels
   1778 * specified in the mask field, will be transparent.  All other pixels will
   1779 * be displayed on top of the primary plane.  For RGB surfaces, only the min
   1780 * and mask fields will be used; ranged compares are not allowed.
   1781 *
   1782 * Destination keying:
   1783 * Primary plane pixels that match the min value, masked against the color
   1784 * channels specified in the mask field, will be replaced by corresponding
   1785 * pixels from the sprite plane.
   1786 *
   1787 * Note that source & destination keying are exclusive; only one can be
   1788 * active on a given plane.
   1789 */
   1790
   1791#define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
   1792						* flags==0 to disable colorkeying.
   1793						*/
   1794#define I915_SET_COLORKEY_DESTINATION	(1<<1)
   1795#define I915_SET_COLORKEY_SOURCE	(1<<2)
   1796struct drm_intel_sprite_colorkey {
   1797	__u32 plane_id;
   1798	__u32 min_value;
   1799	__u32 channel_mask;
   1800	__u32 max_value;
   1801	__u32 flags;
   1802};
   1803
   1804struct drm_i915_gem_wait {
   1805	/** Handle of BO we shall wait on */
   1806	__u32 bo_handle;
   1807	__u32 flags;
   1808	/** Number of nanoseconds to wait. Returns time remaining. */
   1809	__s64 timeout_ns;
   1810};
   1811
   1812struct drm_i915_gem_context_create {
   1813	__u32 ctx_id; /* output: id of new context*/
   1814	__u32 pad;
   1815};
   1816
   1817struct drm_i915_gem_context_create_ext {
   1818	__u32 ctx_id; /* output: id of new context*/
   1819	__u32 flags;
   1820#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
   1821#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE	(1u << 1)
   1822#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
   1823	(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
   1824	__u64 extensions;
   1825};
   1826
   1827struct drm_i915_gem_context_param {
   1828	__u32 ctx_id;
   1829	__u32 size;
   1830	__u64 param;
   1831#define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
   1832/* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed.  On the off chance
   1833 * someone somewhere has attempted to use it, never re-use this context
   1834 * param number.
   1835 */
   1836#define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
   1837#define I915_CONTEXT_PARAM_GTT_SIZE	0x3
   1838#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
   1839#define I915_CONTEXT_PARAM_BANNABLE	0x5
   1840#define I915_CONTEXT_PARAM_PRIORITY	0x6
   1841#define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
   1842#define   I915_CONTEXT_DEFAULT_PRIORITY		0
   1843#define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
   1844	/*
   1845	 * When using the following param, value should be a pointer to
   1846	 * drm_i915_gem_context_param_sseu.
   1847	 */
   1848#define I915_CONTEXT_PARAM_SSEU		0x7
   1849
   1850/*
   1851 * Not all clients may want to attempt automatic recovery of a context after
   1852 * a hang (for example, some clients may only submit very small incremental
   1853 * batches relying on known logical state of previous batches which will never
   1854 * recover correctly and each attempt will hang), and so would prefer that
   1855 * the context is forever banned instead.
   1856 *
   1857 * If set to false (0), after a reset, subsequent (and in flight) rendering
   1858 * from this context is discarded, and the client will need to create a new
   1859 * context to use instead.
   1860 *
   1861 * If set to true (1), the kernel will automatically attempt to recover the
   1862 * context by skipping the hanging batch and executing the next batch starting
   1863 * from the default context state (discarding the incomplete logical context
   1864 * state lost due to the reset).
   1865 *
   1866 * On creation, all new contexts are marked as recoverable.
   1867 */
   1868#define I915_CONTEXT_PARAM_RECOVERABLE	0x8
   1869
   1870	/*
   1871	 * The id of the associated virtual memory address space (ppGTT) of
   1872	 * this context. Can be retrieved and passed to another context
   1873	 * (on the same fd) for both to use the same ppGTT and so share
   1874	 * address layouts, and avoid reloading the page tables on context
   1875	 * switches between themselves.
   1876	 *
   1877	 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
   1878	 */
   1879#define I915_CONTEXT_PARAM_VM		0x9
   1880
   1881/*
   1882 * I915_CONTEXT_PARAM_ENGINES:
   1883 *
   1884 * Bind this context to operate on this subset of available engines. Henceforth,
   1885 * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
   1886 * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
   1887 * and upwards. Slots 0...N are filled in using the specified (class, instance).
   1888 * Use
   1889 *	engine_class: I915_ENGINE_CLASS_INVALID,
   1890 *	engine_instance: I915_ENGINE_CLASS_INVALID_NONE
   1891 * to specify a gap in the array that can be filled in later, e.g. by a
   1892 * virtual engine used for load balancing.
   1893 *
   1894 * Setting the number of engines bound to the context to 0, by passing a zero
   1895 * sized argument, will revert to the default settings.
   1896 *
   1897 * See struct i915_context_param_engines.
   1898 *
   1899 * Extensions:
   1900 *   i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
   1901 *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
   1902 *   i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
   1903 */
   1904#define I915_CONTEXT_PARAM_ENGINES	0xa
   1905
   1906/*
   1907 * I915_CONTEXT_PARAM_PERSISTENCE:
   1908 *
   1909 * Allow the context and active rendering to survive the process until
   1910 * completion. Persistence allows fire-and-forget clients to queue up a
   1911 * bunch of work, hand the output over to a display server and then quit.
   1912 * If the context is marked as not persistent, upon closing (either via
   1913 * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
   1914 * or process termination), the context and any outstanding requests will be
   1915 * cancelled (and exported fences for cancelled requests marked as -EIO).
   1916 *
   1917 * By default, new contexts allow persistence.
   1918 */
   1919#define I915_CONTEXT_PARAM_PERSISTENCE	0xb
   1920
   1921/* This API has been removed.  On the off chance someone somewhere has
   1922 * attempted to use it, never re-use this context param number.
   1923 */
   1924#define I915_CONTEXT_PARAM_RINGSIZE	0xc
   1925
   1926/*
   1927 * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
   1928 *
   1929 * Mark that the context makes use of protected content, which will result
   1930 * in the context being invalidated when the protected content session is.
   1931 * Given that the protected content session is killed on suspend, the device
   1932 * is kept awake for the lifetime of a protected context, so the user should
   1933 * make sure to dispose of it once done.
   1934 * This flag can only be set at context creation time and, when set to true,
   1935 * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
   1936 * to false. This flag can't be set to true in conjunction with setting the
   1937 * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
   1938 *
   1939 * .. code-block:: C
   1940 *
   1941 *	struct drm_i915_gem_context_create_ext_setparam p_protected = {
   1942 *		.base = {
   1943 *			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
   1944 *		},
   1945 *		.param = {
   1946 *			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
   1947 *			.value = 1,
   1948 *		}
   1949 *	};
   1950 *	struct drm_i915_gem_context_create_ext_setparam p_norecover = {
   1951 *		.base = {
   1952 *			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
   1953 *			.next_extension = to_user_pointer(&p_protected),
   1954 *		},
   1955 *		.param = {
   1956 *			.param = I915_CONTEXT_PARAM_RECOVERABLE,
   1957 *			.value = 0,
   1958 *		}
   1959 *	};
   1960 *	struct drm_i915_gem_context_create_ext create = {
   1961 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
   1962 *		.extensions = to_user_pointer(&p_norecover);
   1963 *	};
   1964 *
   1965 *	ctx_id = gem_context_create_ext(drm_fd, &create);
   1966 *
   1967 * In addition to the normal failure cases, setting this flag during context
   1968 * creation can result in the following errors:
   1969 *
   1970 * -ENODEV: feature not available
   1971 * -EPERM: trying to mark a recoverable or not bannable context as protected
   1972 */
   1973#define I915_CONTEXT_PARAM_PROTECTED_CONTENT    0xd
   1974/* Must be kept compact -- no holes and well documented */
   1975
   1976	__u64 value;
   1977};
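
/*
 * A minimal sketch of adjusting a single context parameter, here the
 * scheduling priority, assuming an open DRM fd (drm_fd) and an existing
 * context id (ctx_id); error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		// Raising the priority above the default may require extra privileges.
 *		.value = I915_CONTEXT_MAX_USER_PRIORITY,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */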
   1978
   1979/*
   1980 * Context SSEU programming
   1981 *
   1982 * It may be necessary for either functional or performance reasons to configure
   1983 * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
   1984 * Sub-slice/EU).
   1985 *
   1986 * This is done by applying an SSEU configuration using the below
   1987 * struct drm_i915_gem_context_param_sseu for every supported engine which
   1988 * userspace intends to use.
   1989 *
   1990 * Not all GPUs or engines support this functionality, in which case an error
   1991 * code of -ENODEV will be returned.
   1992 *
   1993 * Also, the flexibility of possible SSEU configuration permutations varies
   1994 * between GPU generations and is subject to software-imposed limitations.
   1995 * Requesting an unsupported combination will return an error code of -EINVAL.
   1996 *
   1997 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
   1998 * favour of a single global setting.
   1999 */
   2000struct drm_i915_gem_context_param_sseu {
   2001	/*
   2002	 * Engine class & instance to be configured or queried.
   2003	 */
   2004	struct i915_engine_class_instance engine;
   2005
   2006	/*
   2007	 * Unknown flags must be cleared to zero.
   2008	 */
   2009	__u32 flags;
   2010#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
   2011
   2012	/*
   2013	 * Mask of slices to enable for the context. Valid values are a subset
   2014	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
   2015	 */
   2016	__u64 slice_mask;
   2017
   2018	/*
   2019	 * Mask of subslices to enable for the context. Valid values are a
   2020	 * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK.
   2021	 */
   2022	__u64 subslice_mask;
   2023
   2024	/*
   2025	 * Minimum/Maximum number of EUs to enable per subslice for the
   2026	 * context. min_eus_per_subslice must be less than or equal to
   2027	 * max_eus_per_subslice.
   2028	 */
   2029	__u16 min_eus_per_subslice;
   2030	__u16 max_eus_per_subslice;
   2031
   2032	/*
   2033	 * Unused for now. Must be cleared to zero.
   2034	 */
   2035	__u32 rsvd;
   2036};
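
/*
 * A minimal sketch of querying and then restricting the SSEU configuration of
 * the render engine for a context, assuming an open DRM fd (drm_fd) and an
 * existing context id (ctx_id); error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *
 *	// Read back the current configuration...
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *
 *	// ...then, for example, limit the context to a single slice.
 *	sseu.slice_mask = 0x1;
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */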
   2037
   2038/**
   2039 * DOC: Virtual Engine uAPI
   2040 *
   2041 * Virtual engine is a concept where userspace is able to configure a set of
   2042 * physical engines, submit a batch buffer, and let the driver execute it on any
   2043 * engine from the set as it sees fit.
   2044 *
   2045 * This is primarily useful on parts which have multiple instances of the same
   2046 * engine class, like for example GT3+ Skylake parts with their two VCS engines.
   2047 *
   2048 * For instance userspace can enumerate all engines of a certain class using the
   2049 * previously described `Engine Discovery uAPI`_. After that userspace can
   2050 * create a GEM context with a placeholder slot for the virtual engine (using
   2051 * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
   2052 * and instance respectively) and finally using the
   2053 * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in
   2054 * the same reserved slot.
   2055 *
   2056 * Example of creating a virtual engine and submitting a batch buffer to it:
   2057 *
   2058 * .. code-block:: C
   2059 *
   2060 * 	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
   2061 * 		.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
   2062 * 		.engine_index = 0, // Place this virtual engine into engine map slot 0
   2063 * 		.num_siblings = 2,
   2064 * 		.engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
   2065 * 			     { I915_ENGINE_CLASS_VIDEO, 1 }, },
   2066 * 	};
   2067 * 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
   2068 * 		.engines = { { I915_ENGINE_CLASS_INVALID,
   2069 * 			       I915_ENGINE_CLASS_INVALID_NONE } },
   2070 * 		.extensions = to_user_pointer(&virtual), // Chains after load_balance extension
   2071 * 	};
   2072 * 	struct drm_i915_gem_context_create_ext_setparam p_engines = {
   2073 * 		.base = {
   2074 * 			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
   2075 * 		},
   2076 * 		.param = {
   2077 * 			.param = I915_CONTEXT_PARAM_ENGINES,
   2078 * 			.value = to_user_pointer(&engines),
   2079 * 			.size = sizeof(engines),
   2080 * 		},
   2081 * 	};
   2082 * 	struct drm_i915_gem_context_create_ext create = {
   2083 * 		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
   2084 * 		.extensions = to_user_pointer(&p_engines);
   2085 * 	};
   2086 *
   2087 * 	ctx_id = gem_context_create_ext(drm_fd, &create);
   2088 *
   2089 * 	// Now we have created a GEM context with its engine map containing a
   2090 * 	// single virtual engine. Submissions to this slot can go either to
   2091 * 	// vcs0 or vcs1, depending on the load balancing algorithm used inside
   2092 * 	// the driver. The load balancing is dynamic from one batch buffer to
   2093 * 	// another and transparent to userspace.
   2094 *
   2095 * 	...
   2096 * 	execbuf.rsvd1 = ctx_id;
   2097 * 	execbuf.flags = 0; // Submits to index 0 which is the virtual engine
   2098 * 	gem_execbuf(drm_fd, &execbuf);
   2099 */
   2100
   2101/*
   2102 * i915_context_engines_load_balance:
   2103 *
   2104 * Enable load balancing across this set of engines.
   2105 *
   2106 * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that, when
   2107 * used, will proxy the execbuffer request onto one of the set of engines
   2108 * in such a way as to distribute the load evenly across the set.
   2109 *
   2110 * The set of engines must be compatible (e.g. the same HW class) as they
   2111 * will share the same logical GPU context and ring.
   2112 *
   2113 * To intermix rendering with the virtual engine and direct rendering onto
   2114 * the backing engines (bypassing the load balancing proxy), the context must
   2115 * be defined to use a single timeline for all engines.
   2116 */
   2117struct i915_context_engines_load_balance {
   2118	struct i915_user_extension base;
   2119
   2120	__u16 engine_index;
   2121	__u16 num_siblings;
   2122	__u32 flags; /* all undefined flags must be zero */
   2123
   2124	__u64 mbz64; /* reserved for future use; must be zero */
   2125
   2126	struct i915_engine_class_instance engines[0];
   2127} __attribute__((packed));
   2128
   2129#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
   2130	struct i915_user_extension base; \
   2131	__u16 engine_index; \
   2132	__u16 num_siblings; \
   2133	__u32 flags; \
   2134	__u64 mbz64; \
   2135	struct i915_engine_class_instance engines[N__]; \
   2136} __attribute__((packed)) name__
   2137
   2138/*
   2139 * i915_context_engines_bond:
   2140 *
   2141 * Constructed bonded pairs for execution within a virtual engine.
   2142 *
   2143 * All engines are equal, but some are more equal than others. Given
   2144 * the distribution of resources in the HW, it may be preferable to run
   2145 * a request on a given subset of engines in parallel to a request on a
   2146 * specific engine. We enable this selection of engines within a virtual
   2147 * engine by specifying bonding pairs, for any given master engine we will
   2148 * only execute on one of the corresponding siblings within the virtual engine.
   2149 *
   2150 * To execute a request in parallel on the master engine and a sibling requires
   2151 * coordination with I915_EXEC_FENCE_SUBMIT.
   2152 */
   2153struct i915_context_engines_bond {
   2154	struct i915_user_extension base;
   2155
   2156	struct i915_engine_class_instance master;
   2157
   2158	__u16 virtual_index; /* index of virtual engine in ctx->engines[] */
   2159	__u16 num_bonds;
   2160
   2161	__u64 flags; /* all undefined flags must be zero */
   2162	__u64 mbz64[4]; /* reserved for future use; must be zero */
   2163
   2164	struct i915_engine_class_instance engines[0];
   2165} __attribute__((packed));
   2166
   2167#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
   2168	struct i915_user_extension base; \
   2169	struct i915_engine_class_instance master; \
   2170	__u16 virtual_index; \
   2171	__u16 num_bonds; \
   2172	__u64 flags; \
   2173	__u64 mbz64[4]; \
   2174	struct i915_engine_class_instance engines[N__]; \
   2175} __attribute__((packed)) name__
   2176
   2177/**
   2178 * struct i915_context_engines_parallel_submit - Configure engine for
   2179 * parallel submission.
   2180 *
   2181 * Setup a slot in the context engine map to allow multiple BBs to be submitted
   2182 * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
   2183 * in parallel. Multiple hardware contexts are created internally in the i915 to
   2184 * run these BBs. Once a slot is configured for N BBs, only N BBs can be
   2185 * submitted in each execbuf IOCTL and this is implicit behavior, e.g. the user
   2186 * doesn't tell the execbuf IOCTL there are N BBs; the execbuf IOCTL knows how
   2187 * many BBs there are based on the slot's configuration. The N BBs are the last
   2188 * N buffer objects, or the first N if I915_EXEC_BATCH_FIRST is set.
   2189 *
   2190 * The default placement behavior is to create implicit bonds between each
   2191 * context if each context maps to more than 1 physical engine (e.g. context is
   2192 * a virtual engine). Also we only allow contexts of the same engine class, and
   2193 * these contexts must be in logically contiguous order. Examples of the placement
   2194 * behavior are described below. Lastly, the default is to not allow BBs to be
   2195 * preempted mid-batch. Rather, coordinated preemption points are inserted on all
   2196 * hardware contexts between each set of BBs. Flags could be added in the future
   2197 * to change both of these default behaviors.
   2198 *
   2199 * Returns -EINVAL if hardware context placement configuration is invalid or if
   2200 * the placement configuration isn't supported on the platform / submission
   2201 * interface.
   2202 * Returns -ENODEV if extension isn't supported on the platform / submission
   2203 * interface.
   2204 *
   2205 * .. code-block:: none
   2206 *
   2207 *	Examples syntax:
   2208 *	CS[X] = generic engine of same class, logical instance X
   2209 *	INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
   2210 *
   2211 *	Example 1 pseudo code:
   2212 *	set_engines(INVALID)
   2213 *	set_parallel(engine_index=0, width=2, num_siblings=1,
   2214 *		     engines=CS[0],CS[1])
   2215 *
   2216 *	Results in the following valid placement:
   2217 *	CS[0], CS[1]
   2218 *
   2219 *	Example 2 pseudo code:
   2220 *	set_engines(INVALID)
   2221 *	set_parallel(engine_index=0, width=2, num_siblings=2,
   2222 *		     engines=CS[0],CS[2],CS[1],CS[3])
   2223 *
   2224 *	Results in the following valid placements:
   2225 *	CS[0], CS[1]
   2226 *	CS[2], CS[3]
   2227 *
   2228 *	This can be thought of as two virtual engines, each containing two
   2229 *	engines thereby making a 2D array. However, there are bonds tying the
   2230 *	entries together and placing restrictions on how they can be scheduled.
   2231 *	Specifically, the scheduler can choose only vertical columns from the 2D
   2232 *	array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
   2233 *	scheduler wants to submit to CS[0], it must also choose CS[1] and vice
   2234 *	versa. Similarly, choosing CS[2] requires also using CS[3].
   2235 *	VE[0] = CS[0], CS[2]
   2236 *	VE[1] = CS[1], CS[3]
   2237 *
   2238 *	Example 3 pseudo code:
   2239 *	set_engines(INVALID)
   2240 *	set_parallel(engine_index=0, width=2, num_siblings=2,
   2241 *		     engines=CS[0],CS[1],CS[1],CS[3])
   2242 *
   2243 *	Results in the following valid and invalid placements:
   2244 *	CS[0], CS[1]
   2245 *	CS[1], CS[3] - Not logically contiguous, return -EINVAL
   2246 */
   2247struct i915_context_engines_parallel_submit {
   2248	/**
   2249	 * @base: base user extension.
   2250	 */
   2251	struct i915_user_extension base;
   2252
   2253	/**
   2254	 * @engine_index: slot for parallel engine
   2255	 */
   2256	__u16 engine_index;
   2257
   2258	/**
   2259	 * @width: number of contexts per parallel engine or in other words the
   2260	 * number of batches in each submission
   2261	 */
   2262	__u16 width;
   2263
   2264	/**
   2265	 * @num_siblings: number of siblings per context or in other words the
   2266	 * number of possible placements for each submission
   2267	 */
   2268	__u16 num_siblings;
   2269
   2270	/**
   2271	 * @mbz16: reserved for future use; must be zero
   2272	 */
   2273	__u16 mbz16;
   2274
   2275	/**
   2276	 * @flags: all undefined flags must be zero; currently no flags are defined
   2277	 */
   2278	__u64 flags;
   2279
   2280	/**
   2281	 * @mbz64: reserved for future use; must be zero
   2282	 */
   2283	__u64 mbz64[3];
   2284
   2285	/**
   2286	 * @engines: 2-d array of engine instances to configure parallel engine
   2287	 *
   2288	 * length = width (i) * num_siblings (j)
   2289	 * index = j + i * num_siblings
   2290	 */
   2291	struct i915_engine_class_instance engines[0];
   2292
   2293} __packed;
   2294
   2295#define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
   2296	struct i915_user_extension base; \
   2297	__u16 engine_index; \
   2298	__u16 width; \
   2299	__u16 num_siblings; \
   2300	__u16 mbz16; \
   2301	__u64 flags; \
   2302	__u64 mbz64[3]; \
   2303	struct i915_engine_class_instance engines[N__]; \
   2304} __attribute__((packed)) name__
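
/*
 * A minimal sketch of configuring engine map slot 0 for parallel submission of
 * two batch buffers, each placed on a fixed video engine; the slot would then
 * be installed with I915_CONTEXT_PARAM_ENGINES as in the examples elsewhere in
 * this file. Error handling is omitted.
 *
 * .. code-block:: C
 *
 *	I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(parallel, 2) = {
 *		.base.name = I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT,
 *		.engine_index = 0,
 *		.width = 2,        // two BBs per execbuf
 *		.num_siblings = 1, // a single placement per BB
 *		.engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
 *			     { I915_ENGINE_CLASS_VIDEO, 1 } },
 *	};
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
 *		.engines = { { I915_ENGINE_CLASS_INVALID,
 *			       I915_ENGINE_CLASS_INVALID_NONE } },
 *		.extensions = (uintptr_t)&parallel,
 *	};
 */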
   2305
   2306/**
   2307 * DOC: Context Engine Map uAPI
   2308 *
   2309 * Context engine map is a new way of addressing engines when submitting batch-
   2310 * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
   2311 * inside the flags field of `struct drm_i915_gem_execbuffer2`.
   2312 *
   2313 * To use it, created GEM contexts need to be configured with a list of engines
   2314 * the user is intending to submit to. This is accomplished using the
   2315 * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
   2316 * i915_context_param_engines`.
   2317 *
   2318 * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
   2319 * configured map.
   2320 *
   2321 * Example of creating such context and submitting against it:
   2322 *
   2323 * .. code-block:: C
   2324 *
   2325 * 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
   2326 * 		.engines = { { I915_ENGINE_CLASS_RENDER, 0 },
   2327 * 			     { I915_ENGINE_CLASS_COPY, 0 } }
   2328 * 	};
   2329 * 	struct drm_i915_gem_context_create_ext_setparam p_engines = {
   2330 * 		.base = {
   2331 * 			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
   2332 * 		},
   2333 * 		.param = {
   2334 * 			.param = I915_CONTEXT_PARAM_ENGINES,
   2335 * 			.value = to_user_pointer(&engines),
   2336 * 			.size = sizeof(engines),
   2337 * 		},
   2338 * 	};
   2339 * 	struct drm_i915_gem_context_create_ext create = {
   2340 * 		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
   2341 * 		.extensions = to_user_pointer(&p_engines);
   2342 * 	};
   2343 *
   2344 * 	ctx_id = gem_context_create_ext(drm_fd, &create);
   2345 *
   2346 * 	// We have now created a GEM context with two engines in the map:
   2347 * 	// Index 0 points to rcs0 while index 1 points to bcs0. Other engines
   2348 * 	// will not be accessible from this context.
   2349 *
   2350 * 	...
   2351 * 	execbuf.rsvd1 = ctx_id;
   2352 * 	execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
   2353 * 	gem_execbuf(drm_fd, &execbuf);
   2354 *
   2355 * 	...
   2356 * 	execbuf.rsvd1 = ctx_id;
   2357 * 	execbuf.flags = 1; // Submits to index 1, which is bcs0 for this context
   2358 * 	gem_execbuf(drm_fd, &execbuf);
   2359 */
   2360
   2361struct i915_context_param_engines {
   2362	__u64 extensions; /* linked chain of extension blocks, 0 terminates */
   2363#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
   2364#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
   2365#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
   2366	struct i915_engine_class_instance engines[0];
   2367} __attribute__((packed));
   2368
   2369#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
   2370	__u64 extensions; \
   2371	struct i915_engine_class_instance engines[N__]; \
   2372} __attribute__((packed)) name__
   2373
   2374struct drm_i915_gem_context_create_ext_setparam {
   2375#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
   2376	struct i915_user_extension base;
   2377	struct drm_i915_gem_context_param param;
   2378};
   2379
   2380/* This API has been removed.  On the off chance someone somewhere has
   2381 * attempted to use it, never re-use this extension number.
   2382 */
   2383#define I915_CONTEXT_CREATE_EXT_CLONE 1
   2384
   2385struct drm_i915_gem_context_destroy {
   2386	__u32 ctx_id;
   2387	__u32 pad;
   2388};
   2389
   2390/*
   2391 * DRM_I915_GEM_VM_CREATE -
   2392 *
   2393 * Create a new virtual memory address space (ppGTT) for use within a context
   2394 * on the same file. Extensions can be provided to configure exactly how the
   2395 * address space is set up upon creation.
   2396 *
   2397 * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
   2398 * returned in the outparam @id.
   2399 *
   2400 * No flags are defined, with all bits reserved and must be zero.
   2401 *
   2402 * An extension chain may be provided, starting with @extensions, and terminated
   2403 * by the @next_extension being 0. Currently, no extensions are defined.
   2404 *
   2405 * DRM_I915_GEM_VM_DESTROY -
   2406 *
   2407 * Destroys a previously created VM id, specified in @id.
   2408 *
   2409 * No extensions or flags are allowed currently, and so must be zero.
   2410 */
   2411struct drm_i915_gem_vm_control {
   2412	__u64 extensions;
   2413	__u32 flags;
   2414	__u32 vm_id;
   2415};
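
/*
 * A minimal sketch of creating a ppGTT and pointing an existing context at it,
 * assuming an open DRM fd (drm_fd) and an existing context id (ctx_id); error
 * handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_vm_control vm = {};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_VM,
 *		.value = vm.vm_id,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */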
   2416
   2417struct drm_i915_reg_read {
   2418	/*
   2419	 * Register offset.
   2420	 * For 64bit wide registers where the upper 32bits don't immediately
   2421	 * follow the lower 32bits, the offset of the lower 32bits must
   2422	 * be specified
   2423	 */
   2424	__u64 offset;
   2425#define I915_REG_READ_8B_WA (1ul << 0)
   2426
   2427	__u64 val; /* Return value */
   2428};
   2429
   2430/* Known registers:
   2431 *
   2432 * Render engine timestamp - 0x2358 + 64bit - gen7+
   2433 * - Note this register returns an invalid value if using the default
   2434 *   single instruction 8-byte read; to work around that, pass the
   2435 *   I915_REG_READ_8B_WA flag in the offset field.
   2436 *
   2437 */
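
/*
 * A minimal sketch of reading the render engine timestamp listed above,
 * assuming an open DRM fd (drm_fd); error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_reg_read reg = {
 *		// Render engine timestamp, with the 8-byte read workaround.
 *		.offset = 0x2358 | I915_REG_READ_8B_WA,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
 *		printf("timestamp: %llu\n", (unsigned long long)reg.val);
 */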
   2438
   2439struct drm_i915_reset_stats {
   2440	__u32 ctx_id;
   2441	__u32 flags;
   2442
   2443	/* All resets since boot/module reload, for all contexts */
   2444	__u32 reset_count;
   2445
   2446	/* Number of batches lost when active in GPU, for this context */
   2447	__u32 batch_active;
   2448
   2449	/* Number of batches lost pending for execution, for this context */
   2450	__u32 batch_pending;
   2451
   2452	__u32 pad;
   2453};
   2454
   2455/**
   2456 * struct drm_i915_gem_userptr - Create GEM object from user allocated memory.
   2457 *
   2458 * Userptr objects have several restrictions on what ioctls can be used with the
   2459 * object handle.
   2460 */
   2461struct drm_i915_gem_userptr {
   2462	/**
   2463	 * @user_ptr: The pointer to the allocated memory.
   2464	 *
   2465	 * Needs to be aligned to PAGE_SIZE.
   2466	 */
   2467	__u64 user_ptr;
   2468
   2469	/**
   2470	 * @user_size:
   2471	 *
   2472	 * The size in bytes for the allocated memory. This will also become the
   2473	 * object size.
   2474	 *
   2475	 * Needs to be aligned to PAGE_SIZE, and should be at least PAGE_SIZE,
   2476	 * or larger.
   2477	 */
   2478	__u64 user_size;
   2479
   2480	/**
   2481	 * @flags:
   2482	 *
   2483	 * Supported flags:
   2484	 *
   2485	 * I915_USERPTR_READ_ONLY:
   2486	 *
   2487	 * Mark the object as readonly, this also means GPU access can only be
   2488	 * readonly. This is only supported on HW which supports readonly access
   2489	 * through the GTT. If the HW can't support readonly access, an error is
   2490	 * returned.
   2491	 *
   2492	 * I915_USERPTR_PROBE:
   2493	 *
   2494	 * Probe the provided @user_ptr range and validate that the @user_ptr is
   2495	 * indeed pointing to normal memory and that the range is also valid.
   2496	 * For example if some garbage address is given to the kernel, then this
   2497	 * should complain.
   2498	 *
   2499	 * Returns -EFAULT if the probe failed.
   2500	 *
   2501	 * Note that this doesn't populate the backing pages, and also doesn't
   2502	 * guarantee that the object will remain valid when the object is
   2503	 * eventually used.
   2504	 *
   2505	 * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE
   2506	 * returns a non-zero value.
   2507	 *
   2508	 * I915_USERPTR_UNSYNCHRONIZED:
   2509	 *
   2510	 * NOT USED. Setting this flag will result in an error.
   2511	 */
   2512	__u32 flags;
   2513#define I915_USERPTR_READ_ONLY 0x1
   2514#define I915_USERPTR_PROBE 0x2
   2515#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
   2516	/**
   2517	 * @handle: Returned handle for the object.
   2518	 *
   2519	 * Object handles are nonzero.
   2520	 */
   2521	__u32 handle;
   2522};
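
/*
 * A minimal sketch of wrapping anonymous memory in a GEM object, assuming an
 * open DRM fd (drm_fd) and 4 KiB pages; error handling is omitted.
 *
 * .. code-block:: C
 *
 *	const size_t sz = 16 * 4096; // page aligned size
 *	void *mem = NULL;
 *
 *	posix_memalign(&mem, 4096, sz); // PAGE_SIZE aligned pointer
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (uintptr_t)mem,
 *		.user_size = sz,
 *		.flags = I915_USERPTR_PROBE, // optional: validate the range up front
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0) {
 *		// arg.handle is now a GEM handle backed by 'mem'.
 *	}
 */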
   2523
   2524enum drm_i915_oa_format {
   2525	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
   2526	I915_OA_FORMAT_A29,	    /* HSW only */
   2527	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
   2528	I915_OA_FORMAT_B4_C8,	    /* HSW only */
   2529	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
   2530	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
   2531	I915_OA_FORMAT_C4_B8,	    /* HSW+ */
   2532
   2533	/* Gen8+ */
   2534	I915_OA_FORMAT_A12,
   2535	I915_OA_FORMAT_A12_B8_C8,
   2536	I915_OA_FORMAT_A32u40_A4u32_B8_C8,
   2537
   2538	I915_OA_FORMAT_MAX	    /* non-ABI */
   2539};
   2540
   2541enum drm_i915_perf_property_id {
   2542	/**
   2543	 * Open the stream for a specific context handle (as used with
   2544	 * execbuffer2). A stream opened for a specific context this way
   2545	 * won't typically require root privileges.
   2546	 *
   2547	 * This property is available in perf revision 1.
   2548	 */
   2549	DRM_I915_PERF_PROP_CTX_HANDLE = 1,
   2550
   2551	/**
   2552	 * A value of 1 requests the inclusion of raw OA unit reports as
   2553	 * part of stream samples.
   2554	 *
   2555	 * This property is available in perf revision 1.
   2556	 */
   2557	DRM_I915_PERF_PROP_SAMPLE_OA,
   2558
   2559	/**
   2560	 * The value specifies which set of OA unit metrics should be
   2561	 * configured, defining the contents of any OA unit reports.
   2562	 *
   2563	 * This property is available in perf revision 1.
   2564	 */
   2565	DRM_I915_PERF_PROP_OA_METRICS_SET,
   2566
   2567	/**
   2568	 * The value specifies the size and layout of OA unit reports.
   2569	 *
   2570	 * This property is available in perf revision 1.
   2571	 */
   2572	DRM_I915_PERF_PROP_OA_FORMAT,
   2573
   2574	/**
   2575	 * Specifying this property implicitly requests periodic OA unit
   2576	 * sampling and (at least on Haswell) the sampling frequency is derived
   2577	 * from this exponent as follows:
   2578	 *
   2579	 *   80ns * 2^(period_exponent + 1)
   2580	 *
   2581	 * This property is available in perf revision 1.
   2582	 */
   2583	DRM_I915_PERF_PROP_OA_EXPONENT,
   2584
   2585	/**
   2586	 * Specifying this property is only valid when specifying a context to
   2587	 * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
   2588	 * will hold preemption of the particular context we want to gather
   2589	 * performance data about. The execbuf2 submissions must include a
   2590	 * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
   2591	 *
   2592	 * This property is available in perf revision 3.
   2593	 */
   2594	DRM_I915_PERF_PROP_HOLD_PREEMPTION,
   2595
   2596	/**
   2597	 * Specifying this pins all contexts to the specified SSEU power
   2598	 * configuration for the duration of the recording.
   2599	 *
   2600	 * This parameter's value is a pointer to a struct
   2601	 * drm_i915_gem_context_param_sseu.
   2602	 *
   2603	 * This property is available in perf revision 4.
   2604	 */
   2605	DRM_I915_PERF_PROP_GLOBAL_SSEU,
   2606
   2607	/**
   2608	 * This optional parameter specifies the timer interval in nanoseconds
   2609	 * at which the i915 driver will check the OA buffer for available data.
   2610	 * Minimum allowed value is 100 microseconds. A default value is used by
   2611	 * the driver if this parameter is not specified. Note that larger timer
   2612	 * values will reduce CPU consumption during OA perf captures. However,
   2613	 * excessively large values would potentially result in OA buffer
   2614	 * overwrites as captures reach the end of the OA buffer.
   2615	 *
   2616	 * This property is available in perf revision 5.
   2617	 */
   2618	DRM_I915_PERF_PROP_POLL_OA_PERIOD,
   2619
   2620	DRM_I915_PERF_PROP_MAX /* non-ABI */
   2621};
   2622
   2623struct drm_i915_perf_open_param {
   2624	__u32 flags;
   2625#define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
   2626#define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
   2627#define I915_PERF_FLAG_DISABLED		(1<<2)
   2628
   2629	/** The number of u64 (id, value) pairs */
   2630	__u32 num_properties;
   2631
   2632	/**
   2633	 * Pointer to array of u64 (id, value) pairs configuring the stream
   2634	 * to open.
   2635	 */
   2636	__u64 properties_ptr;
   2637};
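
/*
 * A minimal sketch of opening an OA stream, assuming an open DRM fd (drm_fd)
 * and a metrics set id (metrics_set_id, e.g. looked up via sysfs); error
 * handling is omitted.
 *
 * .. code-block:: C
 *
 *	__u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 13,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_DISABLED,
 *		.num_properties = sizeof(properties) / (2 * sizeof(__u64)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *	// read() OA samples from stream_fd...
 */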
   2638
   2639/*
   2640 * Enable data capture for a stream that was either opened in a disabled state
   2641 * via I915_PERF_FLAG_DISABLED or was later disabled via
   2642 * I915_PERF_IOCTL_DISABLE.
   2643 *
   2644 * It is intended to be cheaper to disable and enable a stream than it may be
   2645 * to close and re-open a stream with the same configuration.
   2646 *
   2647 * It's undefined whether any pending data for the stream will be lost.
   2648 *
   2649 * This ioctl is available in perf revision 1.
   2650 */
   2651#define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)
   2652
   2653/*
   2654 * Disable data capture for a stream.
   2655 *
   2656 * It is an error to try and read a stream that is disabled.
   2657 *
   2658 * This ioctl is available in perf revision 1.
   2659 */
   2660#define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)
   2661
   2662/*
   2663 * Change metrics_set captured by a stream.
   2664 *
   2665 * If the stream is bound to a specific context, the configuration change
   2666 * will be performed inline with that context such that it takes effect before
   2667 * the next execbuf submission.
   2668 *
   2669 * Returns the previously bound metrics set id, or a negative error code.
   2670 *
   2671 * This ioctl is available in perf revision 2.
   2672 */
   2673#define I915_PERF_IOCTL_CONFIG	_IO('i', 0x2)
   2674
   2675/*
   2676 * Common to all i915 perf records
   2677 */
   2678struct drm_i915_perf_record_header {
   2679	__u32 type;
   2680	__u16 pad;
   2681	__u16 size;
   2682};
   2683
   2684enum drm_i915_perf_record_type {
   2685
   2686	/**
   2687	 * Samples are the workhorse record type whose contents are extensible
   2688	 * and defined when opening an i915 perf stream based on the given
   2689	 * properties.
   2690	 *
   2691	 * Boolean properties following the naming convention
   2692	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
   2693	 * every sample.
   2694	 *
   2695	 * The order of these sample properties given by userspace has no
   2696	 * effect on the ordering of data within a sample. The order is
   2697	 * documented here.
   2698	 *
   2699	 * struct {
   2700	 *     struct drm_i915_perf_record_header header;
   2701	 *
   2702	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
   2703	 * };
   2704	 */
   2705	DRM_I915_PERF_RECORD_SAMPLE = 1,
   2706
   2707	/*
   2708	 * Indicates that one or more OA reports were not written by the
   2709	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
   2710	 * command collides with periodic sampling - which would be more likely
   2711	 * at higher sampling frequencies.
   2712	 */
   2713	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
   2714
   2715	/**
   2716	 * An error occurred that resulted in all pending OA reports being lost.
   2717	 */
   2718	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
   2719
   2720	DRM_I915_PERF_RECORD_MAX /* non-ABI */
   2721};
   2722
   2723/**
   2724 * struct drm_i915_perf_oa_config
   2725 *
   2726 * Structure to upload perf dynamic configuration into the kernel.
   2727 */
   2728struct drm_i915_perf_oa_config {
   2729	/**
   2730	 * @uuid:
   2731	 *
   2732	 * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x"
   2733	 */
   2734	char uuid[36];
   2735
   2736	/**
   2737	 * @n_mux_regs:
   2738	 *
   2739	 * Number of mux regs in &mux_regs_ptr.
   2740	 */
   2741	__u32 n_mux_regs;
   2742
   2743	/**
   2744	 * @n_boolean_regs:
   2745	 *
   2746	 * Number of boolean regs in &boolean_regs_ptr.
   2747	 */
   2748	__u32 n_boolean_regs;
   2749
   2750	/**
   2751	 * @n_flex_regs:
   2752	 *
   2753	 * Number of flex regs in &flex_regs_ptr.
   2754	 */
   2755	__u32 n_flex_regs;
   2756
   2757	/**
   2758	 * @mux_regs_ptr:
   2759	 *
   2760	 * Pointer to tuples of u32 values (register address, value) for mux
   2761	 * registers.  Expected length of buffer is (2 * sizeof(u32) *
   2762	 * &n_mux_regs).
   2763	 */
   2764	__u64 mux_regs_ptr;
   2765
   2766	/**
   2767	 * @boolean_regs_ptr:
   2768	 *
   2769	 * Pointer to tuples of u32 values (register address, value) for boolean
   2770	 * registers.  Expected length of buffer is (2 * sizeof(u32) *
   2771	 * &n_boolean_regs).
   2772	 */
   2773	__u64 boolean_regs_ptr;
   2774
   2775	/**
   2776	 * @flex_regs_ptr:
   2777	 *
    2778	 * Pointer to tuples of u32 values (register address, value) for
    2779	 * flex registers.  Expected length of buffer is (2 * sizeof(u32) *
   2780	 * &n_flex_regs).
   2781	 */
   2782	__u64 flex_regs_ptr;
   2783};
   2784
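/*
 * A sketch of registering a dynamic OA configuration with
 * DRM_IOCTL_I915_PERF_ADD_CONFIG (defined earlier in this header), which
 * returns the new configuration id on success. The uuid and the register
 * address/value pair below are placeholders; real configurations must use
 * register addresses the kernel accepts for the platform, and adding a
 * configuration may require elevated privileges depending on system settings.
 *
 * .. code-block:: C
 *
 *	__u32 mux_regs[] = {
 *		// hypothetical (address, value) pair
 *		0x9888, 0x00000000,
 *	};
 *	struct drm_i915_perf_oa_config config = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_mux_regs = 1,
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *	};
 *	int config_id;
 *
 *	config_id = ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *	if (config_id < 0) ...
 */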
   2785/**
   2786 * struct drm_i915_query_item - An individual query for the kernel to process.
   2787 *
    2788 * The behaviour is determined by the @query_id. Note that exactly what is
    2789 * written at @data_ptr also depends on the specific @query_id.
   2790 */
   2791struct drm_i915_query_item {
   2792	/**
   2793	 * @query_id:
   2794	 *
   2795	 * The id for this query.  Currently accepted query IDs are:
   2796	 *  - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info)
   2797	 *  - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info)
   2798	 *  - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config)
   2799	 *  - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
   2800	 *  - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
   2801	 *  - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
   2802	 */
   2803	__u64 query_id;
   2804#define DRM_I915_QUERY_TOPOLOGY_INFO		1
   2805#define DRM_I915_QUERY_ENGINE_INFO		2
   2806#define DRM_I915_QUERY_PERF_CONFIG		3
   2807#define DRM_I915_QUERY_MEMORY_REGIONS		4
   2808#define DRM_I915_QUERY_HWCONFIG_BLOB		5
   2809#define DRM_I915_QUERY_GEOMETRY_SUBSLICES	6
   2810/* Must be kept compact -- no holes and well documented */
   2811
   2812	/**
   2813	 * @length:
   2814	 *
   2815	 * When set to zero by userspace, this is filled with the size of the
   2816	 * data to be written at the @data_ptr pointer. The kernel sets this
   2817	 * value to a negative value to signal an error on a particular query
   2818	 * item.
   2819	 */
   2820	__s32 length;
   2821
   2822	/**
   2823	 * @flags:
   2824	 *
   2825	 * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
   2826	 *
   2827	 * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the
   2828	 * following:
   2829	 *
   2830	 *	- %DRM_I915_QUERY_PERF_CONFIG_LIST
    2831	 *	- %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
    2832	 *	- %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
    2833	 *
    2834	 * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES, @flags must contain
   2835	 * a struct i915_engine_class_instance that references a render engine.
   2836	 */
   2837	__u32 flags;
   2838#define DRM_I915_QUERY_PERF_CONFIG_LIST          1
   2839#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
   2840#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID   3
   2841
   2842	/**
   2843	 * @data_ptr:
   2844	 *
   2845	 * Data will be written at the location pointed by @data_ptr when the
   2846	 * value of @length matches the length of the data to be written by the
   2847	 * kernel.
   2848	 */
   2849	__u64 data_ptr;
   2850};
   2851
   2852/**
   2853 * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
   2854 * kernel to fill out.
   2855 *
   2856 * Note that this is generally a two step process for each struct
   2857 * drm_i915_query_item in the array:
   2858 *
   2859 * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
   2860 *    drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
   2861 *    kernel will then fill in the size, in bytes, which tells userspace how
    2862 *    much memory it needs to allocate for the blob (say for an array of properties).
   2863 *
   2864 * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
   2865 *    &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
   2866 *    the &drm_i915_query_item.length should still be the same as what the
   2867 *    kernel previously set. At this point the kernel can fill in the blob.
   2868 *
   2869 * Note that for some query items it can make sense for userspace to just pass
   2870 * in a buffer/blob equal to or larger than the required size. In this case only
   2871 * a single ioctl call is needed. For some smaller query items this can work
   2872 * quite well.
   2873 *
   2874 */
   2875struct drm_i915_query {
   2876	/** @num_items: The number of elements in the @items_ptr array */
   2877	__u32 num_items;
   2878
   2879	/**
   2880	 * @flags: Unused for now. Must be cleared to zero.
   2881	 */
   2882	__u32 flags;
   2883
   2884	/**
   2885	 * @items_ptr:
   2886	 *
   2887	 * Pointer to an array of struct drm_i915_query_item. The number of
   2888	 * array elements is @num_items.
   2889	 */
   2890	__u64 items_ptr;
   2891};
   2892
   2893/**
   2894 * struct drm_i915_query_topology_info
   2895 *
   2896 * Describes slice/subslice/EU information queried by
    2897 * %DRM_I915_QUERY_TOPOLOGY_INFO and %DRM_I915_QUERY_GEOMETRY_SUBSLICES
   2898 */
   2899struct drm_i915_query_topology_info {
   2900	/**
   2901	 * @flags:
   2902	 *
   2903	 * Unused for now. Must be cleared to zero.
   2904	 */
   2905	__u16 flags;
   2906
   2907	/**
   2908	 * @max_slices:
   2909	 *
   2910	 * The number of bits used to express the slice mask.
   2911	 */
   2912	__u16 max_slices;
   2913
   2914	/**
   2915	 * @max_subslices:
   2916	 *
   2917	 * The number of bits used to express the subslice mask.
   2918	 */
   2919	__u16 max_subslices;
   2920
   2921	/**
   2922	 * @max_eus_per_subslice:
   2923	 *
   2924	 * The number of bits in the EU mask that correspond to a single
   2925	 * subslice's EUs.
   2926	 */
   2927	__u16 max_eus_per_subslice;
   2928
   2929	/**
   2930	 * @subslice_offset:
   2931	 *
   2932	 * Offset in data[] at which the subslice masks are stored.
   2933	 */
   2934	__u16 subslice_offset;
   2935
   2936	/**
   2937	 * @subslice_stride:
   2938	 *
   2939	 * Stride at which each of the subslice masks for each slice are
   2940	 * stored.
   2941	 */
   2942	__u16 subslice_stride;
   2943
   2944	/**
   2945	 * @eu_offset:
   2946	 *
   2947	 * Offset in data[] at which the EU masks are stored.
   2948	 */
   2949	__u16 eu_offset;
   2950
   2951	/**
   2952	 * @eu_stride:
   2953	 *
   2954	 * Stride at which each of the EU masks for each subslice are stored.
   2955	 */
   2956	__u16 eu_stride;
   2957
   2958	/**
   2959	 * @data:
   2960	 *
   2961	 * Contains 3 pieces of information :
   2962	 *
   2963	 * - The slice mask with one bit per slice telling whether a slice is
   2964	 *   available. The availability of slice X can be queried with the
   2965	 *   following formula :
   2966	 *
   2967	 *   .. code:: c
   2968	 *
   2969	 *      (data[X / 8] >> (X % 8)) & 1
   2970	 *
   2971	 *   Starting with Xe_HP platforms, Intel hardware no longer has
   2972	 *   traditional slices so i915 will always report a single slice
   2973	 *   (hardcoded slicemask = 0x1) which contains all of the platform's
   2974	 *   subslices.  I.e., the mask here does not reflect any of the newer
   2975	 *   hardware concepts such as "gslices" or "cslices" since userspace
   2976	 *   is capable of inferring those from the subslice mask.
   2977	 *
   2978	 * - The subslice mask for each slice with one bit per subslice telling
   2979	 *   whether a subslice is available.  Starting with Gen12 we use the
   2980	 *   term "subslice" to refer to what the hardware documentation
    2981	 * describes as "dual-subslices."  The availability of subslice Y
   2982	 *   in slice X can be queried with the following formula :
   2983	 *
   2984	 *   .. code:: c
   2985	 *
   2986	 *      (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1
   2987	 *
   2988	 * - The EU mask for each subslice in each slice, with one bit per EU
   2989	 *   telling whether an EU is available. The availability of EU Z in
   2990	 *   subslice Y in slice X can be queried with the following formula :
   2991	 *
   2992	 *   .. code:: c
   2993	 *
   2994	 *      (data[eu_offset +
   2995	 *            (X * max_subslices + Y) * eu_stride +
   2996	 *            Z / 8
   2997	 *       ] >> (Z % 8)) & 1
   2998	 */
   2999	__u8 data[];
   3000};
   3001
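/*
 * A sketch of querying the topology with the usual two-pass pattern and then
 * applying the slice/subslice mask formulas documented above; fd is assumed
 * to be an open i915 DRM file descriptor.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_query_topology_info *topo;
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *	int s, ss;
 *
 *	// First pass: item.length is zero, so the kernel reports the size needed.
 *	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0) ...
 *
 *	topo = calloc(1, item.length);
 *	item.data_ptr = (uintptr_t)topo;
 *
 *	// Second pass: the kernel fills in the topology blob.
 *	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query)) ...
 *
 *	for (s = 0; s < topo->max_slices; s++) {
 *		const __u8 *ss_mask;
 *
 *		if (!((topo->data[s / 8] >> (s % 8)) & 1))
 *			continue;
 *
 *		ss_mask = topo->data + topo->subslice_offset +
 *			  s * topo->subslice_stride;
 *
 *		for (ss = 0; ss < topo->max_subslices; ss++)
 *			if ((ss_mask[ss / 8] >> (ss % 8)) & 1)
 *				// subslice ss in slice s is available
 *				...
 *	}
 *
 *	free(topo);
 */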
   3002/**
   3003 * DOC: Engine Discovery uAPI
   3004 *
   3005 * Engine discovery uAPI is a way of enumerating physical engines present in a
   3006 * GPU associated with an open i915 DRM file descriptor. This supersedes the old
   3007 * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
   3008 * `I915_PARAM_HAS_BLT`.
   3009 *
    3010 * The need for this interface arose with Icelake and newer GPUs, which
    3011 * established a pattern of having multiple engines of the same class,
    3012 * where not all instances are completely functionally equivalent.
   3013 *
   3014 * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with the
   3015 * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
   3016 *
   3017 * Example for getting the list of engines:
   3018 *
   3019 * .. code-block:: C
   3020 *
   3021 * 	struct drm_i915_query_engine_info *info;
   3022 * 	struct drm_i915_query_item item = {
    3023 * 		.query_id = DRM_I915_QUERY_ENGINE_INFO,
   3024 * 	};
   3025 * 	struct drm_i915_query query = {
   3026 * 		.num_items = 1,
   3027 * 		.items_ptr = (uintptr_t)&item,
   3028 * 	};
   3029 * 	int err, i;
   3030 *
    3031 * 	// First query the size of the blob we need; this needs to be large
   3032 * 	// enough to hold our array of engines. The kernel will fill out the
   3033 * 	// item.length for us, which is the number of bytes we need.
   3034 * 	//
   3035 * 	// Alternatively a large buffer can be allocated straight away enabling
   3036 * 	// querying in one pass, in which case item.length should contain the
   3037 * 	// length of the provided buffer.
   3038 * 	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
   3039 * 	if (err) ...
   3040 *
   3041 * 	info = calloc(1, item.length);
   3042 * 	// Now that we allocated the required number of bytes, we call the ioctl
   3043 * 	// again, this time with the data_ptr pointing to our newly allocated
   3044 * 	// blob, which the kernel can then populate with info on all engines.
    3045 * 	item.data_ptr = (uintptr_t)info;
   3046 *
   3047 * 	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
   3048 * 	if (err) ...
   3049 *
   3050 * 	// We can now access each engine in the array
   3051 * 	for (i = 0; i < info->num_engines; i++) {
   3052 * 		struct drm_i915_engine_info einfo = info->engines[i];
    3053 * 		u16 class = einfo.engine.engine_class;
    3054 * 		u16 instance = einfo.engine.engine_instance;
   3055 * 		....
   3056 * 	}
   3057 *
   3058 * 	free(info);
   3059 *
   3060 * Each of the enumerated engines, apart from being defined by its class and
    3061 * instance (see `struct i915_engine_class_instance`), can also have flags and
   3062 * capabilities defined as documented in i915_drm.h.
   3063 *
    3064 * For instance, video engines that support HEVC encoding will have the
   3065 * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
   3066 *
    3067 * Engine discovery only fully comes into its own when combined with the new way
   3068 * of addressing engines when submitting batch buffers using contexts with
   3069 * engine maps configured.
   3070 */
   3071
   3072/**
   3073 * struct drm_i915_engine_info
   3074 *
    3075 * Describes one engine and its capabilities as known to the driver.
   3076 */
   3077struct drm_i915_engine_info {
   3078	/** @engine: Engine class and instance. */
   3079	struct i915_engine_class_instance engine;
   3080
   3081	/** @rsvd0: Reserved field. */
   3082	__u32 rsvd0;
   3083
   3084	/** @flags: Engine flags. */
   3085	__u64 flags;
   3086#define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE		(1 << 0)
   3087
   3088	/** @capabilities: Capabilities of this engine. */
   3089	__u64 capabilities;
   3090#define I915_VIDEO_CLASS_CAPABILITY_HEVC		(1 << 0)
   3091#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC	(1 << 1)
   3092
   3093	/** @logical_instance: Logical instance of engine */
   3094	__u16 logical_instance;
   3095
   3096	/** @rsvd1: Reserved fields. */
   3097	__u16 rsvd1[3];
   3098	/** @rsvd2: Reserved fields. */
   3099	__u64 rsvd2[3];
   3100};
   3101
   3102/**
   3103 * struct drm_i915_query_engine_info
   3104 *
   3105 * Engine info query enumerates all engines known to the driver by filling in
   3106 * an array of struct drm_i915_engine_info structures.
   3107 */
   3108struct drm_i915_query_engine_info {
   3109	/** @num_engines: Number of struct drm_i915_engine_info structs following. */
   3110	__u32 num_engines;
   3111
   3112	/** @rsvd: MBZ */
   3113	__u32 rsvd[3];
   3114
   3115	/** @engines: Marker for drm_i915_engine_info structures. */
   3116	struct drm_i915_engine_info engines[];
   3117};
   3118
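/*
 * Continuing from the enumeration example in the Engine Discovery uAPI
 * section above (so info has already been populated), a sketch of picking
 * out video engines that advertise HEVC support; I915_ENGINE_CLASS_VIDEO is
 * the video engine class value defined earlier in this header.
 *
 * .. code-block:: C
 *
 *	for (i = 0; i < info->num_engines; i++) {
 *		const struct drm_i915_engine_info *einfo = &info->engines[i];
 *
 *		if (einfo->engine.engine_class != I915_ENGINE_CLASS_VIDEO)
 *			continue;
 *
 *		if (einfo->capabilities & I915_VIDEO_CLASS_CAPABILITY_HEVC)
 *			// this video engine instance can handle HEVC
 *			...
 *	}
 */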
   3119/**
   3120 * struct drm_i915_query_perf_config
   3121 *
    3122 * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG.
   3124 */
   3125struct drm_i915_query_perf_config {
   3126	union {
   3127		/**
   3128		 * @n_configs:
   3129		 *
   3130		 * When &drm_i915_query_item.flags ==
    3131		 * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this field to
   3132		 * the number of configurations available.
   3133		 */
   3134		__u64 n_configs;
   3135
   3136		/**
   3137		 * @config:
   3138		 *
   3139		 * When &drm_i915_query_item.flags ==
   3140		 * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the
   3141		 * value in this field as configuration identifier to decide
   3142		 * what data to write into config_ptr.
   3143		 */
   3144		__u64 config;
   3145
   3146		/**
   3147		 * @uuid:
   3148		 *
   3149		 * When &drm_i915_query_item.flags ==
   3150		 * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the
   3151		 * value in this field as configuration identifier to decide
   3152		 * what data to write into config_ptr.
   3153		 *
   3154		 * String formatted like "%08x-%04x-%04x-%04x-%012x"
   3155		 */
   3156		char uuid[36];
   3157	};
   3158
   3159	/**
   3160	 * @flags:
   3161	 *
   3162	 * Unused for now. Must be cleared to zero.
   3163	 */
   3164	__u32 flags;
   3165
   3166	/**
   3167	 * @data:
   3168	 *
   3169	 * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST,
   3170	 * i915 will write an array of __u64 of configuration identifiers.
   3171	 *
    3172	 * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
    3173	 * or %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will write a struct
    3174	 * drm_i915_perf_oa_config. If the following fields of struct
    3175	 * drm_i915_perf_oa_config are not set to 0, i915 will write into the
    3176	 * associated pointers the values submitted when the configuration was created :
   3177	 *
   3178	 *  - &drm_i915_perf_oa_config.n_mux_regs
   3179	 *  - &drm_i915_perf_oa_config.n_boolean_regs
   3180	 *  - &drm_i915_perf_oa_config.n_flex_regs
   3181	 */
   3182	__u8 data[];
   3183};
   3184
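/*
 * A sketch of listing the available configurations with
 * %DRM_I915_QUERY_PERF_CONFIG and %DRM_I915_QUERY_PERF_CONFIG_LIST, using the
 * same two-pass pattern as the other query items. Per the documentation
 * above, the returned blob starts with this struct, with @n_configs filled in
 * and the __u64 identifiers following in @data.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_query_perf_config *list;
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_PERF_CONFIG,
 *		.flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *	__u64 i, id;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0) ...
 *
 *	list = calloc(1, item.length);
 *	item.data_ptr = (uintptr_t)list;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query)) ...
 *
 *	for (i = 0; i < list->n_configs; i++) {
 *		// memcpy avoids assuming the identifiers are 8-byte aligned
 *		memcpy(&id, list->data + i * sizeof(id), sizeof(id));
 *		...
 *	}
 *
 *	free(list);
 */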
   3185/**
   3186 * enum drm_i915_gem_memory_class - Supported memory classes
   3187 */
   3188enum drm_i915_gem_memory_class {
   3189	/** @I915_MEMORY_CLASS_SYSTEM: System memory */
   3190	I915_MEMORY_CLASS_SYSTEM = 0,
   3191	/** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
   3192	I915_MEMORY_CLASS_DEVICE,
   3193};
   3194
   3195/**
   3196 * struct drm_i915_gem_memory_class_instance - Identify particular memory region
   3197 */
   3198struct drm_i915_gem_memory_class_instance {
   3199	/** @memory_class: See enum drm_i915_gem_memory_class */
   3200	__u16 memory_class;
   3201
   3202	/** @memory_instance: Which instance */
   3203	__u16 memory_instance;
   3204};
   3205
   3206/**
   3207 * struct drm_i915_memory_region_info - Describes one region as known to the
   3208 * driver.
   3209 *
   3210 * Note that we reserve some stuff here for potential future work. As an example
    3211 * we might want to expose the capabilities for a given region, which could include
    3212 * things like whether the region is CPU mappable/accessible, what the
    3213 * supported mapping types are, etc.
   3214 *
   3215 * Note that to extend struct drm_i915_memory_region_info and struct
   3216 * drm_i915_query_memory_regions in the future the plan is to do the following:
   3217 *
   3218 * .. code-block:: C
   3219 *
   3220 *	struct drm_i915_memory_region_info {
   3221 *		struct drm_i915_gem_memory_class_instance region;
   3222 *		union {
   3223 *			__u32 rsvd0;
   3224 *			__u32 new_thing1;
   3225 *		};
   3226 *		...
   3227 *		union {
   3228 *			__u64 rsvd1[8];
   3229 *			struct {
   3230 *				__u64 new_thing2;
   3231 *				__u64 new_thing3;
   3232 *				...
   3233 *			};
   3234 *		};
   3235 *	};
   3236 *
    3237 * With this, things should remain source compatible between versions for
   3238 * userspace, even as we add new fields.
   3239 *
   3240 * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
   3241 * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
   3242 * at &drm_i915_query_item.query_id.
   3243 */
   3244struct drm_i915_memory_region_info {
   3245	/** @region: The class:instance pair encoding */
   3246	struct drm_i915_gem_memory_class_instance region;
   3247
   3248	/** @rsvd0: MBZ */
   3249	__u32 rsvd0;
   3250
   3251	/** @probed_size: Memory probed by the driver (-1 = unknown) */
   3252	__u64 probed_size;
   3253
   3254	/** @unallocated_size: Estimate of memory remaining (-1 = unknown) */
   3255	__u64 unallocated_size;
   3256
   3257	/** @rsvd1: MBZ */
   3258	__u64 rsvd1[8];
   3259};
   3260
   3261/**
   3262 * struct drm_i915_query_memory_regions
   3263 *
   3264 * The region info query enumerates all regions known to the driver by filling
   3265 * in an array of struct drm_i915_memory_region_info structures.
   3266 *
   3267 * Example for getting the list of supported regions:
   3268 *
   3269 * .. code-block:: C
   3270 *
   3271 *	struct drm_i915_query_memory_regions *info;
   3272 *	struct drm_i915_query_item item = {
    3273 *		.query_id = DRM_I915_QUERY_MEMORY_REGIONS,
   3274 *	};
   3275 *	struct drm_i915_query query = {
   3276 *		.num_items = 1,
   3277 *		.items_ptr = (uintptr_t)&item,
   3278 *	};
   3279 *	int err, i;
   3280 *
    3281 *	// First query the size of the blob we need; this needs to be large
   3282 *	// enough to hold our array of regions. The kernel will fill out the
   3283 *	// item.length for us, which is the number of bytes we need.
   3284 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
   3285 *	if (err) ...
   3286 *
   3287 *	info = calloc(1, item.length);
   3288 *	// Now that we allocated the required number of bytes, we call the ioctl
   3289 *	// again, this time with the data_ptr pointing to our newly allocated
    3290 *	// blob, which the kernel can then populate with all the region info.
    3291 *	item.data_ptr = (uintptr_t)info;
   3292 *
   3293 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
   3294 *	if (err) ...
   3295 *
   3296 *	// We can now access each region in the array
   3297 *	for (i = 0; i < info->num_regions; i++) {
   3298 *		struct drm_i915_memory_region_info mr = info->regions[i];
    3299 *		u16 class = mr.region.memory_class;
    3300 *		u16 instance = mr.region.memory_instance;
   3301 *
   3302 *		....
   3303 *	}
   3304 *
   3305 *	free(info);
   3306 */
   3307struct drm_i915_query_memory_regions {
   3308	/** @num_regions: Number of supported regions */
   3309	__u32 num_regions;
   3310
   3311	/** @rsvd: MBZ */
   3312	__u32 rsvd[3];
   3313
   3314	/** @regions: Info about each supported region */
   3315	struct drm_i915_memory_region_info regions[];
   3316};
   3317
   3318/**
   3319 * DOC: GuC HWCONFIG blob uAPI
   3320 *
   3321 * The GuC produces a blob with information about the current device.
   3322 * i915 reads this blob from GuC and makes it available via this uAPI.
   3323 *
   3324 * The format and meaning of the blob content are documented in the
   3325 * Programmer's Reference Manual.
   3326 */
   3327
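/*
 * A sketch of reading the blob via %DRM_I915_QUERY_HWCONFIG_BLOB with the
 * usual two-pass query pattern; the contents are treated as opaque here and
 * interpreted per the Programmer's Reference Manual.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_HWCONFIG_BLOB,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *	void *blob;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0) ...
 *
 *	blob = calloc(1, item.length);
 *	item.data_ptr = (uintptr_t)blob;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query)) ...
 *
 *	// blob now holds item.length bytes of GuC HWCONFIG data
 *	free(blob);
 */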
   3328/**
   3329 * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
   3330 * extension support using struct i915_user_extension.
   3331 *
   3332 * Note that in the future we want to have our buffer flags here, at least for
   3333 * the stuff that is immutable. Previously we would have two ioctls, one to
    3334 * create the object with gem_create, and another to apply various parameters;
    3335 * however, this creates some ambiguity for the params which are considered
   3336 * immutable. Also in general we're phasing out the various SET/GET ioctls.
   3337 */
   3338struct drm_i915_gem_create_ext {
   3339	/**
   3340	 * @size: Requested size for the object.
   3341	 *
   3342	 * The (page-aligned) allocated size for the object will be returned.
   3343	 *
   3344	 *
   3345	 * DG2 64K min page size implications:
   3346	 *
   3347	 * On discrete platforms, starting from DG2, we have to contend with GTT
   3348	 * page size restrictions when dealing with I915_MEMORY_CLASS_DEVICE
   3349	 * objects.  Specifically the hardware only supports 64K or larger GTT
   3350	 * page sizes for such memory. The kernel will already ensure that all
   3351	 * I915_MEMORY_CLASS_DEVICE memory is allocated using 64K or larger page
   3352	 * sizes underneath.
   3353	 *
   3354	 * Note that the returned size here will always reflect any required
    3355	 * rounding up done by the kernel, i.e. 4K will now become 64K on devices
   3356	 * such as DG2.
   3357	 *
   3358	 * Special DG2 GTT address alignment requirement:
   3359	 *
   3360	 * The GTT alignment will also need to be at least 2M for such objects.
   3361	 *
   3362	 * Note that due to how the hardware implements 64K GTT page support, we
   3363	 * have some further complications:
   3364	 *
    3365	 *   1) The entire PDE (which covers a 2MB virtual address range) must
    3366	 *   contain only 64K PTEs, i.e. mixing 4K and 64K PTEs in the same
   3367	 *   PDE is forbidden by the hardware.
   3368	 *
   3369	 *   2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM
   3370	 *   objects.
   3371	 *
   3372	 * To keep things simple for userland, we mandate that any GTT mappings
   3373	 * must be aligned to and rounded up to 2MB. The kernel will internally
   3374	 * pad them out to the next 2MB boundary. As this only wastes virtual
    3375	 * address space and avoids userland having to cope with any needlessly
   3376	 * complicated PDE sharing scheme (coloring) and only affects DG2, this
   3377	 * is deemed to be a good compromise.
   3378	 */
   3379	__u64 size;
   3380	/**
   3381	 * @handle: Returned handle for the object.
   3382	 *
   3383	 * Object handles are nonzero.
   3384	 */
   3385	__u32 handle;
   3386	/** @flags: MBZ */
   3387	__u32 flags;
   3388	/**
   3389	 * @extensions: The chain of extensions to apply to this object.
   3390	 *
   3391	 * This will be useful in the future when we need to support several
   3392	 * different extensions, and we need to apply more than one when
   3393	 * creating the object. See struct i915_user_extension.
   3394	 *
   3395	 * If we don't supply any extensions then we get the same old gem_create
   3396	 * behaviour.
   3397	 *
   3398	 * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
   3399	 * struct drm_i915_gem_create_ext_memory_regions.
   3400	 *
   3401	 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
   3402	 * struct drm_i915_gem_create_ext_protected_content.
   3403	 */
   3404#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
   3405#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
   3406	__u64 extensions;
   3407};
   3408
   3409/**
   3410 * struct drm_i915_gem_create_ext_memory_regions - The
   3411 * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.
   3412 *
   3413 * Set the object with the desired set of placements/regions in priority
   3414 * order. Each entry must be unique and supported by the device.
   3415 *
   3416 * This is provided as an array of struct drm_i915_gem_memory_class_instance, or
   3417 * an equivalent layout of class:instance pair encodings. See struct
   3418 * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to
   3419 * query the supported regions for a device.
   3420 *
   3421 * As an example, on discrete devices, if we wish to set the placement as
   3422 * device local-memory we can do something like:
   3423 *
   3424 * .. code-block:: C
   3425 *
   3426 *	struct drm_i915_gem_memory_class_instance region_lmem = {
   3427 *              .memory_class = I915_MEMORY_CLASS_DEVICE,
   3428 *              .memory_instance = 0,
   3429 *      };
   3430 *      struct drm_i915_gem_create_ext_memory_regions regions = {
   3431 *              .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
   3432 *              .regions = (uintptr_t)&region_lmem,
   3433 *              .num_regions = 1,
   3434 *      };
   3435 *      struct drm_i915_gem_create_ext create_ext = {
   3436 *              .size = 16 * PAGE_SIZE,
   3437 *              .extensions = (uintptr_t)&regions,
   3438 *      };
   3439 *
   3440 *      int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
   3441 *      if (err) ...
   3442 *
   3443 * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
   3444 * along with the final object size in &drm_i915_gem_create_ext.size, which
   3445 * should account for any rounding up, if required.
   3446 */
   3447struct drm_i915_gem_create_ext_memory_regions {
   3448	/** @base: Extension link. See struct i915_user_extension. */
   3449	struct i915_user_extension base;
   3450
   3451	/** @pad: MBZ */
   3452	__u32 pad;
   3453	/** @num_regions: Number of elements in the @regions array. */
   3454	__u32 num_regions;
   3455	/**
   3456	 * @regions: The regions/placements array.
   3457	 *
   3458	 * An array of struct drm_i915_gem_memory_class_instance.
   3459	 */
   3460	__u64 regions;
   3461};
   3462
   3463/**
   3464 * struct drm_i915_gem_create_ext_protected_content - The
   3465 * I915_OBJECT_PARAM_PROTECTED_CONTENT extension.
   3466 *
   3467 * If this extension is provided, buffer contents are expected to be protected
   3468 * by PXP encryption and require decryption for scan out and processing. This
    3469 * is only possible on platforms that have PXP enabled; in all other scenarios
   3470 * using this extension will cause the ioctl to fail and return -ENODEV. The
   3471 * flags parameter is reserved for future expansion and must currently be set
   3472 * to zero.
   3473 *
   3474 * The buffer contents are considered invalid after a PXP session teardown.
   3475 *
   3476 * The encryption is guaranteed to be processed correctly only if the object
   3477 * is submitted with a context created using the
   3478 * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
   3479 * at submission time on the validity of the objects involved.
   3480 *
   3481 * Below is an example on how to create a protected object:
   3482 *
   3483 * .. code-block:: C
   3484 *
   3485 *      struct drm_i915_gem_create_ext_protected_content protected_ext = {
   3486 *              .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
   3487 *              .flags = 0,
   3488 *      };
   3489 *      struct drm_i915_gem_create_ext create_ext = {
   3490 *              .size = PAGE_SIZE,
   3491 *              .extensions = (uintptr_t)&protected_ext,
   3492 *      };
   3493 *
   3494 *      int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
   3495 *      if (err) ...
   3496 */
   3497struct drm_i915_gem_create_ext_protected_content {
   3498	/** @base: Extension link. See struct i915_user_extension. */
   3499	struct i915_user_extension base;
   3500	/** @flags: reserved for future usage, currently MBZ */
   3501	__u32 flags;
   3502};
   3503
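/*
 * The two gem_create_ext extensions above can be combined by chaining them
 * through &i915_user_extension.next_extension. The sketch below only
 * illustrates the chaining mechanism; whether a platform accepts this
 * particular combination (protected content placed in device local-memory)
 * depends on its PXP and local-memory support.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_create_ext_protected_content protected_ext = {
 *		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
 *	};
 *	struct drm_i915_gem_memory_class_instance region_lmem = {
 *		.memory_class = I915_MEMORY_CLASS_DEVICE,
 *		.memory_instance = 0,
 *	};
 *	struct drm_i915_gem_create_ext_memory_regions regions_ext = {
 *		.base = {
 *			.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
 *			.next_extension = (uintptr_t)&protected_ext,
 *		},
 *		.regions = (uintptr_t)&region_lmem,
 *		.num_regions = 1,
 *	};
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = 16 * PAGE_SIZE,
 *		.extensions = (uintptr_t)&regions_ext,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *	if (err) ...
 */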
   3504/* ID of the protected content session managed by i915 when PXP is active */
   3505#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
   3506
   3507#if defined(__cplusplus)
   3508}
   3509#endif
   3510
   3511#endif /* _UAPI_I915_DRM_H_ */