cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

virtgpu_drm.h (7257B)


      1/*
      2 * Copyright 2013 Red Hat
      3 * All Rights Reserved.
      4 *
      5 * Permission is hereby granted, free of charge, to any person obtaining a
      6 * copy of this software and associated documentation files (the "Software"),
      7 * to deal in the Software without restriction, including without limitation
      8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      9 * and/or sell copies of the Software, and to permit persons to whom the
     10 * Software is furnished to do so, subject to the following conditions:
     11 *
     12 * The above copyright notice and this permission notice (including the next
     13 * paragraph) shall be included in all copies or substantial portions of the
     14 * Software.
     15 *
     16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19 * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
     20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     22 * OTHER DEALINGS IN THE SOFTWARE.
     23 */
     24#ifndef VIRTGPU_DRM_H
     25#define VIRTGPU_DRM_H
     26
     27#include "drm.h"
     28
     29#if defined(__cplusplus)
     30extern "C" {
     31#endif
     32
     33/* Please note that modifications to all structs defined here are
     34 * subject to backwards-compatibility constraints.
     35 *
     36 * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
     37 * compatibility Keep fields aligned to their size
     38 */
     39
     40#define DRM_VIRTGPU_MAP         0x01
     41#define DRM_VIRTGPU_EXECBUFFER  0x02
     42#define DRM_VIRTGPU_GETPARAM    0x03
     43#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
     44#define DRM_VIRTGPU_RESOURCE_INFO     0x05
     45#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
     46#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
     47#define DRM_VIRTGPU_WAIT     0x08
     48#define DRM_VIRTGPU_GET_CAPS  0x09
     49#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
     50#define DRM_VIRTGPU_CONTEXT_INIT 0x0b
     51
     52#define VIRTGPU_EXECBUF_FENCE_FD_IN	0x01
     53#define VIRTGPU_EXECBUF_FENCE_FD_OUT	0x02
     54#define VIRTGPU_EXECBUF_RING_IDX	0x04
     55#define VIRTGPU_EXECBUF_FLAGS  (\
     56		VIRTGPU_EXECBUF_FENCE_FD_IN |\
     57		VIRTGPU_EXECBUF_FENCE_FD_OUT |\
     58		VIRTGPU_EXECBUF_RING_IDX |\
     59		0)
     60
     61struct drm_virtgpu_map {
     62	__u64 offset; /* use for mmap system call */
     63	__u32 handle;
     64	__u32 pad;
     65};
     66
     67struct drm_virtgpu_execbuffer {
     68	__u32 flags;
     69	__u32 size;
     70	__u64 command; /* void* */
     71	__u64 bo_handles;
     72	__u32 num_bo_handles;
     73	__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
     74	__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
     75	__u32 pad;
     76};
     77
     78#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
     79#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
     80#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
     81#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
     82#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing  */
     83#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
     84#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
     85
     86struct drm_virtgpu_getparam {
     87	__u64 param;
     88	__u64 value;
     89};
     90
     91/* NO_BO flags? NO resource flag? */
     92/* resource flag for y_0_top */
     93struct drm_virtgpu_resource_create {
     94	__u32 target;
     95	__u32 format;
     96	__u32 bind;
     97	__u32 width;
     98	__u32 height;
     99	__u32 depth;
    100	__u32 array_size;
    101	__u32 last_level;
    102	__u32 nr_samples;
    103	__u32 flags;
    104	__u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */
    105	__u32 res_handle;  /* returned by kernel */
    106	__u32 size;        /* validate transfer in the host */
    107	__u32 stride;      /* validate transfer in the host */
    108};
    109
    110struct drm_virtgpu_resource_info {
    111	__u32 bo_handle;
    112	__u32 res_handle;
    113	__u32 size;
    114	__u32 blob_mem;
    115};
    116
    117struct drm_virtgpu_3d_box {
    118	__u32 x;
    119	__u32 y;
    120	__u32 z;
    121	__u32 w;
    122	__u32 h;
    123	__u32 d;
    124};
    125
    126struct drm_virtgpu_3d_transfer_to_host {
    127	__u32 bo_handle;
    128	struct drm_virtgpu_3d_box box;
    129	__u32 level;
    130	__u32 offset;
    131	__u32 stride;
    132	__u32 layer_stride;
    133};
    134
    135struct drm_virtgpu_3d_transfer_from_host {
    136	__u32 bo_handle;
    137	struct drm_virtgpu_3d_box box;
    138	__u32 level;
    139	__u32 offset;
    140	__u32 stride;
    141	__u32 layer_stride;
    142};
    143
    144#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
    145struct drm_virtgpu_3d_wait {
    146	__u32 handle; /* 0 is an invalid handle */
    147	__u32 flags;
    148};
    149
    150struct drm_virtgpu_get_caps {
    151	__u32 cap_set_id;
    152	__u32 cap_set_ver;
    153	__u64 addr;
    154	__u32 size;
    155	__u32 pad;
    156};
    157
    158struct drm_virtgpu_resource_create_blob {
    159#define VIRTGPU_BLOB_MEM_GUEST             0x0001
    160#define VIRTGPU_BLOB_MEM_HOST3D            0x0002
    161#define VIRTGPU_BLOB_MEM_HOST3D_GUEST      0x0003
    162
    163#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE     0x0001
    164#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE    0x0002
    165#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
    166	/* zero is invalid blob_mem */
    167	__u32 blob_mem;
    168	__u32 blob_flags;
    169	__u32 bo_handle;
    170	__u32 res_handle;
    171	__u64 size;
    172
    173	/*
    174	 * for 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
    175	 * VIRTGPU_BLOB_MEM_HOST3D otherwise, must be zero.
    176	 */
    177	__u32 pad;
    178	__u32 cmd_size;
    179	__u64 cmd;
    180	__u64 blob_id;
    181};
    182
    183#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID       0x0001
    184#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS       0x0002
    185#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
    186struct drm_virtgpu_context_set_param {
    187	__u64 param;
    188	__u64 value;
    189};
    190
    191struct drm_virtgpu_context_init {
    192	__u32 num_params;
    193	__u32 pad;
    194
    195	/* pointer to drm_virtgpu_context_set_param array */
    196	__u64 ctx_set_params;
    197};
    198
    199/*
    200 * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
    201 * effect.  The event size is sizeof(drm_event), since there is no additional
    202 * payload.
    203 */
    204#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
    205
    206#define DRM_IOCTL_VIRTGPU_MAP \
    207	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
    208
    209#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
    210	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
    211		struct drm_virtgpu_execbuffer)
    212
    213#define DRM_IOCTL_VIRTGPU_GETPARAM \
    214	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM,\
    215		struct drm_virtgpu_getparam)
    216
    217#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE			\
    218	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE,	\
    219		struct drm_virtgpu_resource_create)
    220
    221#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \
    222	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
    223		 struct drm_virtgpu_resource_info)
    224
    225#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
    226	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST,	\
    227		struct drm_virtgpu_3d_transfer_from_host)
    228
    229#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
    230	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST,	\
    231		struct drm_virtgpu_3d_transfer_to_host)
    232
    233#define DRM_IOCTL_VIRTGPU_WAIT				\
    234	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT,	\
    235		struct drm_virtgpu_3d_wait)
    236
    237#define DRM_IOCTL_VIRTGPU_GET_CAPS \
    238	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
    239	struct drm_virtgpu_get_caps)
    240
    241#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB				\
    242	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB,	\
    243		struct drm_virtgpu_resource_create_blob)
    244
    245#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT					\
    246	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT,		\
    247		struct drm_virtgpu_context_init)
    248
    249#if defined(__cplusplus)
    250}
    251#endif
    252
    253#endif