cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

io_uring.h (13845B)


/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/*
 * Header file for the io_uring interface.
 *
 * Copyright (C) 2019 Jens Axboe
 * Copyright (C) 2019 Christoph Hellwig
 */
#ifndef LINUX_IO_URING_H
#define LINUX_IO_URING_H

#include <linux/fs.h>
#include <linux/types.h>

/*
 * IO submission data structure (Submission Queue Entry)
 */
struct io_uring_sqe {
	__u8	opcode;		/* type of operation for this sqe */
	__u8	flags;		/* IOSQE_ flags */
	__u16	ioprio;		/* ioprio for the request */
	__s32	fd;		/* file descriptor to do IO on */
	union {
		__u64	off;	/* offset into file */
		__u64	addr2;
		struct {
			__u32	cmd_op;
			__u32	__pad1;
		};
	};
	union {
		__u64	addr;	/* pointer to buffer or iovecs */
		__u64	splice_off_in;
	};
	__u32	len;		/* buffer size or number of iovecs */
	union {
		__kernel_rwf_t	rw_flags;
		__u32		fsync_flags;
		__u16		poll_events;	/* compatibility */
		__u32		poll32_events;	/* word-reversed for BE */
		__u32		sync_range_flags;
		__u32		msg_flags;
		__u32		timeout_flags;
		__u32		accept_flags;
		__u32		cancel_flags;
		__u32		open_flags;
		__u32		statx_flags;
		__u32		fadvise_advice;
		__u32		splice_flags;
		__u32		rename_flags;
		__u32		unlink_flags;
		__u32		hardlink_flags;
		__u32		xattr_flags;
	};
	__u64	user_data;	/* data to be passed back at completion time */
	/* pack this to avoid bogus arm OABI complaints */
	union {
		/* index into fixed buffers, if used */
		__u16	buf_index;
		/* for grouped buffer selection */
		__u16	buf_group;
	} __attribute__((packed));
	/* personality to use, if used */
	__u16	personality;
	union {
		__s32	splice_fd_in;
		__u32	file_index;
	};
	union {
		struct {
			__u64	addr3;
			__u64	__pad2[1];
		};
		/*
		 * If the ring is initialized with IORING_SETUP_SQE128, then
		 * this field is used for 80 bytes of arbitrary command data
		 */
		__u8	cmd[0];
	};
};
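
/*
 * Illustrative sketch, not part of the UAPI header: filling one SQE for a
 * plain read. "sqes", "ring_mask", "tail", "fd" and "buf" are hypothetical
 * names standing in for the application's own ring bookkeeping.
 *
 *	struct io_uring_sqe *sqe = &sqes[tail & ring_mask];
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode    = IORING_OP_READ;
 *	sqe->fd        = fd;                      (file to read from)
 *	sqe->off       = 0;                       (file offset)
 *	sqe->addr      = (unsigned long long)buf; (destination buffer)
 *	sqe->len       = 4096;                    (number of bytes)
 *	sqe->user_data = 0xcafe;                  (echoed back in the CQE)
 */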

/*
 * If sqe->file_index is set to this for opcodes that instantiate a new
 * direct descriptor (like openat/openat2/accept), then io_uring will allocate
 * an available direct descriptor instead of having the application pass one
 * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
 * if the space is full.
 */
#define IORING_FILE_INDEX_ALLOC		(~0U)
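
/*
 * Sketch (hypothetical "listen_fd"): asking io_uring to pick the direct
 * descriptor slot for an accepted socket. On success, cqe->res holds the
 * chosen index rather than a normal fd.
 *
 *	sqe->opcode     = IORING_OP_ACCEPT;
 *	sqe->fd         = listen_fd;
 *	sqe->file_index = IORING_FILE_INDEX_ALLOC;
 */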
     89
     90enum {
     91	IOSQE_FIXED_FILE_BIT,
     92	IOSQE_IO_DRAIN_BIT,
     93	IOSQE_IO_LINK_BIT,
     94	IOSQE_IO_HARDLINK_BIT,
     95	IOSQE_ASYNC_BIT,
     96	IOSQE_BUFFER_SELECT_BIT,
     97	IOSQE_CQE_SKIP_SUCCESS_BIT,
     98};
     99
    100/*
    101 * sqe->flags
    102 */
    103/* use fixed fileset */
    104#define IOSQE_FIXED_FILE	(1U << IOSQE_FIXED_FILE_BIT)
    105/* issue after inflight IO */
    106#define IOSQE_IO_DRAIN		(1U << IOSQE_IO_DRAIN_BIT)
    107/* links next sqe */
    108#define IOSQE_IO_LINK		(1U << IOSQE_IO_LINK_BIT)
    109/* like LINK, but stronger */
    110#define IOSQE_IO_HARDLINK	(1U << IOSQE_IO_HARDLINK_BIT)
    111/* always go async */
    112#define IOSQE_ASYNC		(1U << IOSQE_ASYNC_BIT)
    113/* select buffer from sqe->buf_group */
    114#define IOSQE_BUFFER_SELECT	(1U << IOSQE_BUFFER_SELECT_BIT)
    115/* don't post CQE if request succeeded */
    116#define IOSQE_CQE_SKIP_SUCCESS	(1U << IOSQE_CQE_SKIP_SUCCESS_BIT)
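
/*
 * Sketch: two linked SQEs, a write followed by an fsync. With IOSQE_IO_LINK
 * on the first SQE, the fsync does not start until the write completes
 * successfully; if the write fails, the fsync completes with -ECANCELED.
 *
 *	write_sqe->opcode = IORING_OP_WRITE;
 *	write_sqe->flags |= IOSQE_IO_LINK;
 *
 *	fsync_sqe->opcode = IORING_OP_FSYNC;
 *	fsync_sqe->fd     = write_sqe->fd;
 */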

/*
 * io_uring_setup() flags
 */
#define IORING_SETUP_IOPOLL	(1U << 0)	/* io_context is polled */
#define IORING_SETUP_SQPOLL	(1U << 1)	/* SQ poll thread */
#define IORING_SETUP_SQ_AFF	(1U << 2)	/* sq_thread_cpu is valid */
#define IORING_SETUP_CQSIZE	(1U << 3)	/* app defines CQ size */
#define IORING_SETUP_CLAMP	(1U << 4)	/* clamp SQ/CQ ring sizes */
#define IORING_SETUP_ATTACH_WQ	(1U << 5)	/* attach to existing wq */
#define IORING_SETUP_R_DISABLED	(1U << 6)	/* start with ring disabled */
#define IORING_SETUP_SUBMIT_ALL	(1U << 7)	/* continue submit on error */
/*
 * Cooperative task running. When requests complete, they often require
 * forcing the submitter to transition to the kernel to complete. If this
 * flag is set, work will be done when the task transitions anyway, rather
 * than force an inter-processor interrupt reschedule. This avoids interrupting
 * a task running in userspace, and saves an IPI.
 */
#define IORING_SETUP_COOP_TASKRUN	(1U << 8)
/*
 * If COOP_TASKRUN is set, get notified if task work is available for
 * running and a kernel transition would be needed to run it. This sets
 * IORING_SQ_TASKRUN in the sq ring flags. Only valid in combination with
 * COOP_TASKRUN.
 */
#define IORING_SETUP_TASKRUN_FLAG	(1U << 9)

#define IORING_SETUP_SQE128		(1U << 10) /* SQEs are 128 byte */
#define IORING_SETUP_CQE32		(1U << 11) /* CQEs are 32 byte */
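
/*
 * Sketch: requesting cooperative task running at setup time, using the raw
 * syscall since there is no libc wrapper; "entries" is a hypothetical queue
 * depth. With TASKRUN_FLAG, the application can check the sq ring flags for
 * IORING_SQ_TASKRUN and enter the kernel only when work is pending.
 *
 *	struct io_uring_params p = { 0 };
 *
 *	p.flags = IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG;
 *	int ring_fd = syscall(__NR_io_uring_setup, entries, &p);
 */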

enum io_uring_op {
	IORING_OP_NOP,
	IORING_OP_READV,
	IORING_OP_WRITEV,
	IORING_OP_FSYNC,
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED,
	IORING_OP_POLL_ADD,
	IORING_OP_POLL_REMOVE,
	IORING_OP_SYNC_FILE_RANGE,
	IORING_OP_SENDMSG,
	IORING_OP_RECVMSG,
	IORING_OP_TIMEOUT,
	IORING_OP_TIMEOUT_REMOVE,
	IORING_OP_ACCEPT,
	IORING_OP_ASYNC_CANCEL,
	IORING_OP_LINK_TIMEOUT,
	IORING_OP_CONNECT,
	IORING_OP_FALLOCATE,
	IORING_OP_OPENAT,
	IORING_OP_CLOSE,
	IORING_OP_FILES_UPDATE,
	IORING_OP_STATX,
	IORING_OP_READ,
	IORING_OP_WRITE,
	IORING_OP_FADVISE,
	IORING_OP_MADVISE,
	IORING_OP_SEND,
	IORING_OP_RECV,
	IORING_OP_OPENAT2,
	IORING_OP_EPOLL_CTL,
	IORING_OP_SPLICE,
	IORING_OP_PROVIDE_BUFFERS,
	IORING_OP_REMOVE_BUFFERS,
	IORING_OP_TEE,
	IORING_OP_SHUTDOWN,
	IORING_OP_RENAMEAT,
	IORING_OP_UNLINKAT,
	IORING_OP_MKDIRAT,
	IORING_OP_SYMLINKAT,
	IORING_OP_LINKAT,
	IORING_OP_MSG_RING,
	IORING_OP_FSETXATTR,
	IORING_OP_SETXATTR,
	IORING_OP_FGETXATTR,
	IORING_OP_GETXATTR,
	IORING_OP_SOCKET,
	IORING_OP_URING_CMD,

	/* this goes last, obviously */
	IORING_OP_LAST,
};

/*
 * sqe->fsync_flags
 */
#define IORING_FSYNC_DATASYNC	(1U << 0)

/*
 * sqe->timeout_flags
 */
#define IORING_TIMEOUT_ABS		(1U << 0)
#define IORING_TIMEOUT_UPDATE		(1U << 1)
#define IORING_TIMEOUT_BOOTTIME		(1U << 2)
#define IORING_TIMEOUT_REALTIME		(1U << 3)
#define IORING_LINK_TIMEOUT_UPDATE	(1U << 4)
#define IORING_TIMEOUT_ETIME_SUCCESS	(1U << 5)
#define IORING_TIMEOUT_CLOCK_MASK	(IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
#define IORING_TIMEOUT_UPDATE_MASK	(IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
/*
 * sqe->splice_flags
 * extends splice(2) flags
 */
#define SPLICE_F_FD_IN_FIXED	(1U << 31) /* the last bit of __u32 */

/*
 * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
 * command flags for POLL_ADD are stored in sqe->len.
 *
 * IORING_POLL_ADD_MULTI	Multishot poll. Sets IORING_CQE_F_MORE if
 *				the poll handler will continue to report
 *				CQEs on behalf of the same SQE.
 *
 * IORING_POLL_UPDATE		Update existing poll request, matching
 *				sqe->addr as the old user_data field.
 */
#define IORING_POLL_ADD_MULTI	(1U << 0)
#define IORING_POLL_UPDATE_EVENTS	(1U << 1)
#define IORING_POLL_UPDATE_USER_DATA	(1U << 2)
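
/*
 * Sketch: arming a multishot poll on a socket. Note the command flag goes
 * in sqe->len, not sqe->poll_events; "sock_fd" is hypothetical.
 *
 *	sqe->opcode      = IORING_OP_POLL_ADD;
 *	sqe->fd          = sock_fd;
 *	sqe->poll_events = POLLIN;
 *	sqe->len         = IORING_POLL_ADD_MULTI;
 *
 * Each readiness event then posts a CQE with IORING_CQE_F_MORE set for as
 * long as the poll request stays armed.
 */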

/*
 * ASYNC_CANCEL flags.
 *
 * IORING_ASYNC_CANCEL_ALL	Cancel all requests that match the given key
 * IORING_ASYNC_CANCEL_FD	Key off 'fd' for cancelation rather than the
 *				request 'user_data'
 * IORING_ASYNC_CANCEL_ANY	Match any request
 */
#define IORING_ASYNC_CANCEL_ALL	(1U << 0)
#define IORING_ASYNC_CANCEL_FD	(1U << 1)
#define IORING_ASYNC_CANCEL_ANY	(1U << 2)
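
/*
 * Sketch: canceling every pending request on one descriptor instead of
 * matching a single user_data value ("target_fd" is hypothetical).
 *
 *	sqe->opcode       = IORING_OP_ASYNC_CANCEL;
 *	sqe->fd           = target_fd;
 *	sqe->cancel_flags = IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_ALL;
 */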

/*
 * send/sendmsg and recv/recvmsg flags (sqe->ioprio)
 *
 * IORING_RECVSEND_POLL_FIRST	If set, instead of first attempting to send
 *				or receive and arm poll if that yields an
 *				-EAGAIN result, arm poll upfront and skip
 *				the initial transfer attempt.
 */
#define IORING_RECVSEND_POLL_FIRST	(1U << 0)
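
/*
 * Sketch: a recv that arms poll up front rather than attempting the copy
 * first, which can save a wasted attempt on mostly-idle sockets. Note the
 * flag travels in sqe->ioprio.
 *
 *	sqe->opcode = IORING_OP_RECV;
 *	sqe->ioprio = IORING_RECVSEND_POLL_FIRST;
 */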

/*
 * accept flags stored in sqe->ioprio
 */
#define IORING_ACCEPT_MULTISHOT	(1U << 0)

/*
 * IO completion data structure (Completion Queue Entry)
 */
struct io_uring_cqe {
	__u64	user_data;	/* sqe->data submission passed back */
	__s32	res;		/* result code for this event */
	__u32	flags;

	/*
	 * If the ring is initialized with IORING_SETUP_CQE32, then this field
	 * contains 16 bytes of padding, doubling the size of the CQE.
	 */
	__u64 big_cqe[];
};

/*
 * cqe->flags
 *
 * IORING_CQE_F_BUFFER	If set, the upper 16 bits are the buffer ID
 * IORING_CQE_F_MORE	If set, parent SQE will generate more CQE entries
 * IORING_CQE_F_SOCK_NONEMPTY	If set, more data to read after socket recv
 */
#define IORING_CQE_F_BUFFER		(1U << 0)
#define IORING_CQE_F_MORE		(1U << 1)
#define IORING_CQE_F_SOCK_NONEMPTY	(1U << 2)

enum {
	IORING_CQE_BUFFER_SHIFT		= 16,
};
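
/*
 * Sketch: decoding a completion that used a selected buffer. When
 * IORING_CQE_F_BUFFER is set, the upper 16 bits of cqe->flags carry the
 * buffer ID that the kernel picked from the group.
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER) {
 *		unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *		(look up buffer "bid" in the application's table)
 *	}
 */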

/*
 * Magic offsets for the application to mmap the data it needs
 */
#define IORING_OFF_SQ_RING		0ULL
#define IORING_OFF_CQ_RING		0x8000000ULL
#define IORING_OFF_SQES			0x10000000ULL

/*
 * Filled with the offset for mmap(2)
 */
struct io_sqring_offsets {
	__u32 head;
	__u32 tail;
	__u32 ring_mask;
	__u32 ring_entries;
	__u32 flags;
	__u32 dropped;
	__u32 array;
	__u32 resv1;
	__u64 resv2;
};

/*
 * sq_ring->flags
 */
#define IORING_SQ_NEED_WAKEUP	(1U << 0) /* needs io_uring_enter wakeup */
#define IORING_SQ_CQ_OVERFLOW	(1U << 1) /* CQ ring has overflowed */
#define IORING_SQ_TASKRUN	(1U << 2) /* task should enter the kernel */
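
/*
 * Sketch: the usual SQPOLL submit path. If the poll thread has gone idle,
 * the application must wake it with io_uring_enter(2); "sq_flags" is a
 * hypothetical pointer at sq_off.flags in the mapped SQ ring, read with
 * appropriate memory ordering in real code.
 *
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 */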

struct io_cqring_offsets {
	__u32 head;
	__u32 tail;
	__u32 ring_mask;
	__u32 ring_entries;
	__u32 overflow;
	__u32 cqes;
	__u32 flags;
	__u32 resv1;
	__u64 resv2;
};

/*
 * cq_ring->flags
 */

/* disable eventfd notifications */
#define IORING_CQ_EVENTFD_DISABLED	(1U << 0)

/*
 * io_uring_enter(2) flags
 */
#define IORING_ENTER_GETEVENTS		(1U << 0)
#define IORING_ENTER_SQ_WAKEUP		(1U << 1)
#define IORING_ENTER_SQ_WAIT		(1U << 2)
#define IORING_ENTER_EXT_ARG		(1U << 3)
#define IORING_ENTER_REGISTERED_RING	(1U << 4)

/*
 * Passed in for io_uring_setup(2). Copied back with updated info on success
 */
struct io_uring_params {
	__u32 sq_entries;
	__u32 cq_entries;
	__u32 flags;
	__u32 sq_thread_cpu;
	__u32 sq_thread_idle;
	__u32 features;
	__u32 wq_fd;
	__u32 resv[3];
	struct io_sqring_offsets sq_off;
	struct io_cqring_offsets cq_off;
};
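
/*
 * Sketch: creating a ring and mapping the SQ ring with the offsets the
 * kernel filled in; error handling omitted. With IORING_FEAT_SINGLE_MMAP
 * one mapping can serve both rings; this shows the simple per-ring form.
 *
 *	struct io_uring_params p = { 0 };
 *	int ring_fd = syscall(__NR_io_uring_setup, 256, &p);
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	void *sq = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			MAP_SHARED | MAP_POPULATE, ring_fd,
 *			IORING_OFF_SQ_RING);
 *	unsigned *sq_tail = (unsigned *)((char *)sq + p.sq_off.tail);
 */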

/*
 * io_uring_params->features flags
 */
#define IORING_FEAT_SINGLE_MMAP		(1U << 0)
#define IORING_FEAT_NODROP		(1U << 1)
#define IORING_FEAT_SUBMIT_STABLE	(1U << 2)
#define IORING_FEAT_RW_CUR_POS		(1U << 3)
#define IORING_FEAT_CUR_PERSONALITY	(1U << 4)
#define IORING_FEAT_FAST_POLL		(1U << 5)
#define IORING_FEAT_POLL_32BITS		(1U << 6)
#define IORING_FEAT_SQPOLL_NONFIXED	(1U << 7)
#define IORING_FEAT_EXT_ARG		(1U << 8)
#define IORING_FEAT_NATIVE_WORKERS	(1U << 9)
#define IORING_FEAT_RSRC_TAGS		(1U << 10)
#define IORING_FEAT_CQE_SKIP		(1U << 11)
#define IORING_FEAT_LINKED_FILE		(1U << 12)

/*
 * io_uring_register(2) opcodes and arguments
 */
enum {
	IORING_REGISTER_BUFFERS			= 0,
	IORING_UNREGISTER_BUFFERS		= 1,
	IORING_REGISTER_FILES			= 2,
	IORING_UNREGISTER_FILES			= 3,
	IORING_REGISTER_EVENTFD			= 4,
	IORING_UNREGISTER_EVENTFD		= 5,
	IORING_REGISTER_FILES_UPDATE		= 6,
	IORING_REGISTER_EVENTFD_ASYNC		= 7,
	IORING_REGISTER_PROBE			= 8,
	IORING_REGISTER_PERSONALITY		= 9,
	IORING_UNREGISTER_PERSONALITY		= 10,
	IORING_REGISTER_RESTRICTIONS		= 11,
	IORING_REGISTER_ENABLE_RINGS		= 12,

	/* extended with tagging */
	IORING_REGISTER_FILES2			= 13,
	IORING_REGISTER_FILES_UPDATE2		= 14,
	IORING_REGISTER_BUFFERS2		= 15,
	IORING_REGISTER_BUFFERS_UPDATE		= 16,

	/* set/clear io-wq thread affinities */
	IORING_REGISTER_IOWQ_AFF		= 17,
	IORING_UNREGISTER_IOWQ_AFF		= 18,

	/* set/get max number of io-wq workers */
	IORING_REGISTER_IOWQ_MAX_WORKERS	= 19,

	/* register/unregister io_uring fd with the ring */
	IORING_REGISTER_RING_FDS		= 20,
	IORING_UNREGISTER_RING_FDS		= 21,

	/* register ring based provide buffer group */
	IORING_REGISTER_PBUF_RING		= 22,
	IORING_UNREGISTER_PBUF_RING		= 23,

	/* this goes last */
	IORING_REGISTER_LAST
};
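
/*
 * Sketch: registering one fixed buffer for IORING_OP_READ_FIXED and
 * IORING_OP_WRITE_FIXED via the raw io_uring_register(2) syscall ("buf"
 * is hypothetical, pre-allocated memory).
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_BUFFERS, &iov, 1);
 */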

/* io-wq worker categories */
enum {
	IO_WQ_BOUND,
	IO_WQ_UNBOUND,
};

/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
	__u32 offset;
	__u32 resv;
	__aligned_u64 /* __s32 * */ fds;
};

/*
 * Register a fully sparse file space, rather than pass in an array of all
 * -1 file descriptors.
 */
#define IORING_RSRC_REGISTER_SPARSE	(1U << 0)

struct io_uring_rsrc_register {
	__u32 nr;
	__u32 flags;
	__u64 resv2;
	__aligned_u64 data;
	__aligned_u64 tags;
};

struct io_uring_rsrc_update {
	__u32 offset;
	__u32 resv;
	__aligned_u64 data;
};

struct io_uring_rsrc_update2 {
	__u32 offset;
	__u32 resv;
	__aligned_u64 data;
	__aligned_u64 tags;
	__u32 nr;
	__u32 resv2;
};

/* Skip updating fd indexes set to this value in the fd table */
#define IORING_REGISTER_FILES_SKIP	(-2)

#define IO_URING_OP_SUPPORTED	(1U << 0)

struct io_uring_probe_op {
	__u8 op;
	__u8 resv;
	__u16 flags;	/* IO_URING_OP_* flags */
	__u32 resv2;
};

struct io_uring_probe {
	__u8 last_op;	/* last opcode supported */
	__u8 ops_len;	/* length of ops[] array below */
	__u16 resv;
	__u32 resv2[3];
	struct io_uring_probe_op ops[0];
};

struct io_uring_restriction {
	__u16 opcode;
	union {
		__u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
		__u8 sqe_op;      /* IORING_RESTRICTION_SQE_OP */
		__u8 sqe_flags;   /* IORING_RESTRICTION_SQE_FLAGS_* */
	};
	__u8 resv;
	__u32 resv2[3];
};

struct io_uring_buf {
	__u64	addr;
	__u32	len;
	__u16	bid;
	__u16	resv;
};

struct io_uring_buf_ring {
	union {
		/*
		 * To avoid spilling into more pages than we need to, the
		 * ring tail is overlaid with the io_uring_buf->resv field.
		 */
		struct {
			__u64	resv1;
			__u32	resv2;
			__u16	resv3;
			__u16	tail;
		};
		struct io_uring_buf	bufs[0];
	};
};

/* argument for IORING_(UN)REGISTER_PBUF_RING */
struct io_uring_buf_reg {
	__u64	ring_addr;
	__u32	ring_entries;
	__u16	bgid;
	__u16	pad;
	__u64	resv[3];
};
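
/*
 * Sketch: registering a provided-buffer ring and publishing one buffer.
 * "br" is assumed to point at page-aligned memory sized for ring_entries
 * (a power of two); a real implementation must order the tail store after
 * the buffer writes (e.g. a release store).
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr    = (unsigned long long)br,
 *		.ring_entries = 8,
 *		.bgid         = 0,
 *	};
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_PBUF_RING, &reg, 1);
 *
 *	br->bufs[0].addr = (unsigned long long)buf;
 *	br->bufs[0].len  = 4096;
 *	br->bufs[0].bid  = 0;
 *	br->tail         = 1;
 */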

/*
 * io_uring_restriction->opcode values
 */
enum {
	/* Allow an io_uring_register(2) opcode */
	IORING_RESTRICTION_REGISTER_OP		= 0,

	/* Allow an sqe opcode */
	IORING_RESTRICTION_SQE_OP		= 1,

	/* Allow sqe flags */
	IORING_RESTRICTION_SQE_FLAGS_ALLOWED	= 2,

	/* Require sqe flags (these flags must be set on each submission) */
	IORING_RESTRICTION_SQE_FLAGS_REQUIRED	= 3,

	IORING_RESTRICTION_LAST
};

struct io_uring_getevents_arg {
	__u64	sigmask;
	__u32	sigmask_sz;
	__u32	pad;
	__u64	ts;
};
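
/*
 * Sketch: waiting for one completion with a timeout through the extended
 * argument (requires IORING_FEAT_EXT_ARG). The final syscall argument is
 * the size of this struct, where a plain sigmask size would otherwise go.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_getevents_arg arg = {
 *		.ts = (unsigned long long)&ts,
 *	};
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		&arg, sizeof(arg));
 */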

#endif