cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

io_uring.h (17150B)


/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM io_uring

#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IO_URING_H

#include <linux/tracepoint.h>
#include <uapi/linux/io_uring.h>
#include <linux/io_uring.h>

struct io_wq_work;

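/*
 * Editorial note, a sketch of how these definitions are consumed: each
 * TRACE_EVENT(name, ...) below expands into a tracepoint whose call site
 * is the generated trace_<name>() inline, e.g. trace_io_uring_create().
 * With TRACE_SYSTEM set to io_uring, the events show up under tracefs
 * and can be enabled at runtime, roughly:
 *
 *	# cd /sys/kernel/tracing          (or /sys/kernel/debug/tracing)
 *	# echo 1 > events/io_uring/io_uring_create/enable
 *	# cat trace_pipe
 */
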
/**
 * io_uring_create - called after a new io_uring context was prepared
 *
 * @fd:		corresponding file descriptor
 * @ctx:	pointer to a ring context structure
 * @sq_entries:	actual SQ size
 * @cq_entries:	actual CQ size
 * @flags:	SQ ring flags, provided to io_uring_setup(2)
 *
 * Allows tracing io_uring creation and provides a pointer to the context,
 * which can be used later to find correlated events.
 */
TRACE_EVENT(io_uring_create,

	TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),

	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),

	TP_STRUCT__entry (
		__field(  int,		fd		)
		__field(  void *,	ctx		)
		__field(  u32,		sq_entries	)
		__field(  u32,		cq_entries	)
		__field(  u32,		flags		)
	),

	TP_fast_assign(
		__entry->fd		= fd;
		__entry->ctx		= ctx;
		__entry->sq_entries	= sq_entries;
		__entry->cq_entries	= cq_entries;
		__entry->flags		= flags;
	),

	TP_printk("ring %p, fd %d sq size %d, cq size %d, flags 0x%x",
			  __entry->ctx, __entry->fd, __entry->sq_entries,
			  __entry->cq_entries, __entry->flags)
);
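
/*
 * Example call site, a sketch only (the locals ret, ctx and p are
 * illustrative names, not necessarily those used in fs/io_uring.c):
 *
 *	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries,
 *			      p->flags);
 */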

/**
 * io_uring_register - called after a buffer/file/eventfd was successfully
 * 			registered for a ring
 *
 * @ctx:	pointer to a ring context structure
 * @opcode:	describes which operation to perform
 * @nr_files:	number of registered files
 * @nr_bufs:	number of registered buffers
 * @ret:	return code
 *
 * Allows tracing fixed files/buffers that can be registered to avoid the
 * overhead of taking references to them for every operation. This event,
 * together with io_uring_file_get, can provide a full picture of how much
 * overhead one can reduce via fixing.
 */
TRACE_EVENT(io_uring_register,

	TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
			 unsigned nr_bufs, long ret),

	TP_ARGS(ctx, opcode, nr_files, nr_bufs, ret),

	TP_STRUCT__entry (
		__field(  void *,	ctx	)
		__field(  unsigned,	opcode	)
		__field(  unsigned,	nr_files)
		__field(  unsigned,	nr_bufs	)
		__field(  long,		ret	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->opcode		= opcode;
		__entry->nr_files	= nr_files;
		__entry->nr_bufs	= nr_bufs;
		__entry->ret		= ret;
	),

	TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
			  "ret %ld",
			  __entry->ctx, __entry->opcode, __entry->nr_files,
			  __entry->nr_bufs, __entry->ret)
);
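
/*
 * Sample rendered line, following the TP_printk format above (all values
 * invented for illustration):
 *
 *	io_uring_register: ring 0xffff888102a13800, opcode 2, nr_user_files 0, nr_user_bufs 16, ret 0
 */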

/**
 * io_uring_file_get - called before getting references to an SQE file
 *
 * @ctx:	pointer to a ring context structure
 * @req:	pointer to a submitted request
 * @user_data:	user data associated with the request
 * @fd:		SQE file descriptor
 *
 * Allows tracing how often an SQE file reference is obtained, which can
 * help figure out whether it makes sense to use fixed files, or check that
 * fixed files are used correctly.
 */
TRACE_EVENT(io_uring_file_get,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data, int fd),

	TP_ARGS(ctx, req, user_data, fd),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  u64,		user_data	)
		__field(  int,		fd		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->user_data	= user_data;
		__entry->fd		= fd;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, fd %d",
		__entry->ctx, __entry->req, __entry->user_data, __entry->fd)
);
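
/*
 * A sketch of guarding a call on the tracepoint's static key; the
 * trace_<name>_enabled() helper is generated alongside every tracepoint,
 * while the ctx/req/user_data/fd locals here are illustrative:
 *
 *	if (trace_io_uring_file_get_enabled())
 *		trace_io_uring_file_get(ctx, req, user_data, fd);
 */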

/**
 * io_uring_queue_async_work - called before submitting a new async work
 *
 * @ctx:	pointer to a ring context structure
 * @req:	pointer to a submitted request
 * @user_data:	user data associated with the request
 * @opcode:	opcode of request
 * @flags:	request flags
 * @work:	pointer to a submitted io_wq_work
 * @rw:		type of workqueue, hashed or normal
 *
 * Allows tracing asynchronous work submission.
 */
TRACE_EVENT(io_uring_queue_async_work,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode,
		unsigned int flags, struct io_wq_work *work, int rw),

	TP_ARGS(ctx, req, user_data, opcode, flags, work, rw),

	TP_STRUCT__entry (
		__field(  void *,			ctx		)
		__field(  void *,			req		)
		__field(  u64,				user_data	)
		__field(  u8,				opcode		)
		__field(  unsigned int,			flags		)
		__field(  struct io_wq_work *,		work		)
		__field(  int,				rw		)

		__string( op_str, io_uring_get_opcode(opcode)	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->user_data	= user_data;
		__entry->flags		= flags;
		__entry->opcode		= opcode;
		__entry->work		= work;
		__entry->rw		= rw;

		__assign_str(op_str, io_uring_get_opcode(opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p",
		__entry->ctx, __entry->req, __entry->user_data,
		__get_str(op_str),
		__entry->flags, __entry->rw ? "hashed" : "normal", __entry->work)
);
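
/*
 * Sample rendered line (values invented; "hashed" vs "normal" is selected
 * by the @rw flag in the format string above):
 *
 *	io_uring_queue_async_work: ring 0xffff888102a13800, request 0xffff888104e0e600, user_data 0x7, opcode WRITEV, flags 0x0, normal queue, work 0xffff888104e0e6c8
 */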

/**
 * io_uring_defer - called when an io_uring request is deferred
 *
 * @ctx:	pointer to a ring context structure
 * @req:	pointer to a deferred request
 * @user_data:	user data associated with the request
 * @opcode:	opcode of request
 *
 * Allows tracking deferred requests, to get insight into which requests are
 * not started immediately.
 */
TRACE_EVENT(io_uring_defer,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode),

	TP_ARGS(ctx, req, user_data, opcode),

	TP_STRUCT__entry (
		__field(  void *,		ctx	)
		__field(  void *,		req	)
		__field(  unsigned long long,	data	)
		__field(  u8,			opcode	)

		__string( op_str, io_uring_get_opcode(opcode) )
	),

	TP_fast_assign(
		__entry->ctx	= ctx;
		__entry->req	= req;
		__entry->data	= user_data;
		__entry->opcode	= opcode;

		__assign_str(op_str, io_uring_get_opcode(opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s",
		__entry->ctx, __entry->req, __entry->data,
		__get_str(op_str))
);

/**
 * io_uring_link - called before an io_uring request is added into the
 * 		   link_list of another request
 *
 * @ctx:		pointer to a ring context structure
 * @req:		pointer to a linked request
 * @target_req:		pointer to a previous request, which will contain @req
 *
 * Allows tracking linked requests, to understand dependencies between
 * requests and how they influence the execution flow.
 */
TRACE_EVENT(io_uring_link,

	TP_PROTO(void *ctx, void *req, void *target_req),

	TP_ARGS(ctx, req, target_req),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  void *,	target_req	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->target_req	= target_req;
	),

	TP_printk("ring %p, request %p linked after %p",
			  __entry->ctx, __entry->req, __entry->target_req)
);
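
/*
 * Example call site, a sketch only (head standing for the request that
 * @req gets linked behind; the names are illustrative):
 *
 *	trace_io_uring_link(ctx, req, head);
 */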

/**
 * io_uring_cqring_wait - called before starting to wait for an available CQE
 *
 * @ctx:	pointer to a ring context structure
 * @min_events:	minimum number of events to wait for
 *
 * Allows tracking waiting for CQEs, so that we can e.g. troubleshoot
 * situations where an application waits for an event that never arrives.
 */
TRACE_EVENT(io_uring_cqring_wait,

	TP_PROTO(void *ctx, int min_events),

	TP_ARGS(ctx, min_events),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  int,		min_events	)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->min_events	= min_events;
	),

	TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
);
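
/*
 * Example call site, a sketch only (as it might appear at the top of the
 * CQE wait loop; ctx and min_events are illustrative names):
 *
 *	trace_io_uring_cqring_wait(ctx, min_events);
 */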

/**
 * io_uring_fail_link - called before failing a linked request
 *
 * @ctx:	pointer to a ring context structure
 * @req:	request whose links were cancelled
 * @user_data:	user data associated with the request
 * @opcode:	opcode of request
 * @link:	cancelled link
 *
 * Allows tracking linked request cancellation, to see not only that some
 * work was cancelled, but also which request was the reason.
 */
TRACE_EVENT(io_uring_fail_link,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, void *link),

	TP_ARGS(ctx, req, user_data, opcode, link),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  void *,		link		)

		__string( op_str, io_uring_get_opcode(opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->user_data	= user_data;
		__entry->opcode		= opcode;
		__entry->link		= link;

		__assign_str(op_str, io_uring_get_opcode(opcode));
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p",
		__entry->ctx, __entry->req, __entry->user_data,
		__get_str(op_str), __entry->link)
);

/**
 * io_uring_complete - called when completing an SQE
 *
 * @ctx:		pointer to a ring context structure
 * @req:		pointer to a submitted request
 * @user_data:		user data associated with the request
 * @res:		result of the request
 * @cflags:		completion flags
 * @extra1:		extra 64-bit data for CQE32
 * @extra2:		extra 64-bit data for CQE32
 *
 */
TRACE_EVENT(io_uring_complete,

	TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
		 u64 extra1, u64 extra2),

	TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),

	TP_STRUCT__entry (
		__field(  void *,	ctx		)
		__field(  void *,	req		)
		__field(  u64,		user_data	)
		__field(  int,		res		)
		__field(  unsigned,	cflags		)
		__field(  u64,		extra1		)
		__field(  u64,		extra2		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->user_data	= user_data;
		__entry->res		= res;
		__entry->cflags		= cflags;
		__entry->extra1		= extra1;
		__entry->extra2		= extra2;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
		  "extra1 %llu extra2 %llu ",
		__entry->ctx, __entry->req,
		__entry->user_data,
		__entry->res, __entry->cflags,
		(unsigned long long) __entry->extra1,
		(unsigned long long) __entry->extra2)
);
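
/*
 * Sample rendered line (values invented; extra1/extra2 only carry data on
 * rings set up with IORING_SETUP_CQE32):
 *
 *	io_uring_complete: ring 0xffff888102a13800, req 0xffff888104e0e600, user_data 0x7, result 4096, cflags 0x0 extra1 0 extra2 0
 */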

/**
 * io_uring_submit_sqe - called before submitting one SQE
 *
 * @ctx:		pointer to a ring context structure
 * @req:		pointer to a submitted request
 * @user_data:		user data associated with the request
 * @opcode:		opcode of request
 * @flags:		request flags
 * @force_nonblock:	whether the context is blocking or not
 * @sq_thread:		true if sq_thread has submitted this SQE
 *
 * Allows tracking SQE submission, to understand what its source was: the SQ
 * thread or an io_uring_enter call.
 */
TRACE_EVENT(io_uring_submit_sqe,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, u32 flags,
		 bool force_nonblock, bool sq_thread),

	TP_ARGS(ctx, req, user_data, opcode, flags, force_nonblock, sq_thread),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  u32,			flags		)
		__field(  bool,			force_nonblock	)
		__field(  bool,			sq_thread	)

		__string( op_str, io_uring_get_opcode(opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->user_data	= user_data;
		__entry->opcode		= opcode;
		__entry->flags		= flags;
		__entry->force_nonblock	= force_nonblock;
		__entry->sq_thread	= sq_thread;

		__assign_str(op_str, io_uring_get_opcode(opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, "
		  "non block %d, sq_thread %d", __entry->ctx, __entry->req,
		  __entry->user_data, __get_str(op_str),
		  __entry->flags, __entry->force_nonblock, __entry->sq_thread)
);
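
/*
 * Sample rendered line (values invented; sq_thread 1 would indicate the
 * SQPOLL thread submitted this SQE rather than an io_uring_enter call):
 *
 *	io_uring_submit_sqe: ring 0xffff888102a13800, req 0xffff888104e0e600, user_data 0x7, opcode NOP, flags 0x0, non block 1, sq_thread 0
 */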

/*
 * io_uring_poll_arm - called after arming a poll wait if successful
 *
 * @ctx:		pointer to a ring context structure
 * @req:		pointer to the armed request
 * @user_data:		user data associated with the request
 * @opcode:		opcode of request
 * @mask:		request poll events mask
 * @events:		registered events of interest
 *
 * Allows tracking which fds are being waited on and what the events of
 * interest are.
 */
TRACE_EVENT(io_uring_poll_arm,

	TP_PROTO(void *ctx, void *req, u64 user_data, u8 opcode,
		 int mask, int events),

	TP_ARGS(ctx, req, user_data, opcode, mask, events),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  int,			mask		)
		__field(  int,			events		)

		__string( op_str, io_uring_get_opcode(opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->user_data	= user_data;
		__entry->opcode		= opcode;
		__entry->mask		= mask;
		__entry->events		= events;

		__assign_str(op_str, io_uring_get_opcode(opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->mask, __entry->events)
);
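
/*
 * The mask/events values are poll bits, e.g. events 0x1 would be EPOLLIN,
 * a read-readiness wait. Example call site, a sketch with illustrative
 * names:
 *
 *	trace_io_uring_poll_arm(ctx, req, user_data, opcode, mask, events);
 */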

/*
 * io_uring_task_add - called after adding a task
 *
 * @ctx:		pointer to a ring context structure
 * @req:		pointer to request
 * @user_data:		user data associated with the request
 * @opcode:		opcode of request
 * @mask:		request poll events mask
 *
 */
TRACE_EVENT(io_uring_task_add,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, int mask),

	TP_ARGS(ctx, req, user_data, opcode, mask),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  int,			mask		)

		__string( op_str, io_uring_get_opcode(opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->user_data	= user_data;
		__entry->opcode		= opcode;
		__entry->mask		= mask;

		__assign_str(op_str, io_uring_get_opcode(opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x",
		__entry->ctx, __entry->req, __entry->user_data,
		__get_str(op_str),
		__entry->mask)
);

/*
 * io_uring_req_failed - called when an SQE errors during submission
 *
 * @sqe:		pointer to the io_uring_sqe that failed
 * @ctx:		pointer to a ring context structure
 * @req:		pointer to request
 * @error:		error it failed with
 *
 * Allows easier diagnosing of malformed requests in production systems.
 */
TRACE_EVENT(io_uring_req_failed,

	TP_PROTO(const struct io_uring_sqe *sqe, void *ctx, void *req, int error),

	TP_ARGS(sqe, ctx, req, error),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  void *,		req		)
		__field(  unsigned long long,	user_data	)
		__field(  u8,			opcode		)
		__field(  u8,			flags		)
		__field(  u8,			ioprio		)
		__field( u64,			off		)
		__field( u64,			addr		)
		__field( u32,			len		)
		__field( u32,			op_flags	)
		__field( u16,			buf_index	)
		__field( u16,			personality	)
		__field( u32,			file_index	)
		__field( u64,			pad1		)
		__field( u64,			addr3		)
		__field( int,			error		)

		__string( op_str, io_uring_get_opcode(sqe->opcode) )
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->req		= req;
		__entry->user_data	= sqe->user_data;
		__entry->opcode		= sqe->opcode;
		__entry->flags		= sqe->flags;
		__entry->ioprio		= sqe->ioprio;
		__entry->off		= sqe->off;
		__entry->addr		= sqe->addr;
		__entry->len		= sqe->len;
		__entry->op_flags	= sqe->poll32_events;
		__entry->buf_index	= sqe->buf_index;
		__entry->personality	= sqe->personality;
		__entry->file_index	= sqe->file_index;
		__entry->pad1		= sqe->__pad2[0];
		__entry->addr3		= sqe->addr3;
		__entry->error		= error;

		__assign_str(op_str, io_uring_get_opcode(sqe->opcode));
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, "
		  "opcode %s, flags 0x%x, prio=%d, off=%llu, addr=%llu, "
		  "len=%u, rw_flags=0x%x, buf_index=%d, "
		  "personality=%d, file_index=%d, pad=0x%llx, addr3=%llx, "
		  "error=%d",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __get_str(op_str),
		  __entry->flags, __entry->ioprio,
		  (unsigned long long)__entry->off,
		  (unsigned long long) __entry->addr, __entry->len,
		  __entry->op_flags,
		  __entry->buf_index, __entry->personality, __entry->file_index,
		  (unsigned long long) __entry->pad1,
		  (unsigned long long) __entry->addr3, __entry->error)
);
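
/*
 * All of the captured fields work with the generic trace-event filter
 * syntax under tracefs; e.g., to keep only failed submissions with a
 * nonzero personality (a sketch, tracefs path as noted at the top):
 *
 *	# echo 'error != 0 && personality != 0' > \
 *		events/io_uring/io_uring_req_failed/filter
 */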

/*
 * io_uring_cqe_overflow - a CQE overflowed
 *
 * @ctx:		pointer to a ring context structure
 * @user_data:		user data associated with the request
 * @res:		CQE result
 * @cflags:		CQE flags
 * @ocqe:		pointer to the overflow cqe (if available)
 *
 */
TRACE_EVENT(io_uring_cqe_overflow,

	TP_PROTO(void *ctx, unsigned long long user_data, s32 res, u32 cflags,
		 void *ocqe),

	TP_ARGS(ctx, user_data, res, cflags, ocqe),

	TP_STRUCT__entry (
		__field(  void *,		ctx		)
		__field(  unsigned long long,	user_data	)
		__field(  s32,			res		)
		__field(  u32,			cflags		)
		__field(  void *,		ocqe		)
	),

	TP_fast_assign(
		__entry->ctx		= ctx;
		__entry->user_data	= user_data;
		__entry->res		= res;
		__entry->cflags		= cflags;
		__entry->ocqe		= ocqe;
	),

	TP_printk("ring %p, user_data 0x%llx, res %d, flags %x, "
		  "overflow_cqe %p",
		  __entry->ctx, __entry->user_data, __entry->res,
		  __entry->cflags, __entry->ocqe)
);

#endif /* _TRACE_IO_URING_H */

/* This part must be outside protection */
#include <trace/define_trace.h>