/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM io_uring

#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IO_URING_H

#include <linux/tracepoint.h>

struct io_wq_work;

/**
 * io_uring_create - called after a new io_uring context was prepared
 *
 * @fd:         corresponding file descriptor
 * @ctx:        pointer to a ring context structure
 * @sq_entries: actual SQ size
 * @cq_entries: actual CQ size
 * @flags:      SQ ring flags, provided to io_uring_setup(2)
 *
 * Allows tracing io_uring creation and provides a pointer to the context that
 * can be used later to find correlated events.
 */
TRACE_EVENT(io_uring_create,

	TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),

	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),

	TP_STRUCT__entry (
		__field( int,    fd         )
		__field( void *, ctx        )
		__field( u32,    sq_entries )
		__field( u32,    cq_entries )
		__field( u32,    flags      )
	),

	TP_fast_assign(
		__entry->fd         = fd;
		__entry->ctx        = ctx;
		__entry->sq_entries = sq_entries;
		__entry->cq_entries = cq_entries;
		__entry->flags      = flags;
	),

	TP_printk("ring %p, fd %d sq size %d, cq size %d, flags %d",
		  __entry->ctx, __entry->fd, __entry->sq_entries,
		  __entry->cq_entries, __entry->flags)
);

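/*
 * Illustrative call site only (a sketch, not taken verbatim from
 * fs/io_uring.c); 'ret' and 'p' are hypothetical names for the new ring's
 * file descriptor and the io_uring_params used to set it up:
 *
 *	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
 */
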
/**
 * io_uring_register - called after a buffer/file/eventfd was successfully
 *                     registered for a ring
 *
 * @ctx:           pointer to a ring context structure
 * @opcode:        describes which operation to perform
 * @nr_user_files: number of registered files
 * @nr_user_bufs:  number of registered buffers
 * @cq_ev_fd:      whether an eventfd was registered or not
 * @ret:           return code
 *
 * Allows tracing fixed files/buffers/eventfds that can be registered to
 * avoid the overhead of getting references to them for every operation. This
 * event, together with io_uring_file_get, can provide a full picture of how
 * much overhead one can reduce by using fixed (registered) resources.
 */
TRACE_EVENT(io_uring_register,

	TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
		 unsigned nr_bufs, bool eventfd, long ret),

	TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),

	TP_STRUCT__entry (
		__field( void *,   ctx      )
		__field( unsigned, opcode   )
		__field( unsigned, nr_files )
		__field( unsigned, nr_bufs  )
		__field( bool,     eventfd  )
		__field( long,     ret      )
	),

	TP_fast_assign(
		__entry->ctx      = ctx;
		__entry->opcode   = opcode;
		__entry->nr_files = nr_files;
		__entry->nr_bufs  = nr_bufs;
		__entry->eventfd  = eventfd;
		__entry->ret      = ret;
	),

	TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
		  "eventfd %d, ret %ld",
		  __entry->ctx, __entry->opcode, __entry->nr_files,
		  __entry->nr_bufs, __entry->eventfd, __entry->ret)
);

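/*
 * Illustrative call site only (a sketch; the ctx field names are
 * assumptions), emitted once registration has completed:
 *
 *	trace_io_uring_register(ctx, opcode, ctx->nr_user_files,
 *				ctx->nr_user_bufs, ctx->cq_ev_fd != NULL, ret);
 */
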
/**
 * io_uring_file_get - called before getting references to an SQE file
 *
 * @ctx: pointer to a ring context structure
 * @fd:  SQE file descriptor
 *
 * Allows tracing how often an SQE file reference is obtained, which can
 * help figure out whether it makes sense to use fixed files, or check that
 * fixed files are used correctly.
 */
TRACE_EVENT(io_uring_file_get,

	TP_PROTO(void *ctx, int fd),

	TP_ARGS(ctx, fd),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( int,    fd  )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->fd  = fd;
	),

	TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
);

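/*
 * Illustrative call site only (a sketch), hit on the non-fixed file path
 * before a reference to the SQE's file is taken:
 *
 *	trace_io_uring_file_get(ctx, fd);
 */
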
/**
 * io_uring_queue_async_work - called before submitting a new async work
 *
 * @ctx:   pointer to a ring context structure
 * @rw:    type of workqueue, hashed or normal
 * @req:   pointer to a submitted request
 * @work:  pointer to a submitted io_wq_work
 * @flags: request flags
 *
 * Allows tracing asynchronous work submission.
 */
TRACE_EVENT(io_uring_queue_async_work,

	TP_PROTO(void *ctx, int rw, void *req, struct io_wq_work *work,
		 unsigned int flags),

	TP_ARGS(ctx, rw, req, work, flags),

	TP_STRUCT__entry (
		__field( void *,              ctx   )
		__field( int,                 rw    )
		__field( void *,              req   )
		__field( struct io_wq_work *, work  )
		__field( unsigned int,        flags )
	),

	TP_fast_assign(
		__entry->ctx   = ctx;
		__entry->rw    = rw;
		__entry->req   = req;
		__entry->work  = work;
		__entry->flags = flags;
	),

	TP_printk("ring %p, request %p, flags %d, %s queue, work %p",
		  __entry->ctx, __entry->req, __entry->flags,
		  __entry->rw ? "hashed" : "normal", __entry->work)
);

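/*
 * Illustrative call site only (a sketch; 'do_hashed' is a hypothetical local
 * indicating whether the work goes onto the hashed queue):
 *
 *	trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
 *					req->flags);
 */
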
/**
 * io_uring_defer - called when an io_uring request is deferred
 *
 * @ctx:       pointer to a ring context structure
 * @req:       pointer to a deferred request
 * @user_data: user data associated with the request
 *
 * Allows tracking deferred requests, to get insight into which requests are
 * not started immediately.
 */
TRACE_EVENT(io_uring_defer,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data),

	TP_ARGS(ctx, req, user_data),

	TP_STRUCT__entry (
		__field( void *,             ctx  )
		__field( void *,             req  )
		__field( unsigned long long, data )
	),

	TP_fast_assign(
		__entry->ctx  = ctx;
		__entry->req  = req;
		__entry->data = user_data;
	),

	TP_printk("ring %p, request %p user_data %llu", __entry->ctx,
		  __entry->req, __entry->data)
);

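/*
 * Illustrative call site only (a sketch), emitted when a request is put on
 * the deferral list instead of being started right away:
 *
 *	trace_io_uring_defer(ctx, req, req->user_data);
 */
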
/**
 * io_uring_link - called before the io_uring request is added into the
 *                 link_list of another request
 *
 * @ctx:        pointer to a ring context structure
 * @req:        pointer to a linked request
 * @target_req: pointer to a previous request that would contain @req
 *
 * Allows tracking linked requests, to understand dependencies between
 * requests and how they influence the execution flow.
 */
TRACE_EVENT(io_uring_link,

	TP_PROTO(void *ctx, void *req, void *target_req),

	TP_ARGS(ctx, req, target_req),

	TP_STRUCT__entry (
		__field( void *, ctx        )
		__field( void *, req        )
		__field( void *, target_req )
	),

	TP_fast_assign(
		__entry->ctx        = ctx;
		__entry->req        = req;
		__entry->target_req = target_req;
	),

	TP_printk("ring %p, request %p linked after %p",
		  __entry->ctx, __entry->req, __entry->target_req)
);

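/*
 * Illustrative call site only (a sketch; 'head' is a hypothetical name for
 * the head of the link chain that @req is appended to):
 *
 *	trace_io_uring_link(ctx, req, head);
 */
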
/**
 * io_uring_cqring_wait - called before starting to wait for an available CQE
 *
 * @ctx:        pointer to a ring context structure
 * @min_events: minimal number of events to wait for
 *
 * Allows tracking waiting for a CQE, so that we can e.g. troubleshoot
 * situations where an application wants to wait for an event that never
 * comes.
 */
TRACE_EVENT(io_uring_cqring_wait,

	TP_PROTO(void *ctx, int min_events),

	TP_ARGS(ctx, min_events),

	TP_STRUCT__entry (
		__field( void *, ctx        )
		__field( int,    min_events )
	),

	TP_fast_assign(
		__entry->ctx        = ctx;
		__entry->min_events = min_events;
	),

	TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
);

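/*
 * Illustrative call site only (a sketch), emitted when io_uring_enter(2) is
 * about to sleep waiting for at least 'min_events' completions:
 *
 *	trace_io_uring_cqring_wait(ctx, min_events);
 */
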
/**
 * io_uring_fail_link - called before failing a linked request
 *
 * @req:  request whose links were cancelled
 * @link: cancelled link
 *
 * Allows tracking cancellation of linked requests, to see not only that some
 * work was cancelled, but also which request was the reason.
 */
TRACE_EVENT(io_uring_fail_link,

	TP_PROTO(void *req, void *link),

	TP_ARGS(req, link),

	TP_STRUCT__entry (
		__field( void *, req  )
		__field( void *, link )
	),

	TP_fast_assign(
		__entry->req  = req;
		__entry->link = link;
	),

	TP_printk("request %p, link %p", __entry->req, __entry->link)
);

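/*
 * Illustrative call site only (a sketch), emitted for each dependent request
 * 'link' that is failed because 'req' failed:
 *
 *	trace_io_uring_fail_link(req, link);
 */
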
/**
 * io_uring_complete - called when completing an SQE
 *
 * @ctx:       pointer to a ring context structure
 * @user_data: user data associated with the request
 * @res:       result of the request
 */
TRACE_EVENT(io_uring_complete,

	TP_PROTO(void *ctx, u64 user_data, long res),

	TP_ARGS(ctx, user_data, res),

	TP_STRUCT__entry (
		__field( void *, ctx       )
		__field( u64,    user_data )
		__field( long,   res       )
	),

	TP_fast_assign(
		__entry->ctx       = ctx;
		__entry->user_data = user_data;
		__entry->res       = res;
	),

	TP_printk("ring %p, user_data 0x%llx, result %ld",
		  __entry->ctx, (unsigned long long)__entry->user_data,
		  __entry->res)
);

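/*
 * Illustrative call site only (a sketch), emitted as the CQE for a request
 * is filled in:
 *
 *	trace_io_uring_complete(ctx, req->user_data, res);
 */
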
/**
 * io_uring_submit_sqe - called before submitting one SQE
 *
 * @ctx:            pointer to a ring context structure
 * @opcode:         opcode of request
 * @user_data:      user data associated with the request
 * @force_nonblock: whether a non-blocking submission was forced or not
 * @sq_thread:      true if sq_thread has submitted this SQE
 *
 * Allows tracking SQE submission, to understand whether its source was the
 * SQ thread or an io_uring_enter call.
 */
TRACE_EVENT(io_uring_submit_sqe,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock,
		 bool sq_thread),

	TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread),

	TP_STRUCT__entry (
		__field( void *, ctx            )
		__field( u8,     opcode         )
		__field( u64,    user_data      )
		__field( bool,   force_nonblock )
		__field( bool,   sq_thread      )
	),

	TP_fast_assign(
		__entry->ctx            = ctx;
		__entry->opcode         = opcode;
		__entry->user_data      = user_data;
		__entry->force_nonblock = force_nonblock;
		__entry->sq_thread      = sq_thread;
	),

	TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d",
		  __entry->ctx, __entry->opcode,
		  (unsigned long long) __entry->user_data,
		  __entry->force_nonblock, __entry->sq_thread)
);

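/*
 * Illustrative call site only (a sketch; the boolean arguments would come
 * from the submission context, e.g. whether the SQ poll thread performed
 * the submit):
 *
 *	trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
 *				  force_nonblock, sq_thread);
 */
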
TRACE_EVENT(io_uring_poll_arm,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask, int events),

	TP_ARGS(ctx, opcode, user_data, mask, events),

	TP_STRUCT__entry (
		__field( void *, ctx       )
		__field( u8,     opcode    )
		__field( u64,    user_data )
		__field( int,    mask      )
		__field( int,    events    )
	),

	TP_fast_assign(
		__entry->ctx       = ctx;
		__entry->opcode    = opcode;
		__entry->user_data = user_data;
		__entry->mask      = mask;
		__entry->events    = events;
	),

	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
		  __entry->ctx, __entry->opcode,
		  (unsigned long long) __entry->user_data,
		  __entry->mask, __entry->events)
);

TRACE_EVENT(io_uring_poll_wake,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),

	TP_ARGS(ctx, opcode, user_data, mask),

	TP_STRUCT__entry (
		__field( void *, ctx       )
		__field( u8,     opcode    )
		__field( u64,    user_data )
		__field( int,    mask      )
	),

	TP_fast_assign(
		__entry->ctx       = ctx;
		__entry->opcode    = opcode;
		__entry->user_data = user_data;
		__entry->mask      = mask;
	),

	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x",
		  __entry->ctx, __entry->opcode,
		  (unsigned long long) __entry->user_data,
		  __entry->mask)
);

TRACE_EVENT(io_uring_task_add,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),

	TP_ARGS(ctx, opcode, user_data, mask),

	TP_STRUCT__entry (
		__field( void *, ctx       )
		__field( u8,     opcode    )
		__field( u64,    user_data )
		__field( int,    mask      )
	),

	TP_fast_assign(
		__entry->ctx       = ctx;
		__entry->opcode    = opcode;
		__entry->user_data = user_data;
		__entry->mask      = mask;
	),

	TP_printk("ring %p, op %d, data 0x%llx, mask %x",
		  __entry->ctx, __entry->opcode,
		  (unsigned long long) __entry->user_data,
		  __entry->mask)
);

TRACE_EVENT(io_uring_task_run,

	TP_PROTO(void *ctx, u8 opcode, u64 user_data),

	TP_ARGS(ctx, opcode, user_data),

	TP_STRUCT__entry (
		__field( void *, ctx       )
		__field( u8,     opcode    )
		__field( u64,    user_data )
	),

	TP_fast_assign(
		__entry->ctx       = ctx;
		__entry->opcode    = opcode;
		__entry->user_data = user_data;
	),

	TP_printk("ring %p, op %d, data 0x%llx",
		  __entry->ctx, __entry->opcode,
		  (unsigned long long) __entry->user_data)
);

#endif /* _TRACE_IO_URING_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
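
/*
 * Usage sketch from userspace (not part of this header; assumes tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/events/io_uring/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */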