#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>

#define RWBS_LEN	8

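/*
 * All events below use the standard TRACE_EVENT() layout: TP_PROTO()
 * declares the probe signature, TP_ARGS() names its arguments,
 * TP_STRUCT__entry() and TP_fast_assign() describe and fill the per-event
 * ring-buffer record, and TP_printk() formats that record for the trace
 * output.  DECLARE_EVENT_CLASS()/DEFINE_EVENT() let several events that
 * record identical data share one such layout.
 */
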
DECLARE_EVENT_CLASS(block_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( size_t,	size		)
	),

	TP_fast_assign(
		__entry->dev		= bh->b_bdev->bd_dev;
		__entry->sector		= bh->b_blocknr;
		__entry->size		= bh->b_size;
	),

	TP_printk("%d,%d sector=%llu size=%zu",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		(unsigned long long)__entry->sector, __entry->size
	)
);

/**
 * block_touch_buffer - mark a buffer accessed
 * @bh: buffer_head being touched
 *
 * Called from touch_buffer().
 */
DEFINE_EVENT(block_buffer, block_touch_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);

/**
 * block_dirty_buffer - mark a buffer dirty
 * @bh: buffer_head being dirtied
 *
 * Called from mark_buffer_dirty().
 */
DEFINE_EVENT(block_buffer, block_dirty_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);

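/*
 * Usage example (illustrative, not part of the header itself): with tracefs
 * mounted, the buffer events above can be enabled from user space, e.g.
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/block/block_dirty_buffer/enable
 *	cat /sys/kernel/debug/tracing/trace_pipe
 *
 * and each hit is rendered with the TP_printk() format above, roughly
 * "8,0 sector=12345 size=4096".
 */
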
DECLARE_EVENT_CLASS(block_rq_with_error,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__field( int,		errors			)
		__array( char,		rwbs,	RWBS_LEN	)
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq) )
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq);
		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq);
		__entry->errors    = rq->errors;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		blk_dump_cmd(__get_str(cmd), rq);
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->errors)
);

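/*
 * Note: for SCSI pass-through (REQ_TYPE_BLOCK_PC) requests the sector and
 * length have no block-layer meaning, so they are recorded as 0 above and
 * the raw command is reported through the dynamic "cmd" string instead.
 */
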
/**
 * block_rq_abort - abort block operation request
 * @q: queue containing the block operation request
 * @rq: block IO operation request
 *
 * Called immediately after pending block IO operation request @rq in
 * queue @q is aborted. The fields in the operation request @rq
 * can be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_abort,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q. For some reason the request was not completed and needs to be
 * put back in the queue.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_requeue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operations request
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver. If
 * @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
TRACE_EVENT(block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq,
		 unsigned int nr_bytes),

	TP_ARGS(q, rq, nr_bytes),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__field( int,		errors			)
		__array( char,		rwbs,	RWBS_LEN	)
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq) )
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = blk_rq_pos(rq);
		__entry->nr_sector = nr_bytes >> 9;	/* bytes -> 512-byte sectors */
		__entry->errors    = rq->errors;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
		blk_dump_cmd(__get_str(cmd), rq);
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->errors)
);

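/*
 * Illustrative sketch (not taken from this file): a kernel module can hook
 * block_rq_complete through the register_trace_<event>() helpers generated
 * by the tracepoint machinery; the probe receives the TP_PROTO() arguments
 * after its private data pointer:
 *
 *	static void probe_rq_complete(void *ignore, struct request_queue *q,
 *				      struct request *rq, unsigned int nr_bytes)
 *	{
 *		pr_info("completed %u bytes\n", nr_bytes);
 *	}
 *
 *	register_trace_block_rq_complete(probe_rq_complete, NULL);
 *	...
 *	unregister_trace_block_rq_complete(probe_rq_complete, NULL);
 */
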
DECLARE_EVENT_CLASS(block_rq,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__field( unsigned int,	bytes			)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
		__dynamic_array( char,	cmd,	blk_cmd_buf_len(rq) )
	),

	TP_fast_assign(
		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
		__entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq);
		__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq);
		__entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					blk_rq_bytes(rq) : 0;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
		blk_dump_cmd(__get_str(cmd), rq);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __entry->bytes, __get_str(cmd),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_rq_insert - insert block operation request into queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q. The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
);

/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device. Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
TRACE_EVENT(block_bio_bounce,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ?
					  bio->bi_bdev->bd_dev : 0;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 * @error: io error value
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

	TP_PROTO(struct request_queue *q, struct bio *bio, int error),

	TP_ARGS(q, bio, error),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned,	nr_sector	)
		__field( int,		error		)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		__entry->error		= error;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);

DECLARE_EVENT_CLASS(block_bio_merge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
);

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
);

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
TRACE_EVENT(block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

DECLARE_EVENT_CLASS(block_get_rq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( unsigned int,	nr_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0;
		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
		blk_fill_rwbs(__entry->rwbs,
			      bio ? bio->bi_rw : 0, __entry->nr_sector);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
DEFINE_EVENT(block_get_rq, block_getrq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);

/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q,
 * the process needs to wait for a request struct to become
 * available. This tracepoint event is generated each time the
 * process goes to sleep waiting for a request struct to become available.
 */
DEFINE_EVENT(block_get_rq, block_sleeprq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
);

/**
 * block_plug - keep operation requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q. Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s]", __entry->comm)
);

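/*
 * block_plug marks the start of request batching for the current task; the
 * matching block_unplug event below reports, via its @depth argument, the
 * number of queued requests being released to the driver.
 */
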
DECLARE_EVENT_CLASS(block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit),

	TP_STRUCT__entry(
		__field( int,		nr_rq			)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->nr_rq = depth;
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);

/**
 * block_unplug - release of operation requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because device driver is scheduled to work
 * on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit)
);

/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests. The newly created @bio request starts at
 * @new_sector. This split may be required due to hardware limitations
 * such as operation crossing device boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

	TP_PROTO(struct request_queue *q, struct bio *bio,
		 unsigned int new_sector),

	TP_ARGS(q, bio, new_sector),

	TP_STRUCT__entry(
		__field( dev_t,		dev			)
		__field( sector_t,	sector			)
		__field( sector_t,	new_sector		)
		__array( char,		rwbs,	RWBS_LEN	)
		__array( char,		comm,	TASK_COMM_LEN	)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->new_sector	= new_sector;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu / %llu [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  (unsigned long long)__entry->new_sector,
		  __entry->comm)
);

/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_bio_remap,

	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
		 sector_t from),

	TP_ARGS(q, bio, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
);

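/*
 * block_bio_remap is typically emitted by partition remapping and by
 * stacking drivers such as device-mapper or md, when a bio aimed at a
 * logical device is redirected to the underlying raw device.
 */
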
/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped. The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
		 sector_t from),

	TP_ARGS(q, rq, dev, from),

	TP_STRUCT__entry(
		__field( dev_t,		dev		)
		__field( sector_t,	sector		)
		__field( unsigned int,	nr_sector	)
		__field( dev_t,		old_dev		)
		__field( sector_t,	old_sector	)
		__field( unsigned int,	nr_bios		)
		__array( char,		rwbs,	RWBS_LEN)
	),

	TP_fast_assign(
		__entry->dev		= disk_devt(rq->rq_disk);
		__entry->sector		= blk_rq_pos(rq);
		__entry->nr_sector	= blk_rq_sectors(rq);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
		__entry->nr_bios	= blk_rq_count_bios(rq);
		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector, __entry->nr_bios)
);

#endif /* _TRACE_BLOCK_H */

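/*
 * <trace/define_trace.h> re-includes this header (via TRACE_HEADER_MULTI_READ)
 * with different definitions of the TRACE_EVENT() family of macros, which is
 * how the tracepoint declarations and their ftrace glue are generated.
 */
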
/* This part must be outside protection */
#include <trace/define_trace.h>