// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"
#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
/* Select an alternative, minimalistic output format instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4
static struct tracer_opt blk_tracer_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};
/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);
/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len, u64 cgid)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (blk_tracer) {
		buffer = blk_tr->array_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len + cgid_len;
		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		memcpy((void *)t + sizeof(*t) + cgid_len, data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
	}
}
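/*
 * Layout note (added for clarity): each notify record is emitted as
 *
 *     [struct blk_io_trace][cgid: u64, present only if cgid != 0][payload]
 *
 * with t->pdu_len covering both the optional cgid and the payload, which
 * is why the readers below subtract sizeof(u64) when __BLK_TN_CGROUP is set.
 */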
/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), 0);
	}
	spin_unlock_irqrestore(&running_trace_lock, flags);
}
static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
	local_irq_restore(flags);
}
void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
	const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		blkcg = NULL;
#ifdef CONFIG_BLK_CGROUP
	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n,
		   blkcg ? cgroup_id(blkcg->css.cgroup) : 1);
#else
	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, 0);
#endif
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}
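/*
 * The three checks above implement the user-configured filters from
 * BLKTRACESETUP: the action-category mask, the [start_lba, end_lba] window,
 * and an optional pid. A non-zero return means "drop this event".
 */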
/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
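/*
 * Worked example (a sketch of the arithmetic): for MASK_TC_BIT(rw, SYNC),
 * the REQ_SYNC bit of @rw sits at bit position __REQ_SYNC. Shifting left
 * by ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC moves it onto the
 * BLK_TC_SYNC category bit in the upper half of the 32-bit action word,
 * so the whole translation is a single constant shift per flag.
 */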
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int op, int op_flags, u32 what, int error,
			    int pdu_len, void *pdu_data, u64 cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(op_flags, SYNC);
	what |= MASK_TC_BIT(op_flags, RAHEAD);
	what |= MASK_TC_BIT(op_flags, META);
	what |= MASK_TC_BIT(op_flags, PREFLUSH);
	what |= MASK_TC_BIT(op_flags, FUA);
	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
	if (op == REQ_OP_FLUSH)
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->array_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * explicitly here.
		 */
		t->pid = pid;
		t->cpu = cpu;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len + cgid_len;

		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		if (pdu_len)
			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
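/*
 * Note on the two paths above: with the "blk" ftrace tracer enabled the
 * record goes into the ftrace ring buffer and is rendered by the formatting
 * routines further down in this file; otherwise it goes into the per-cpu
 * relay channel that blktrace(8) reads from debugfs.
 */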
static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	debugfs_remove(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}
static void get_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (++blk_probes_ref == 1)
		blk_register_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (!--blk_probes_ref)
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}
static void blk_trace_cleanup(struct blk_trace *bt)
{
	synchronize_rcu();
	blk_trace_free(bt);
	put_probe_ref();
}

static int __blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->blk_trace_mutex);
	ret = __blk_trace_remove(q);
	mutex_unlock(&q->blk_trace_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);
static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.read =		blk_dropped_read,
	.llseek =	default_llseek,
};
static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.write =	blk_msg_write,
	.llseek =	noop_llseek,
};
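/*
 * Usage sketch (path assumes the default debugfs mount point): userspace
 * can inject a marker into a running trace with something like
 *
 *     echo "hit the slow path" > /sys/kernel/debug/block/sda/msg
 *
 * which shows up as a BLK_TN_MESSAGE note in the event stream.
 */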
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}
static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};
static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1;
	}
}
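/*
 * When a partition is traced, the window above restricts events to that
 * partition's sector range; for a whole disk (or no bdev at all) the window
 * is effectively [0, (u64)-1], i.e. everything. Sector numbers recorded in
 * events stay absolute; the window is only consulted by act_log_check().
 */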
/*
 * Setup everything required to start tracing
 */
static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			      struct block_device *bdev,
			      struct blk_user_trace_setup *buts)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	if (!blk_debugfs_root)
		return -ENOENT;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	strreplace(buts->name, '/', '_');

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	dir = debugfs_lookup(buts->name, blk_debugfs_root);
	if (!dir)
		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);

	bt->dev = dev;
	atomic_set(&bt->dropped, 0);
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);

	bt->rchan = relay_open("trace", dir, buts->buf_size,
				buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	if (cmpxchg(&q->blk_trace, NULL, bt))
		goto err;

	get_probe_ref();

	ret = 0;
err:
	if (dir && !bt->dir)
		dput(dir);
	if (ret)
		blk_trace_free(bt);
	return ret;
}
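/*
 * Resulting debugfs layout (default mount point assumed):
 *
 *     /sys/kernel/debug/block/<name>/trace0 .. trace<N-1>  per-cpu relay buffers
 *     /sys/kernel/debug/block/<name>/dropped               lost-event counter
 *     /sys/kernel/debug/block/<name>/msg                   write-only marker file
 */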
static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			     struct block_device *bdev, char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	int ret;

	mutex_lock(&q->blk_trace_mutex);
	ret = __blk_trace_setup(q, name, dev, bdev, arg);
	mutex_unlock(&q->blk_trace_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif
static int __blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt;

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->blk_trace_mutex));
	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;
			spin_lock_irq(&running_trace_lock);
			list_add(&bt->running_list, &running_trace_list);
			spin_unlock_irq(&running_trace_lock);

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			spin_lock_irq(&running_trace_lock);
			list_del_init(&bt->running_list);
			spin_unlock_irq(&running_trace_lock);
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;

	mutex_lock(&q->blk_trace_mutex);
	ret = __blk_trace_startstop(q, start);
	mutex_unlock(&q->blk_trace_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&q->blk_trace_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		bdevname(bdev, b);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		/* fall through */
	case BLKTRACESTOP:
		ret = __blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = __blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&q->blk_trace_mutex);
	return ret;
}
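/*
 * Typical ioctl sequence as driven by blktrace(8): BLKTRACESETUP to allocate
 * buffers and debugfs files, BLKTRACESTART to begin logging, then the relay
 * files are consumed, and finally BLKTRACESTOP followed by BLKTRACETEARDOWN
 * to free everything again.
 */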
/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	mutex_lock(&q->blk_trace_mutex);
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->blk_trace_mutex))) {
		__blk_trace_startstop(q, 0);
		__blk_trace_remove(q);
	}

	mutex_unlock(&q->blk_trace_mutex);
}
#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct blk_trace *bt;

	/* We don't use the 'bt' value here except as an optimization... */
	bt = rcu_dereference_protected(q->blk_trace, 1);
	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return 0;

	if (!bio->bi_blkg)
		return 0;
	return cgroup_id(bio_blkcg(bio)->css.cgroup);
}
#else
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif

static u64
blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
{
	if (!rq->bio)
		return 0;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(q, rq->bio);
}
/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq:		the source request
 * @error:	return status to log
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, int error,
			     unsigned int nr_bytes, u32 what, u64 cgid)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
			rq->cmd_flags, what, error, 0, NULL, cgid);
	rcu_read_unlock();
}
static void blk_add_trace_rq_insert(void *ignore,
				    struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_issue(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_requeue(void *ignore,
				     struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
			int error, unsigned int nr_bytes)
{
	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
			 blk_trace_request_get_cgid(rq->q, rq));
}
/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
			blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}
static void blk_add_trace_bio_bounce(void *ignore,
				     struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio,
				       int error)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
}

static void blk_add_trace_bio_backmerge(void *ignore,
					struct request_queue *q,
					struct request *rq,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
}

static void blk_add_trace_bio_frontmerge(void *ignore,
					 struct request_queue *q,
					 struct request *rq,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
}

static void blk_add_trace_bio_queue(void *ignore,
				    struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
}
static void blk_add_trace_getrq(void *ignore,
				struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
	else {
		struct blk_trace *bt;

		rcu_read_lock();
		bt = rcu_dereference(q->blk_trace);
		if (bt)
			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
					NULL, 0);
		rcu_read_unlock();
	}
}

static void blk_add_trace_sleeprq(void *ignore,
				  struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
	else {
		struct blk_trace *bt;

		rcu_read_lock();
		bt = rcu_dereference(q->blk_trace);
		if (bt)
			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
					0, 0, NULL, 0);
		rcu_read_unlock();
	}
}
static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt)
		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
	rcu_read_unlock();
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				    unsigned int depth, bool explicit)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
	}
	rcu_read_unlock();
}
static void blk_add_trace_split(void *ignore,
				struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
				BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
				&rpdu, blk_trace_bio_get_cgid(q, bio));
	}
	rcu_read_unlock();
}
/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper or raid target sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_bio_remap(void *ignore,
				    struct request_queue *q, struct bio *bio,
				    dev_t dev, sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio_dev(bio));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}
/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore,
				   struct request_queue *q,
				   struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
	rcu_read_unlock();
}
/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
				BLK_TA_DRV_DATA, 0, len, data,
				blk_trace_request_get_cgid(q, rq));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}
static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);

	tracepoint_synchronize_unregister();
}
/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}
static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
}

static inline u64 t_cgid(const struct trace_entry *ent)
{
	return *(u64 *)(te_blk_io_trace(ent) + 1);
}

static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
	const __u64 *val = pdu_start(ent, has_cg);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r, bool has_cg)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
	__u64 sector_from = __r->sector_from;

	r->device_from = be32_to_cpu(__r->device_from);
	r->device_to   = be32_to_cpu(__r->device_to);
	r->sector_from = be64_to_cpu(sector_from);
}
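/*
 * These helpers mirror the writer-side layout: when __BLK_TA_CGROUP (or
 * __BLK_TN_CGROUP) is set, a u64 cgroup id is stored between the
 * blk_io_trace header and the real PDU, so pdu_start()/pdu_real_len()
 * skip or subtract sizeof(u64) accordingly.
 */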
typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
	bool has_cg);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}
static void blk_log_action(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	if (has_cg) {
		u64 id = t_cgid(iter->ent);

		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
			char blkcg_name_buf[NAME_MAX + 1] = "<...>";

			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
				sizeof(blkcg_name_buf));
			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 blkcg_name_buf, act, rwbs);
		} else {
			/*
			 * The cgid portion used to be "INO,GEN".  Userland
			 * builds a FILEID_INO32_GEN fid out of them and
			 * opens the cgroup using open_by_handle_at(2).
			 * While 32bit ino setups are still the same, 64bit
			 * ones now use the 64bit ino as the whole ID and
			 * no longer use generation.
			 *
			 * Regardless of the content, always output
			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
			 * be mapped back to @id on both 64 and 32bit ino
			 * setups.  See __kernfs_fh_to_dentry().
			 */
			trace_seq_printf(&iter->seq,
				 "%3d,%-3d %llx,%-llx %2s %3s ",
				 MAJOR(t->device), MINOR(t->device),
				 id & U32_MAX, id >> 32, act, rwbs);
		}
	} else
		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				 MAJOR(t->device), MINOR(t->device), act, rwbs);
}
static void blk_log_dump_pdu(struct trace_seq *s,
	const struct trace_entry *ent, bool has_cg)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent, has_cg);
	pdu_len = pdu_real_len(ent, has_cg);

	if (!pdu_len)
		return;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {

		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}
static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
						t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}
static void blk_log_with_error(struct trace_seq *s,
			const struct trace_entry *ent, bool has_cg)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}
static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	struct blk_io_trace_remap r = { .device_from = 0, };

	get_pdu_remap(ent, &r, has_cg);
	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(r.device_from), MINOR(r.device_from),
			 (unsigned long long)r.sector_from);
}
static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}
static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
}
static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent,
	bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent, has_cg), cmd);
}
static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
			bool has_cg)
{
	trace_seq_putmem(s, pdu_start(ent, has_cg),
		pdu_real_len(ent, has_cg));
	trace_seq_putc(s, '\n');
}
/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}
static const struct {
	const char *act[2];
	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent,
			    bool has_cg);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{ "U", "unplug_io" },   blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{ "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{ "X", "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{ "B", "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{ "A", "remap" },	   blk_log_remap },
};
static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	bool long_act;
	blk_log_action_t *log_action;
	bool has_cg;

	t	   = te_blk_io_trace(iter->ent);
	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;
	has_cg	   = t->action & __BLK_TA_CGROUP;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		log_action(iter, long_act ? "message" : "m", has_cg);
		blk_log_msg(s, iter->ent, has_cg);
		return trace_handle_return(s);
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		log_action(iter, what2act[what].act[long_act], has_cg);
		what2act[what].print(s, iter->ent, has_cg);
	}

	return trace_handle_return(s);
}
static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}
static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	trace_seq_putmem(s, &old, offset);
	trace_seq_putmem(s, &t->sector,
			 sizeof(old) - offset + t->pdu_len);
}
static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	blk_trace_synthesize_old_trace(iter);

	return trace_handle_return(&iter->seq);
}
static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}
static int
blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}
static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};
static int __init init_blk_tracer(void)
{
	if (!register_trace_event(&trace_blk_event)) {
		pr_warn("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warn("Warning: could not register the block tracer\n");
		unregister_trace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);
static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	put_probe_ref();
	synchronize_rcu();
	blk_trace_free(bt);
	return 0;
}
/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	ret = -EBUSY;
	if (cmpxchg(&q->blk_trace, NULL, bt))
		goto free_bt;

	get_probe_ref();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}
/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);
static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};
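/*
 * These attributes surface as /sys/block/<disk>/trace/{enable,act_mask,pid,
 * start_lba,end_lba}. A minimal usage sketch:
 *
 *     echo 1 > /sys/block/sda/trace/enable    # start queue-side tracing
 *     echo 0 > /sys/block/sda/trace/enable    # tear it down again
 */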
static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_FLUSH,		"flush"		},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_NOTIFY,	"notify"	},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
	{ BLK_TC_FUA,		"fua"		},
};
static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}

		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}
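/*
 * Example (hypothetical input values): writing "read,write,fs" to the
 * act_mask attribute parses here to BLK_TC_READ | BLK_TC_WRITE | BLK_TC_FS,
 * while any unknown token makes the whole write fail with -EINVAL.
 */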
static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}
static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	struct blk_trace *bt;
	ssize_t ret = -ENXIO;

	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&q->blk_trace_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->blk_trace_mutex));
	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!bt);
		goto out_unlock_bdev;
	}

	if (bt == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, bt->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", bt->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", bt->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", bt->end_lba);

out_unlock_bdev:
	mutex_unlock(&q->blk_trace_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret;
}
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	struct blk_trace *bt;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (kstrtoull(buf, 0, &value)) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (kstrtoull(buf, 0, &value))
		goto out;

	ret = -ENXIO;

	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&q->blk_trace_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->blk_trace_mutex));
	if (attr == &dev_attr_enable) {
		if (!!value == !!bt) {
			ret = 0;
			goto out_unlock_bdev;
		}
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (bt == NULL) {
		ret = blk_trace_setup_queue(q, bdev);
		bt = rcu_dereference_protected(q->blk_trace,
				lockdep_is_held(&q->blk_trace_mutex));
	}

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			bt->act_mask = value;
		else if (attr == &dev_attr_pid)
			bt->pid = value;
		else if (attr == &dev_attr_start_lba)
			bt->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			bt->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&q->blk_trace_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret ? ret : count;
}
int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_EVENT_TRACING

void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
{
	int i = 0;

	if (op & REQ_PREFLUSH)
		rwbs[i++] = 'F';

	switch (op & REQ_OP_MASK) {
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_SAME:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_DISCARD:
		rwbs[i++] = 'D';
		break;
	case REQ_OP_SECURE_ERASE:
		rwbs[i++] = 'D';
		rwbs[i++] = 'E';
		break;
	case REQ_OP_FLUSH:
		rwbs[i++] = 'F';
		break;
	case REQ_OP_READ:
		rwbs[i++] = 'R';
		break;
	default:
		rwbs[i++] = 'N';
	}

	if (op & REQ_FUA)
		rwbs[i++] = 'F';
	if (op & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (op & REQ_SYNC)
		rwbs[i++] = 'S';
	if (op & REQ_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);
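/*
 * Example outputs (derived from the flag tests above): a synchronous write
 * ends up as "WS", a plain read as "R", a preflush+FUA write as "FWF", and
 * a discard as "D".
 */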
#endif /* CONFIG_EVENT_TRACING */