/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;
/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
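
/*
 * Usage sketch (hedged: paths assume debugfs is mounted at
 * /sys/kernel/debug, and a device whose queue already has blktrace
 * enabled, e.g. via the per-device sysfs knob near the bottom of this
 * file):
 *
 *	echo blk > /sys/kernel/debug/tracing/current_tracer
 *	echo blk_classic > /sys/kernel/debug/tracing/trace_options
 *	cat /sys/kernel/debug/tracing/trace
 *
 * The blk_classic option selects the minimalistic, blkparse-style output
 * instead of the default ftrace line format.
 */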
static struct tracer_opt blk_tracer_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);
/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;

	if (blk_tracer) {
		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(buffer, event, 0, pc);
	}
}
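
/*
 * trace_note() feeds one of two sinks: when the in-kernel "blk" tracer is
 * active it reserves space in the ftrace ring buffer, otherwise it falls
 * back to the relay channel set up by the blktrace ioctl interface. The
 * "goto record_it" jumps into the relay if-block from the ftrace path so
 * that the field assignments are shared by both paths.
 */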
/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}
/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
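
/*
 * MASK_TC_BIT() relocates a single REQ_* request flag bit to its BLK_TC_*
 * position in the upper half of the action word. For example,
 * MASK_TC_BIT(rw, SYNC) expands to
 *
 *	(rw & REQ_SYNC) << (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC)
 *
 * i.e. the REQ_SYNC bit, sitting at position __REQ_SYNC, is shifted left
 * so it lands on bit ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT. Since every
 * operand of the shift amount is a compile-time constant, each use
 * reduces to a single and-plus-shift at runtime.
 */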
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, SYNC);
	what |= MASK_TC_BIT(rw, RAHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
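
/*
 * Note that in __blk_add_trace() interrupts are only disabled on the
 * relay path: the ftrace path either commits and returns inside the
 * if-block or bails out before local_irq_save() is reached. That is
 * presumably why "flags" is pre-initialized to 0 rather than relying on
 * local_irq_save() to set it on every path.
 */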
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	debugfs_remove(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	blk_trace_free(bt);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
}

int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner		= THIS_MODULE,
	.open		= blk_dropped_open,
	.read		= blk_dropped_read,
	.llseek		= default_llseek,
};

static int blk_msg_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = kmalloc(count + 1, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	if (copy_from_user(msg, buffer, count)) {
		kfree(msg);
		return -EFAULT;
	}

	msg[count] = '\0';
	bt = filp->private_data;
	__trace_note_message(bt, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner		= THIS_MODULE,
	.open		= blk_msg_open,
	.write		= blk_msg_write,
	.llseek		= noop_llseek,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}
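
/*
 * Relay semantics: a subbuf_start callback that returns 0 rejects the
 * switch to the next subbuffer, so once the channel is full new events
 * are silently discarded. The per-trace "dropped" counter, exposed via
 * the debugfs "dropped" file above, lets blktrace's userspace report how
 * many events were lost.
 */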
static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct block_device *bdev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	mutex_lock(&blk_tree_mutex);
	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root) {
			mutex_unlock(&blk_tree_mutex);
			goto err;
		}
	}
	mutex_unlock(&blk_tree_mutex);

	dir = debugfs_create_dir(buts->name, blk_tree_root);

	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();

	return 0;
err:
	blk_trace_free(bt);
	return ret;
}
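
/*
 * Userspace sketch of this setup path (hedged: illustrative only, the
 * blktrace(8) tool is the canonical consumer). A caller fills in a
 * struct blk_user_trace_setup and issues the ioctls on an open device:
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,	(bytes per relay subbuffer)
 *		.buf_nr   = 4,		(number of subbuffers)
 *		.act_mask = 0,		(0 selects all actions)
 *	};
 *	ioctl(fd, BLKTRACESETUP, &buts);
 *	ioctl(fd, BLKTRACESTART);
 *
 * On return from BLKTRACESETUP, buts.name holds the debugfs directory
 * name under block/ where the per-cpu relay "trace" files appear.
 */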
int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};
	memcpy(&buts.name, &cbuts.name, 32);

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, 32)) {
		blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt = q->blk_trace;

	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
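
/*
 * Trace state machine: Blktrace_setup -(start)-> Blktrace_running
 * -(stop)-> Blktrace_stopped -(start)-> Blktrace_running. Bumping
 * blktrace_seq on every (re)start forces a fresh BLK_TN_PROCESS note per
 * task (see trace_note_tsk()); the smp_mb() is presumably there to order
 * the sequence bump against the state change that makes tracing visible
 * to producers.
 */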
/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		bdevname(bdev, b);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		/* fall through */
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:	the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
			     u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
				what, rq->errors, rq->cmd_len, rq->cmd);
	} else  {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
				rq->cmd_flags, what, rq->errors, 0, NULL);
	}
}

static void blk_add_trace_rq_abort(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(void *ignore,
				    struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(void *ignore,
				     struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(void *ignore,
				      struct request_queue *q,
				      struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (!error && !bio_flagged(bio, BIO_UPTODATE))
		error = EIO;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			error, 0, NULL);
}

static void blk_add_trace_bio_bounce(void *ignore,
				     struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio,
				       int error)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
}

static void blk_add_trace_bio_backmerge(void *ignore,
					struct request_queue *q,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
}

static void blk_add_trace_bio_frontmerge(void *ignore,
					 struct request_queue *q,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
}

static void blk_add_trace_bio_queue(void *ignore,
				    struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore,
				struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
	}
}

static void blk_add_trace_sleeprq(void *ignore,
				  struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
					0, 0, NULL);
	}
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				 unsigned int depth, bool explicit)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_split(void *ignore,
				struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}
/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_bio_remap(void *ignore,
				    struct request_queue *q, struct bio *bio,
				    dev_t dev, sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
			BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
			sizeof(r), &r);
}
/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps requests to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore,
				   struct request_queue *q,
				   struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
			sizeof(r), &r);
}
/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
		__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
				BLK_TA_DRV_DATA, rq->errors, len, data);
	else
		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
				BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	unregister_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if (t->action == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_BARRIER)
		rwbs[i++] = 'B';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
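
/*
 * Example: a synchronous metadata write has BLK_TC_WRITE, BLK_TC_SYNC and
 * BLK_TC_META set in the upper action bits, so fill_rwbs() produces "WSM";
 * a read-ahead read yields "RA". The 6-byte rwbs buffers used by the
 * callers leave room for the worst case of five flag characters plus the
 * terminating NUL.
 */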
static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent) + 1;
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
	const __u64 *val = pdu_start(ent);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent);
	__u64 sector_from = __r->sector_from;

	r->device_from = be32_to_cpu(__r->device_from);
	r->device_to   = be32_to_cpu(__r->device_to);
	r->sector_from = be64_to_cpu(sector_from);
}

typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);

static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	return trace_seq_printf(&iter->seq,
				"%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
				MAJOR(t->device), MINOR(t->device), iter->cpu,
				secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static int blk_log_action(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end, ret;

	pdu_buf = pdu_start(ent);
	pdu_len = te_blk_io_trace(ent)->pdu_len;

	if (!pdu_len)
		return 1;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	if (!trace_seq_putc(s, '('))
		return 0;

	for (i = 0; i < pdu_len; i++) {

		ret = trace_seq_printf(s, "%s%02x",
				       i == 0 ? "" : " ", pdu_buf[i]);
		if (!ret)
			return ret;

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1)
			return trace_seq_puts(s, " ..) ");
	}

	return trace_seq_puts(s, ") ");
}
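
/*
 * Sample output (illustrative): a SCSI CDB whose bytes are all zero after
 * the first would print as "(28 00 ..) " - one zero byte is emitted past
 * the last non-zero byte, then the remaining run of zeroes is elided.
 */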
static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		int ret;

		ret = trace_seq_printf(s, "%u ", t_bytes(ent));
		if (!ret)
			return 0;
		ret = blk_log_dump_pdu(s, ent);
		if (!ret)
			return 0;
		return trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			return trace_seq_printf(s, "%llu + %u [%s]\n",
						t_sector(ent), t_sec(ent), cmd);
		return trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static int blk_log_with_error(struct trace_seq *s,
			      const struct trace_entry *ent)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		int ret;

		ret = blk_log_dump_pdu(s, ent);
		if (ret)
			return trace_seq_printf(s, "[%d]\n", t_error(ent));
		return 0;
	} else {
		if (t_sec(ent))
			return trace_seq_printf(s, "%llu + %u [%d]\n",
						t_sector(ent),
						t_sec(ent), t_error(ent));
		return trace_seq_printf(s, "%llu [%d]\n",
					t_sector(ent), t_error(ent));
	}
}

static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
	struct blk_io_trace_remap r = { .device_from = 0, };

	get_pdu_remap(ent, &r);
	return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
				t_sector(ent), t_sec(ent),
				MAJOR(r.device_from), MINOR(r.device_from),
				(unsigned long long)r.sector_from);
}

static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}

static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
				get_pdu_int(ent), cmd);
}

static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
{
	int ret;
	const struct blk_io_trace *t = te_blk_io_trace(ent);

	ret = trace_seq_putmem(s, t + 1, t->pdu_len);
	if (ret)
		return trace_seq_putc(s, '\n');
	return ret;
}
/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	int	   (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
};

static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	int ret;
	bool long_act;
	blk_log_action_t *log_action;

	t	   = te_blk_io_trace(iter->ent);
	what	   = t->action & ((1 << BLK_TC_SHIFT) - 1);
	long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;

	if (t->action == BLK_TN_MESSAGE) {
		ret = log_action(iter, long_act ? "message" : "m");
		if (ret)
			ret = blk_log_msg(s, iter->ent);
		goto out;
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		ret = log_action(iter, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(s, iter->ent);
	}
out:
	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}

static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	if (!trace_seq_putmem(s, &old, offset))
		return 0;
	return trace_seq_putmem(s, &t->sector,
				sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	return blk_trace_synthesize_old_trace(iter) ?
			TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}
device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();

	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		ret = -EBUSY;
		goto free_bt;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)
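
/*
 * Usage sketch for the sysfs interface (hedged: the device name is an
 * example). Each block device or partition gets a trace/ attribute group:
 *
 *	echo read,write > /sys/block/sda/trace/act_mask
 *	echo 1 > /sys/block/sda/trace/enable
 *	(run the workload; events are consumed through the ftrace "blk"
 *	 tracer rather than a relay channel in this mode)
 *	echo 0 > /sys/block/sda/trace/enable
 *
 * act_mask also accepts a hex mask; pid, start_lba and end_lba filter by
 * issuing task and sector range, as in the ioctl interface.
 */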
static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_BARRIER,	"barrier"	},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}

		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}
static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	ret = -ENXIO;

	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_EVENT_TRACING

void blk_dump_cmd(char *buf, struct request *rq)
{
	int i, end;
	int len = rq->cmd_len;
	unsigned char *cmd = rq->cmd;

	if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
		buf[0] = '\0';
		return;
	}

	for (end = len - 1; end >= 0; end--)
		if (cmd[end])
			break;
	end++;

	for (i = 0; i < len; i++) {
		buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
		if (i == end && end != len - 1) {
			sprintf(buf, " ..");
			break;
		}
	}
}

void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
{
	int i = 0;

	if (rw & WRITE)
		rwbs[i++] = 'W';
	else if (rw & REQ_DISCARD)
		rwbs[i++] = 'D';
	else if (bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (rw & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (rw & REQ_SYNC)
		rwbs[i++] = 'S';
	if (rw & REQ_META)
		rwbs[i++] = 'M';
	if (rw & REQ_SECURE)
		rwbs[i++] = 'E';

	rwbs[i] = '\0';
}

#endif /* CONFIG_EVENT_TRACING */