#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/data_map.h"

#include <sys/types.h>
#include <sys/prctl.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>
static char const *input_name = "perf.data";

static unsigned long total_comm = 0;

static struct rb_root threads;
static struct thread *last_match;

static struct perf_header *header;
static u64 sample_type;

static char default_sort_order[] = "avg, max, switch, runtime";
static char *sort_order = default_sort_order;

static char *cwd;
static int cwdlen;

#define PR_SET_NAME	15	/* Set process name */
#define MAX_CPUS	4096

#define BUG_ON(x)	assert(!(x))

static u64 run_measurement_overhead;
static u64 sleep_measurement_overhead;

#define COMM_LEN	20
#define SYM_LEN		129

#define MAX_PID		65536

static unsigned long nr_tasks;
struct sched_atom;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
};

struct sched_atom {
	enum sched_event_type	type;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	int			specific_wait;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};
static struct task_desc *pid_to_task[MAX_PID];

static struct task_desc **tasks;

static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64 start_time;

static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long nr_run_events;
static unsigned long nr_sleep_events;
static unsigned long nr_wakeup_events;

static unsigned long nr_sleep_corrections;
static unsigned long nr_run_events_optimized;

static unsigned long targetless_wakeups;
static unsigned long multitarget_wakeups;

static u64 cpu_usage;
static u64 runavg_cpu_usage;
static u64 parent_cpu_usage;
static u64 runavg_parent_cpu_usage;

static unsigned long nr_runs;
static u64 sum_runtime;
static u64 sum_fluct;
static u64 run_avg;

static unsigned long replay_repeat = 10;
static unsigned long nr_timestamps;
static unsigned long nr_unordered_timestamps;
static unsigned long nr_state_machine_bugs;
static unsigned long nr_context_switch_bugs;
static unsigned long nr_events;
static unsigned long nr_lost_chunks;
static unsigned long nr_lost_events;

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root atom_root, sorted_atom_root;

static u64 all_runtime;
static u64 all_count;
static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
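/*
 * burn_nsecs() below spins in a busy loop, but deliberately stops
 * run_measurement_overhead nsecs early to compensate for the cost of
 * the get_nsecs() calls themselves (measured at startup by
 * calibrate_run_measurement_overhead()); otherwise every replayed RUN
 * atom would come out systematically longer than what was traced.
 */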
static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}
static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec = nsecs / 999999999;

	nanosleep(&ts, NULL);
}
static void calibrate_run_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %Ld nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
}
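/*
 * Both calibration loops keep the minimum of ten samples - presumably
 * because the shortest observed round trip is the one least disturbed
 * by preemption, and thus the best estimate of the pure measurement
 * cost. Note that in this version only run_measurement_overhead is
 * consumed (by burn_nsecs()); sleep_measurement_overhead is recorded
 * and reported but not otherwise used.
 */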
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = calloc(1, sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}
static void
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * to it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}

static void
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}

static void
add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}
static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	if (task)
		return task;

	task = calloc(1, sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks*sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}
static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}
static void
process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
{
	int ret = 0;
	u64 now;
	long long delta;

	now = get_nsecs();
	delta = start_time + atom->timestamp - now;

	switch (atom->type) {
		case SCHED_EVENT_RUN:
			burn_nsecs(atom->duration);
			break;
		case SCHED_EVENT_SLEEP:
			if (atom->wait_sem)
				ret = sem_wait(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_WAKEUP:
			if (atom->wait_sem)
				ret = sem_post(atom->wait_sem);
			BUG_ON(ret);
			break;
		default:
			BUG_ON(1);
	}
}
static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum =  ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

static u64 get_cpu_usage_nsec_self(void)
{
	char filename [] = "/proc/1234567890/sched";
	unsigned long msecs, nsecs;
	char *line = NULL;
	u64 total = 0;
	size_t len = 0;
	ssize_t chars;
	FILE *file;
	int ret;

	sprintf(filename, "/proc/%d/sched", getpid());
	file = fopen(filename, "r");
	BUG_ON(!file);

	while ((chars = getline(&line, &len, file)) != -1) {
		ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
			&msecs, &nsecs);
		if (ret == 2) {
			total = msecs*1e6 + nsecs;
			break;
		}
	}
	if (line)
		free(line);
	fclose(file);

	return total;
}
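/*
 * The worker/parent handshake below uses two mutexes as barriers:
 * the parent holds start_work_mutex and work_done_wait_mutex while
 * setting up; each worker posts its ready_for_work semaphore and then
 * blocks on start_work_mutex. When the parent unlocks it, all workers
 * pass through (lock then immediately unlock), replay their atoms,
 * post work_done_sem, and park on work_done_wait_mutex the same way
 * until the next iteration.
 */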
static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);

again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self();

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self();
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;

	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}
static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}
static void wait_for_tasks(void)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}
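/*
 * run_one_test() maintains two smoothed statistics across iterations:
 * run_avg is an exponential moving average with a 9/10 decay factor
 * (run_avg = (run_avg*9 + delta)/10), and std_dev approximates the
 * standard error of the mean run time as sum_fluct/nr_runs/sqrt(nr_runs).
 * In this version std_dev is computed but only run_avg makes it into
 * the per-run output line.
 */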
static void run_one_test(void)
{
	u64 T0, T1, delta, avg_delta, fluct, std_dev;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);
#endif

	printf("\n");

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}
static void test_calibrations(void)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printf("the run test took %Ld nsecs\n", T1-T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %Ld nsecs\n", T1-T0);
}
static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread;

	thread = threads__findnew(event->comm.tid, &threads, &last_match);

	dump_printf("%p [%p]: perf_event_comm: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dump_printf("problem processing perf_event_comm, skipping event.\n");
		return -1;
	}
	total_comm++;

	return 0;
}
struct raw_event_sample {
	u32 size;
	char data[0];
};

#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));		\
} while(0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
do {								\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);		\
} while (0)
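/*
 * Example expansion (using an invocation that appears later in this
 * file):
 *
 *	FILL_FIELD(wakeup_event, pid, event, raw->data);
 *
 * becomes:
 *
 *	wakeup_event.pid = (typeof(wakeup_event.pid))
 *			raw_field_value(event, "pid", raw->data);
 *
 * i.e. the same token names both the struct member and - via the '#'
 * stringification operator - the tracepoint field looked up in the
 * raw sample payload.
 */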
struct trace_switch_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

struct trace_runtime_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u64 runtime;
	u64 vruntime;
};

struct trace_wakeup_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 success;
	u32 cpu;
};

struct trace_fork_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*runtime_event)(struct trace_runtime_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,
			   struct event *,
			   int cpu,
			   u64 timestamp,
			   struct thread *thread);
};
static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct event *event,
		    int cpu __used,
		    u64 timestamp __used,
		    struct thread *thread __used)
{
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", event);

		printf(" ... pid %d woke up %s/%d\n",
			wakeup_event->common_pid,
			wakeup_event->comm,
			wakeup_event->pid);
	}

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);
}
static u64 cpu_last_switched[MAX_CPUS];

static void
replay_switch_event(struct trace_switch_event *switch_event,
		    struct event *event,
		    int cpu,
		    u64 timestamp,
		    struct thread *thread __used)
{
	struct task_desc *prev, *next;
	u64 timestamp0;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	if (verbose) {
		printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
			switch_event->prev_comm, switch_event->prev_pid,
			switch_event->next_comm, switch_event->next_pid,
			delta);
	}

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
}
static void
replay_fork_event(struct trace_fork_event *fork_event,
		  struct event *event,
		  int cpu __used,
		  u64 timestamp __used,
		  struct thread *thread __used)
{
	if (verbose) {
		printf("sched_fork event %p\n", event);
		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
		printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	}
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
}

static struct trace_sched_handler replay_ops = {
	.wakeup_event		= replay_wakeup_event,
	.switch_event		= replay_switch_event,
	.fork_event		= replay_fork_event,
};

struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(cmp_pid);
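/*
 * thread_lat_cmp() below chains the comparators of every dimension on
 * the given list and returns the first non-zero result, so a sort key
 * string such as "avg, max, switch, runtime" behaves as a
 * lexicographic multi-key comparison. The same machinery doubles as
 * the rbtree key: cmp_pid holds only the pid dimension, which turns
 * atom_root into a per-thread lookup tree.
 */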
static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}
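/*
 * __thread_latency_insert() is a standard Linux rbtree insertion:
 * walk down from the root comparing against each node with the given
 * sort keys, link the new node at the leaf slot found, then rebalance
 * via rb_insert_color(). Equal keys fall through to the right branch,
 * so duplicates are permitted.
 */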
static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void thread_atoms_insert(struct thread *thread)
{
	struct work_atoms *atoms;

	atoms = calloc(sizeof(*atoms), 1);
	if (!atoms)
		die("No memory");

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);
}
static void
latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event *event __used,
		   int cpu __used,
		   u64 timestamp __used,
		   struct thread *thread __used)
{
	/* should insert the newcomer */
}

__used
static char sched_out_state(struct trace_switch_event *switch_event)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];
}

static void
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom;

	atom = calloc(sizeof(*atom), 1);
	if (!atom)
		die("No memory");

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
}
static void
add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat)
		atoms->max_lat = delta;
	atoms->nb_atoms++;
}
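/*
 * Taken together, the helpers above implement a per-atom state
 * machine:
 *
 *	THREAD_SLEEPING --(wakeup)--> THREAD_WAIT_CPU
 *	THREAD_WAIT_CPU --(sched in)--> THREAD_SCHED_IN
 *
 * The scheduling delay charged to a thread is
 * sched_in_time - wake_up_time. A task switched out while still
 * runnable ('R') skips the sleeping stage: its wake_up_time equals
 * its sched_out_time. Atoms whose timestamps run backwards are moved
 * to THREAD_IGNORE instead of polluting the statistics.
 */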
static void
latency_switch_event(struct trace_switch_event *switch_event,
		     struct event *event __used,
		     int cpu,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
	if (!out_events) {
		thread_atoms_insert(sched_out);
		out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
		if (!out_events)
			die("out-event: Internal tree error");
	}
	add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);

	in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
	if (!in_events) {
		thread_atoms_insert(sched_in);
		in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
		if (!in_events)
			die("in-event: Internal tree error");
		/*
		 * Task came in that we have not heard about yet,
		 * add in an initial atom in runnable state:
		 */
		add_sched_out_event(in_events, 'R', timestamp);
	}
	add_sched_in_event(in_events, timestamp);
}
static void
latency_runtime_event(struct trace_runtime_event *runtime_event,
		      struct event *event __used,
		      int cpu,
		      u64 timestamp,
		      struct thread *this_thread __used)
{
	struct work_atoms *atoms;
	struct thread *thread;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	thread = threads__findnew(runtime_event->pid, &threads, &last_match);
	atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(thread);
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
		if (!atoms)
			die("in-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	add_runtime_event(atoms, runtime_event->runtime, timestamp);
}

static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct event *__event __used,
		     int cpu __used,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)
		return;

	wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(wakee);
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
		if (!atoms)
			die("wakeup-event: Internal tree error");
		add_sched_out_event(atoms, 'S', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;
		return;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
}
static struct trace_sched_handler lat_ops = {
	.wakeup_event		= latency_wakeup_event,
	.switch_event		= latency_switch_event,
	.runtime_event		= latency_runtime_event,
	.fork_event		= latency_fork_event,
};

static void output_lat_thread(struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(work_list->thread->comm, "swapper"))
		return;

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
	      (double)work_list->total_runtime / 1e6,
		 work_list->nb_atoms, (double)avg / 1e6,
		 (double)work_list->max_lat / 1e6);
}
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

	return 0;
}

static struct sort_dimension pid_sort_dimension = {
	.name			= "pid",
	.cmp			= pid_cmp,
};

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static struct sort_dimension avg_sort_dimension = {
	.name			= "avg",
	.cmp			= avg_cmp,
};

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static struct sort_dimension max_sort_dimension = {
	.name			= "max",
	.cmp			= max_cmp,
};

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static struct sort_dimension switch_sort_dimension = {
	.name			= "switch",
	.cmp			= switch_cmp,
};

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static struct sort_dimension runtime_sort_dimension = {
	.name			= "runtime",
	.cmp			= runtime_cmp,
};

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,
};

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
static LIST_HEAD(sort_list);

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	int i;

	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void setup_sorting(void);

static void sort_lat(void)
{
	struct rb_node *node;

	for (;;) {
		struct work_atoms *data;
		node = rb_first(&atom_root);
		if (!node)
			break;

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);
	}
}
static struct trace_sched_handler *trace_handler;

static void
process_sched_wakeup_event(struct raw_event_sample *raw,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, raw->data);

	FILL_ARRAY(wakeup_event, comm, event, raw->data);
	FILL_FIELD(wakeup_event, pid, event, raw->data);
	FILL_FIELD(wakeup_event, prio, event, raw->data);
	FILL_FIELD(wakeup_event, success, event, raw->data);
	FILL_FIELD(wakeup_event, cpu, event, raw->data);

	if (trace_handler->wakeup_event)
		trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
}
/*
 * Track the current task - that way we can know whether there are any
 * weird events, such as a task being switched away that is not current.
 */
static int max_cpu;

static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };

static struct thread *curr_thread[MAX_CPUS];

static char next_shortname1 = 'A';
static char next_shortname2 = '0';
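/*
 * map_switch_event() prints one output row per context switch, with a
 * column per CPU: a '*' marks the CPU the event came from, and each
 * column shows the two-character shortname of the task currently on
 * that CPU. Shortnames are handed out on first sight, cycling through
 * A0, B0, ... Z0, A1, ... - 260 labels before they wrap around.
 */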
static void
map_switch_event(struct trace_switch_event *switch_event,
		 struct event *event __used,
		 int this_cpu,
		 u64 timestamp,
		 struct thread *thread __used)
{
	struct thread *sched_out, *sched_in;
	int new_shortname;
	u64 timestamp0;
	s64 delta;
	int cpu;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > max_cpu)
		max_cpu = this_cpu;

	timestamp0 = cpu_last_switched[this_cpu];
	cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);

	curr_thread[this_cpu] = sched_in;

	printf("  ");

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = next_shortname1;
		sched_in->shortname[1] = next_shortname2;

		if (next_shortname1 < 'Z') {
			next_shortname1++;
		} else {
			next_shortname1='A';
			if (next_shortname2 < '9') {
				next_shortname2++;
			} else {
				next_shortname2='0';
			}
		}
		new_shortname = 1;
	}

	for (cpu = 0; cpu <= max_cpu; cpu++) {
		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (curr_thread[cpu]) {
			if (curr_thread[cpu]->pid)
				printf("%2s ", curr_thread[cpu]->shortname);
			else
				printf(".  ");
		} else
			printf("   ");
	}

	printf("  %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
			sched_in->shortname, sched_in->comm, sched_in->pid);
	} else {
		printf("\n");
	}
}
static void
process_sched_switch_event(struct raw_event_sample *raw,
			   struct event *event,
			   int this_cpu,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, raw->data);

	FILL_ARRAY(switch_event, prev_comm, event, raw->data);
	FILL_FIELD(switch_event, prev_pid, event, raw->data);
	FILL_FIELD(switch_event, prev_prio, event, raw->data);
	FILL_FIELD(switch_event, prev_state, event, raw->data);
	FILL_ARRAY(switch_event, next_comm, event, raw->data);
	FILL_FIELD(switch_event, next_pid, event, raw->data);
	FILL_FIELD(switch_event, next_prio, event, raw->data);

	if (curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (curr_pid[this_cpu] != switch_event.prev_pid)
			nr_context_switch_bugs++;
	}
	if (trace_handler->switch_event)
		trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);

	curr_pid[this_cpu] = switch_event.next_pid;
}
static void
process_sched_runtime_event(struct raw_event_sample *raw,
			    struct event *event,
			    int cpu __used,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct trace_runtime_event runtime_event;

	FILL_ARRAY(runtime_event, comm, event, raw->data);
	FILL_FIELD(runtime_event, pid, event, raw->data);
	FILL_FIELD(runtime_event, runtime, event, raw->data);
	FILL_FIELD(runtime_event, vruntime, event, raw->data);

	if (trace_handler->runtime_event)
		trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
}

static void
process_sched_fork_event(struct raw_event_sample *raw,
			 struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, raw->data);

	FILL_ARRAY(fork_event, parent_comm, event, raw->data);
	FILL_FIELD(fork_event, parent_pid, event, raw->data);
	FILL_ARRAY(fork_event, child_comm, event, raw->data);
	FILL_FIELD(fork_event, child_pid, event, raw->data);

	if (trace_handler->fork_event)
		trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
}

static void
process_sched_exit_event(struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	if (verbose)
		printf("sched_exit event %p\n", event);
}
static void
process_raw_event(event_t *raw_event __used, void *more_data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct raw_event_sample *raw = more_data;
	struct event *event;
	int type;

	type = trace_parse_common_type(raw->data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_stat_runtime"))
		process_sched_runtime_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
}
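/*
 * The sample payload below is parsed positionally: after the fixed ip
 * header, the optional fields appear in the order of the requested
 * PERF_SAMPLE_* bits (TIME, then CPU plus one u32 of padding, then
 * PERIOD), with the raw tracepoint data last - hence the stepwise
 * bumping of the more_data pointer.
 */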
static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread;
	u64 ip = event->ip.ip;
	u64 timestamp = -1;
	u32 cpu = -1;
	u64 period = 1;
	void *more_data = event->ip.__more_data;

	if (!(sample_type & PERF_SAMPLE_RAW))
		return 0;

	thread = threads__findnew(event->ip.pid, &threads, &last_match);

	if (sample_type & PERF_SAMPLE_TIME) {
		timestamp = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		cpu = *(u32 *)more_data;
		more_data += sizeof(u32);
		more_data += sizeof(u32); /* reserved */
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid, event->ip.tid,
		(void *)(long)ip,
		(long long)period);

	if (thread == NULL) {
		eprintf("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	process_raw_event(event, more_data, cpu, timestamp, thread);

	return 0;
}
static int
process_lost_event(event_t *event __used,
		   unsigned long offset __used,
		   unsigned long head __used)
{
	nr_lost_chunks++;
	nr_lost_events += event->lost.lost;

	return 0;
}

static int sample_type_check(u64 type)
{
	sample_type = type;

	if (!(sample_type & PERF_SAMPLE_RAW)) {
		fprintf(stderr,
			"No trace sample to read. Did you call perf record "
			"without -R?");
		return -1;
	}

	return 0;
}

static struct perf_file_handler file_handler = {
	.process_sample_event	= process_sample_event,
	.process_comm_event	= process_comm_event,
	.process_lost_event	= process_lost_event,
	.sample_type_check	= sample_type_check,
};

static int read_events(void)
{
	register_idle_thread(&threads, &last_match);
	register_perf_file_handler(&file_handler);

	return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd);
}
static void print_bad_events(void)
{
	if (nr_unordered_timestamps && nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
			nr_unordered_timestamps, nr_timestamps);
	}
	if (nr_lost_events && nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events/(double)nr_events*100.0,
			nr_lost_events, nr_events, nr_lost_chunks);
	}
	if (nr_state_machine_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
			nr_state_machine_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	if (nr_context_switch_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
			nr_context_switch_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}
static void __cmd_lat(void)
{
	struct rb_node *next;

	setup_pager();
	read_events();
	sort_lat();

	printf("\n -----------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms |\n");
	printf(" -----------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9Ld |\n",
		(double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events();
	printf("\n");
}
static struct trace_sched_handler map_ops = {
	.wakeup_event		= NULL,
	.switch_event		= map_switch_event,
	.runtime_event		= NULL,
	.fork_event		= NULL,
};

static void __cmd_map(void)
{
	max_cpu = sysconf(_SC_NPROCESSORS_CONF);
	setup_pager();
	read_events();
	print_bad_events();
}
static void __cmd_replay(void)
{
	unsigned long i;

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	read_events();

	printf("nr_run_events:        %ld\n", nr_run_events);
	printf("nr_sleep_events:      %ld\n", nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized:  %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();
	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();
}
static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|map|replay|trace}",
	NULL
};

static const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const latency_usage[] = {
	"perf sched latency [<options>]",
	NULL
};

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",
	NULL
};

static const struct option replay_options[] = {
	OPT_INTEGER('r', "repeat", &replay_repeat,
		    "repeat the workload replay N times (-1: infinite)"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};
static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);
		}
	}

	free(str);

	sort_dimension__add("pid", &cmp_pid);
}
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-M",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "sched:sched_switch:r",
	"-e", "sched:sched_stat_wait:r",
	"-e", "sched:sched_stat_sleep:r",
	"-e", "sched:sched_stat_iowait:r",
	"-e", "sched:sched_stat_runtime:r",
	"-e", "sched:sched_process_exit:r",
	"-e", "sched:sched_process_fork:r",
	"-e", "sched:sched_wakeup:r",
	"-e", "sched:sched_migrate_task:r",
};
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}
int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
	symbol__init();

	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting();
		__cmd_lat();
	} else if (!strcmp(argv[0], "map")) {
		trace_handler = &map_ops;
		setup_sorting();
		__cmd_map();
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		__cmd_replay();
	} else if (!strcmp(argv[0], "trace")) {
		/*
		 * Aliased to 'perf trace' for now:
		 */
		return cmd_trace(argc, argv, prefix);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}