#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/data_map.h"

#include <sys/types.h>
#include <sys/prctl.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>

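/*
 * builtin-sched.c: the 'perf sched' built-in command.
 *
 * The code below implements several sub-commands on top of the scheduler
 * tracepoints recorded into perf.data:
 *
 *   record  - record the sched_switch/sched_wakeup/... tracepoints
 *   latency - measure per-task wakeup latencies and runtimes
 *   map     - print a per-CPU map of context switches over time
 *   replay  - re-create the recorded tasks as real threads and replay
 *             their run/sleep/wakeup pattern
 *   trace   - aliased to 'perf trace'
 */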
static char const		*input_name = "perf.data";

static unsigned long		total_comm = 0;

static struct rb_root		threads;
static struct thread		*last_match;

static struct perf_header	*header;
static u64			sample_type;

static char			default_sort_order[] = "avg, max, switch, runtime";
static char			*sort_order = default_sort_order;

#define PR_SET_NAME		15	/* Set process name */

#define BUG_ON(x)		assert(!(x))

static u64			run_measurement_overhead;
static u64			sleep_measurement_overhead;

static unsigned long		nr_tasks;

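/*
 * Replay bookkeeping kept per recorded task: each task_desc carries an
 * ordered array of 'struct sched_atom' entries (run, sleep or wakeup,
 * see sched_event_type) that is built while parsing the trace and
 * walked again by the replay threads.
 */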
	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

enum sched_event_type {

	enum sched_event_type	type;

	struct task_desc	*wakee;

static struct task_desc		*pid_to_task[MAX_PID];

static struct task_desc		**tasks;

static pthread_mutex_t		start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64			start_time;

static pthread_mutex_t		work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long		nr_run_events;
static unsigned long		nr_sleep_events;
static unsigned long		nr_wakeup_events;

static unsigned long		nr_sleep_corrections;
static unsigned long		nr_run_events_optimized;

static unsigned long		targetless_wakeups;
static unsigned long		multitarget_wakeups;

static u64			cpu_usage;
static u64			runavg_cpu_usage;
static u64			parent_cpu_usage;
static u64			runavg_parent_cpu_usage;

static unsigned long		nr_runs;
static u64			sum_runtime;
static u64			sum_fluct;

static unsigned long		replay_repeat = 10;
static unsigned long		nr_timestamps;
static unsigned long		nr_unordered_timestamps;
static unsigned long		nr_state_machine_bugs;
static unsigned long		nr_context_switch_bugs;
static unsigned long		nr_events;
static unsigned long		nr_lost_chunks;
static unsigned long		nr_lost_events;

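/*
 * Latency-mode bookkeeping: every scheduling interval of a thread is
 * tracked as a 'work_atom' (sched-out time, wake-up time, sched-in
 * time, runtime), and all atoms of one thread are collected in a
 * 'work_atoms' list that ends up in an rbtree keyed by thread.
 */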
#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

	struct list_head	list;
	enum thread_state	state;

	struct list_head	work_list;
	struct thread		*thread;

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root		atom_root, sorted_atom_root;

static u64			all_runtime;
static u64			all_count;

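/*
 * Timing helpers for 'perf sched replay': get_nsecs() is the time base,
 * burn_nsecs() spins for a given period, sleep_nsecs() blocks in
 * nanosleep(), and the two calibrate_*() routines estimate the constant
 * overhead of those primitives (stored in run_measurement_overhead and
 * sleep_measurement_overhead) so that replayed intervals stay accurate.
 */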
static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec = nsecs / 999999999;

	nanosleep(&ts, NULL);
}

static void calibrate_run_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;

	for (i = 0; i < 10; i++) {

		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %Ld nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;

	for (i = 0; i < 10; i++) {

		min_delta = min(min_delta, delta);
	}
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
}

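/*
 * Trace -> replay translation: get_new_event() appends a new sched_atom
 * to a task, and the add_sched_event_{run,sleep,wakeup}() helpers fill
 * in the per-event details.  Consecutive RUN atoms are merged, and a
 * wakeup that targets a sleeping task is wired up through a semaphore
 * so that the replay threads can reproduce the dependency.
 */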
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = calloc(1, sizeof(*event));
	unsigned long idx = task->nr_events;

	event->timestamp = timestamp;

	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;
}

add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;
}

add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;
}

static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];
	if (task)
		return task;

	task = calloc(1, sizeof(*task));

	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;

	tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));

	tasks[task->nr] = task;

	printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}

static void print_task_traces(void)
{
	struct task_desc *task;

	for (i = 0; i < nr_tasks; i++) {

		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;

	for (i = 0; i < nr_tasks; i++) {

		add_sched_event_wakeup(task1, 0, task2);
	}
}

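/*
 * During replay each atom is acted out for real: RUN burns CPU for the
 * recorded duration, SLEEP blocks on the atom's semaphore, and WAKEUP
 * posts the semaphore of the task that was woken in the original trace.
 */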
process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
{
	delta = start_time + atom->timestamp - now;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (atom->wait_sem)
			ret = sem_wait(atom->wait_sem);
		break;
	case SCHED_EVENT_WAKEUP:
		if (atom->wait_sem)
			ret = sem_post(atom->wait_sem);
		break;
	}
}

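/*
 * Two ways of measuring CPU usage during replay: the parent uses
 * getrusage(RUSAGE_SELF), while each worker thread reads its own
 * se.sum_exec_runtime from /proc/<pid>/sched, which is more precise
 * (see the comment about the rusage numbers in run_one_test()).
 */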
static u64 get_cpu_usage_nsec_parent(void)
{
	err = getrusage(RUSAGE_SELF, &ru);

	sum  = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

static u64 get_cpu_usage_nsec_self(void)
{
	char filename[] = "/proc/1234567890/sched";
	unsigned long msecs, nsecs;

	sprintf(filename, "/proc/%d/sched", getpid());
	file = fopen(filename, "r");

	while ((chars = getline(&line, &len, file)) != -1) {
		ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
			&msecs, &nsecs);

			total = msecs*1e6 + nsecs;
	}

	return total;
}

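/*
 * Replay worker threads: each thread renames itself after the recorded
 * task, signals ready_for_work, then blocks on start_work_mutex, which
 * the parent holds until every thread is ready.  wait_for_tasks() uses
 * the same trick in reverse with work_done_wait_mutex to collect the
 * results of one replay round.
 */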
static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);

	ret = sem_post(&this_task->ready_for_work);

	ret = pthread_mutex_lock(&start_work_mutex);

	ret = pthread_mutex_unlock(&start_work_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_self();

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self();
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;

	ret = sem_post(&this_task->work_done_sem);

	ret = pthread_mutex_lock(&work_done_wait_mutex);

	ret = pthread_mutex_unlock(&work_done_wait_mutex);
}

static void create_tasks(void)
{
	struct task_desc *task;

	err = pthread_attr_init(&attr);
	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
	err = pthread_mutex_lock(&start_work_mutex);
	err = pthread_mutex_lock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {

		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
	}
}

static void wait_for_tasks(void)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();

	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {

		ret = sem_wait(&task->ready_for_work);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {

		ret = sem_wait(&task->work_done_sem);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {

		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

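/*
 * One replay round: the wall-clock time of wait_for_tasks() is
 * accumulated into sum_runtime, a running average and a rough standard
 * deviation are printed, and the measured child/parent CPU usage is
 * shown next to its running average.
 */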
static void run_one_test(void)
{
	u64 T0, T1, delta, avg_delta, fluct, std_dev;

	sum_runtime += delta;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;

	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);

	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

	/*
	 * rusage statistics are done by the parent, and are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}

static void test_calibrations(void)
{
	printf("the run test took %Ld nsecs\n", T1-T0);

	printf("the sleep test took %Ld nsecs\n", T1-T0);
}

process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread;

	thread = threads__findnew(event->comm.tid, &threads, &last_match);

	dump_printf("%p [%p]: perf_event_comm: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dump_printf("problem processing perf_event_comm, skipping event.\n");
	}
}

struct raw_event_sample {

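/*
 * Helpers for pulling typed fields out of a raw tracepoint sample:
 * raw_field_value()/raw_field_ptr() look a field up by name in the
 * event's format description, and these macros copy it into the
 * matching member of a local trace_*_event structure.
 */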
#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));		\
} while (0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
do {								\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);		\
} while (0)

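/*
 * Local mirrors of the sched tracepoint layouts (sched_switch,
 * sched_stat_runtime, sched_wakeup, sched_process_fork); since fields
 * are filled in by name, only the members this tool actually consumes
 * need to match.
 */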
struct trace_switch_event {

	u8 common_preempt_count;

struct trace_runtime_event {

	u8 common_preempt_count;

struct trace_wakeup_event {

	u8 common_preempt_count;

struct trace_fork_event {

	u8 common_preempt_count;

	char parent_comm[16];

struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*runtime_event)(struct trace_runtime_event *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,
			   struct event *,
			   int cpu,
			   u64 timestamp,
			   struct thread *thread);
};

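/*
 * The three sub-commands plug into the same tracepoint stream through
 * this ops table: replay_ops rebuilds and re-executes the workload,
 * lat_ops feeds the latency accounting, and map_ops only cares about
 * context switches for the per-CPU map.
 */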
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct event *event,
		    int cpu __used,
		    u64 timestamp __used,
		    struct thread *thread __used)
{
	struct task_desc *waker, *wakee;

	printf("sched_wakeup event %p\n", event);

	printf(" ... pid %d woke up %s/%d\n",
		wakeup_event->common_pid,
		wakeup_event->comm,
		wakeup_event->pid);

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);
}

static u64 cpu_last_switched[MAX_CPUS];

replay_switch_event(struct trace_switch_event *switch_event,
		    struct event *event,
		    int cpu,
		    u64 timestamp,
		    struct thread *thread __used)
{
	struct task_desc *prev, *next;

	printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
		switch_event->prev_comm, switch_event->prev_pid,
		switch_event->next_comm, switch_event->next_pid,
		delta);

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
}

replay_fork_event(struct trace_fork_event *fork_event,
		  struct event *event,
		  int cpu __used,
		  u64 timestamp __used,
		  struct thread *thread __used)
{
	printf("sched_fork event %p\n", event);
	printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
	printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);

	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
}

static struct trace_sched_handler replay_ops = {
	.wakeup_event		= replay_wakeup_event,
	.switch_event		= replay_switch_event,
	.fork_event		= replay_fork_event,
};

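/*
 * Latency mode keeps one work_atoms entry per thread in an rbtree
 * (atom_root).  The tree is ordered by the configurable sort keys
 * (-s pid/avg/max/switch/runtime) via thread_lat_cmp(), and sort_lat()
 * later re-inserts everything into sorted_atom_root for output.
 */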
struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(cmp_pid);

thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void thread_atoms_insert(struct thread *thread)
{
	struct work_atoms *atoms;

	atoms = calloc(sizeof(*atoms), 1);

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);
}

latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event *event __used,
		   int cpu __used,
		   u64 timestamp __used,
		   struct thread *thread __used)
{
	/* should insert the newcomer */
}

static char sched_out_state(struct trace_switch_event *switch_event)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];
}

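/*
 * Latency state machine: sched_switch opens a work_atom for the thread
 * that was switched out ('R' means it is still runnable and therefore
 * already waiting for a CPU), sched_wakeup stamps wake_up_time on the
 * last atom, and the next sched-in computes the wakeup latency as
 * sched_in_time - wake_up_time, updating total_lat/max_lat.
 */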
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom;

	atom = calloc(sizeof(*atom), 1);

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
}

add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat)
		atoms->max_lat = delta;
}

latency_switch_event(struct trace_switch_event *switch_event,
		     struct event *event __used,
		     int cpu,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
	if (!out_events) {
		thread_atoms_insert(sched_out);
		out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
		if (!out_events)
			die("out-event: Internal tree error");
	}
	add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);

	in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
	if (!in_events) {
		thread_atoms_insert(sched_in);
		in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
		if (!in_events)
			die("in-event: Internal tree error");
		/*
		 * A task we have not heard about yet came in;
		 * add an initial atom in runnable state:
		 */
		add_sched_out_event(in_events, 'R', timestamp);
	}

	add_sched_in_event(in_events, timestamp);
}

latency_runtime_event(struct trace_runtime_event *runtime_event,
		      struct event *event __used,
		      int cpu,
		      u64 timestamp,
		      struct thread *this_thread __used)
{
	struct work_atoms *atoms;
	struct thread *thread;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	thread = threads__findnew(runtime_event->pid, &threads, &last_match);
	atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(thread);
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
		if (!atoms)
			die("in-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	add_runtime_event(atoms, runtime_event->runtime, timestamp);
}

latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct event *__event __used,
		     int cpu __used,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;

	/* Note for later: it may be interesting to observe the failing cases. */
	if (!wakeup_event->success)
		return;

	wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(wakee);
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
		if (!atoms)
			die("wakeup-event: Internal tree error");
		add_sched_out_event(atoms, 'S', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;
		return;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
}

static struct trace_sched_handler lat_ops = {
	.wakeup_event		= latency_wakeup_event,
	.switch_event		= latency_switch_event,
	.runtime_event		= latency_runtime_event,
	.fork_event		= latency_fork_event,
};

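/*
 * Per-thread output line of 'perf sched latency': total runtime, number
 * of switches (atoms), average and maximum wakeup latency.  The totals
 * accumulated here feed the TOTAL line printed by __cmd_lat().
 */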
static void output_lat_thread(struct work_atoms *work_list)
{
	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(work_list->thread->comm, "swapper"))
		return;

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
		(double)work_list->total_runtime / 1e6,
		work_list->nb_atoms, (double)avg / 1e6,
		(double)work_list->max_lat / 1e6);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

	return 0;
}

static struct sort_dimension pid_sort_dimension = {
	.name	= "pid",
	.cmp	= pid_cmp,
};

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static struct sort_dimension avg_sort_dimension = {
	.name	= "avg",
	.cmp	= avg_cmp,
};

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static struct sort_dimension max_sort_dimension = {
	.name	= "max",
	.cmp	= max_cmp,
};

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static struct sort_dimension switch_sort_dimension = {
	.name	= "switch",
	.cmp	= switch_cmp,
};

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static struct sort_dimension runtime_sort_dimension = {
	.name	= "runtime",
	.cmp	= runtime_cmp,
};

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,
};

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))

static LIST_HEAD(sort_list);

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void setup_sorting(void);

static void sort_lat(void)
{
	struct rb_node *node;

	for (;;) {
		struct work_atoms *data;
		node = rb_first(&atom_root);
		if (!node)
			break;

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);
	}
}

static struct trace_sched_handler *trace_handler;

process_sched_wakeup_event(struct raw_event_sample *raw,
			   struct event *event,
			   int cpu,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, raw->data);

	FILL_ARRAY(wakeup_event, comm, event, raw->data);
	FILL_FIELD(wakeup_event, pid, event, raw->data);
	FILL_FIELD(wakeup_event, prio, event, raw->data);
	FILL_FIELD(wakeup_event, success, event, raw->data);
	FILL_FIELD(wakeup_event, cpu, event, raw->data);

	if (trace_handler->wakeup_event)
		trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
}

/*
 * Track the current task - that way we can know whether there are any
 * weird events, such as a task being switched away that is not current.
 */
static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };

static struct thread *curr_thread[MAX_CPUS];

static char next_shortname1 = 'A';
static char next_shortname2 = '0';

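/*
 * 'perf sched map' view: every task that shows up gets a two-character
 * shortname ('A0', 'B0', ... rolling over through 'Z9'), and each
 * sched_switch prints one row with the shortname currently running on
 * every CPU plus a timestamp, so migrations and preemptions are visible
 * at a glance.
 */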
map_switch_event(struct trace_switch_event *switch_event,
		 struct event *event __used,
		 int this_cpu,
		 u64 timestamp,
		 struct thread *thread __used)
{
	struct thread *sched_out, *sched_in;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > max_cpu)
		max_cpu = this_cpu;

	timestamp0 = cpu_last_switched[this_cpu];
	cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);

	curr_thread[this_cpu] = sched_in;

	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = next_shortname1;
		sched_in->shortname[1] = next_shortname2;

		if (next_shortname1 < 'Z') {
			next_shortname1++;
		} else {
			next_shortname1 = 'A';
			if (next_shortname2 < '9') {
				next_shortname2++;
			} else {
				next_shortname2 = '0';
			}
		}
	}

	for (cpu = 0; cpu <= max_cpu; cpu++) {
		if (cpu != this_cpu)

		if (curr_thread[cpu]) {
			if (curr_thread[cpu]->pid)
				printf("%2s ", curr_thread[cpu]->shortname);
		}
	}

	printf(" %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
			sched_in->shortname, sched_in->comm, sched_in->pid);
	}
}

process_sched_switch_event(struct raw_event_sample *raw,
			   struct event *event,
			   int this_cpu,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, raw->data);

	FILL_ARRAY(switch_event, prev_comm, event, raw->data);
	FILL_FIELD(switch_event, prev_pid, event, raw->data);
	FILL_FIELD(switch_event, prev_prio, event, raw->data);
	FILL_FIELD(switch_event, prev_state, event, raw->data);
	FILL_ARRAY(switch_event, next_comm, event, raw->data);
	FILL_FIELD(switch_event, next_pid, event, raw->data);
	FILL_FIELD(switch_event, next_prio, event, raw->data);

	if (curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is not current?
		 */
		if (curr_pid[this_cpu] != switch_event.prev_pid)
			nr_context_switch_bugs++;
	}
	if (trace_handler->switch_event)
		trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);

	curr_pid[this_cpu] = switch_event.next_pid;
}

process_sched_runtime_event(struct raw_event_sample *raw,
			    struct event *event,
			    int cpu,
			    u64 timestamp __used,
			    struct thread *thread __used)
{
	struct trace_runtime_event runtime_event;

	FILL_ARRAY(runtime_event, comm, event, raw->data);
	FILL_FIELD(runtime_event, pid, event, raw->data);
	FILL_FIELD(runtime_event, runtime, event, raw->data);
	FILL_FIELD(runtime_event, vruntime, event, raw->data);

	if (trace_handler->runtime_event)
		trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
}

process_sched_fork_event(struct raw_event_sample *raw,
			 struct event *event,
			 int cpu,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, raw->data);

	FILL_ARRAY(fork_event, parent_comm, event, raw->data);
	FILL_FIELD(fork_event, parent_pid, event, raw->data);
	FILL_ARRAY(fork_event, child_comm, event, raw->data);
	FILL_FIELD(fork_event, child_pid, event, raw->data);

	if (trace_handler->fork_event)
		trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
}

process_sched_exit_event(struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	printf("sched_exit event %p\n", event);
}

process_raw_event(event_t *raw_event __used, void *more_data,
		  int cpu, u64 timestamp, struct thread *thread)
{
	struct raw_event_sample *raw = more_data;
	struct event *event;

	type = trace_parse_common_type(raw->data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_stat_runtime"))
		process_sched_runtime_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
}

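/*
 * PERF_RECORD_SAMPLE payload layout used here: after the IP/pid/tid
 * header, the optional TIME, CPU (plus a reserved u32) and PERIOD
 * fields are peeled off in the order indicated by sample_type before
 * the raw tracepoint data is handed to process_raw_event().
 */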
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread;
	u64 ip = event->ip.ip;

	void *more_data = event->ip.__more_data;

	if (!(sample_type & PERF_SAMPLE_RAW))
		return 0;

	thread = threads__findnew(event->ip.pid, &threads, &last_match);

	if (sample_type & PERF_SAMPLE_TIME) {
		timestamp = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		cpu = *(u32 *)more_data;
		more_data += sizeof(u32);
		more_data += sizeof(u32); /* reserved */
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid, event->ip.tid,
		(void *)(long)ip,
		period);

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (thread == NULL) {
		eprintf("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	process_raw_event(event, more_data, cpu, timestamp, thread);

	return 0;
}

process_lost_event(event_t *event __used,
		   unsigned long offset __used,
		   unsigned long head __used)
{
	nr_lost_events += event->lost.lost;
}

static int sample_type_check(u64 type)
{
	if (!(sample_type & PERF_SAMPLE_RAW)) {

			"No trace sample to read. Did you call perf record "

	}

	return 0;
}

static struct perf_file_handler file_handler = {
	.process_sample_event	= process_sample_event,
	.process_comm_event	= process_comm_event,
	.process_lost_event	= process_lost_event,
	.sample_type_check	= sample_type_check,
};

static int read_events(void)
{
	register_idle_thread(&threads, &last_match);
	register_perf_file_handler(&file_handler);

	return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd);
}

static void print_bad_events(void)
{
	if (nr_unordered_timestamps && nr_timestamps) {
		printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
			nr_unordered_timestamps, nr_timestamps);
	}
	if (nr_lost_events && nr_events) {
		printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events/(double)nr_events*100.0,
			nr_lost_events, nr_events, nr_lost_chunks);
	}
	if (nr_state_machine_bugs && nr_timestamps) {
		printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
			nr_state_machine_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	if (nr_context_switch_bugs && nr_timestamps) {
		printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
			nr_context_switch_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}

static void __cmd_lat(void)
{
	struct rb_node *next;

	printf("\n -----------------------------------------------------------------------------------------\n");
	printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms |\n");
	printf(" -----------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf(" TOTAL: |%11.3f ms |%9Ld |\n",
		(double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");
}

static struct trace_sched_handler map_ops = {
	.wakeup_event		= NULL,
	.switch_event		= map_switch_event,
	.runtime_event		= NULL,
};

static void __cmd_map(void)
{
	max_cpu = sysconf(_SC_NPROCESSORS_CONF);
}

static void __cmd_replay(void)
{
	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	printf("nr_run_events: %ld\n", nr_run_events);
	printf("nr_sleep_events: %ld\n", nr_sleep_events);
	printf("nr_wakeup_events: %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups: %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();
}

static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|map|replay|trace}",
	NULL
};

static const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",

	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const latency_usage[] = {
	"perf sched latency [<options>]",
	NULL
};

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		    "sort by key(s): runtime, switch, avg, max"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",
	NULL
};

static const struct option replay_options[] = {
	OPT_INTEGER('r', "repeat", &replay_repeat,
		    "repeat the workload replay N times (-1: infinite)"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);
		}
	}

	sort_dimension__add("pid", &cmp_pid);
}

static const char *record_args[] = {

	"-e", "sched:sched_switch:r",
	"-e", "sched:sched_stat_wait:r",
	"-e", "sched:sched_stat_sleep:r",
	"-e", "sched:sched_stat_iowait:r",
	"-e", "sched:sched_stat_runtime:r",
	"-e", "sched:sched_process_exit:r",
	"-e", "sched:sched_process_fork:r",
	"-e", "sched:sched_wakeup:r",
	"-e", "sched:sched_migrate_task:r",
};

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

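/*
 * Sub-command dispatch: "rec*" re-execs 'perf record' with the
 * tracepoint list above, "lat*" and "rep*" parse their own option
 * tables before running, "map" needs no extra options, and "trace"
 * simply forwards to cmd_trace().
 */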
int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;

		argc = parse_options(argc, argv, latency_options, latency_usage, 0);
		if (argc)
			usage_with_options(latency_usage, latency_options);

	} else if (!strcmp(argv[0], "map")) {
		trace_handler = &map_ops;

	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;

		argc = parse_options(argc, argv, replay_options, replay_usage, 0);
		if (argc)
			usage_with_options(replay_usage, replay_options);

	} else if (!strcmp(argv[0], "trace")) {
		/*
		 * Aliased to 'perf trace' for now:
		 */
		return cmd_trace(argc, argv, prefix);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}