// SPDX-License-Identifier: GPL-2.0
#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/time-utils.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/event.h"

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/prctl.h>
#include <sys/resource.h>

#include <semaphore.h>

#include <api/fs/fs.h>
#include <perf/cpumap.h>
#include <linux/time64.h>
#include <linux/err.h>

#include <linux/ctype.h>
#define PR_SET_NAME		15               /* Set process name */

#define MAX_PID			1024000

static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

/* fields of struct task_desc (per-task replay bookkeeping) */
	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

/* fields of struct sched_atom (one recorded scheduler event) */
	enum sched_event_type	type;
	struct task_desc	*wakee;

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
/* task state bitmask, copied from include/linux/sched.h */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
/* fields of struct work_atom (a single latency measurement) */
	struct list_head	list;
	enum thread_state	state;

/* fields of struct work_atoms (per-thread list of work atoms) */
	struct list_head	work_list;
	struct thread		*thread;

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
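/*
 * Per-subcommand tracepoint callbacks: each sched subcommand (replay,
 * latency, map) fills in the handlers it needs and leaves the rest NULL;
 * the process_sched_*_event() wrappers further below only invoke a
 * handler when it is set.
 */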
struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct perf_sample *sample,
				  struct machine *machine);
};
#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED

struct perf_sched_map {
	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
	int			*comp_cpus;
	bool			 comp;
	struct perf_thread_map	*color_pids;
	const char		*color_pids_str;
	struct perf_cpu_map	*color_cpus;
	const char		*color_cpus_str;
	struct perf_cpu_map	*cpus;
	const char		*cpus_str;
};
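/*
 * One perf_sched instance carries the state for whichever subcommand is
 * running: the synthetic task/atom model used by replay, the latency
 * rbtrees, the map display state and the timehist options, plus counters
 * (lost events, unordered timestamps, context-switch bugs) used for the
 * sanity summary printed at the end.
 */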
struct perf_sched {
	struct perf_tool tool;
	const char	 *sort_order;
	unsigned long	 nr_tasks;
	struct task_desc **pid_to_task;
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	pthread_mutex_t	 start_work_mutex;
	pthread_mutex_t	 work_done_wait_mutex;
	/*
	 * Track the current task - that way we can know whether there's any
	 * weird events, such as a task being switched away that is not current.
	 */
	u32		 curr_pid[MAX_CPUS];
	struct thread	 *curr_thread[MAX_CPUS];
	char		 next_shortname1;
	char		 next_shortname2;
	unsigned int	 replay_repeat;
	unsigned long	 nr_run_events;
	unsigned long	 nr_sleep_events;
	unsigned long	 nr_wakeup_events;
	unsigned long	 nr_sleep_corrections;
	unsigned long	 nr_run_events_optimized;
	unsigned long	 targetless_wakeups;
	unsigned long	 multitarget_wakeups;
	unsigned long	 nr_runs;
	unsigned long	 nr_timestamps;
	unsigned long	 nr_unordered_timestamps;
	unsigned long	 nr_context_switch_bugs;
	unsigned long	 nr_events;
	unsigned long	 nr_lost_chunks;
	unsigned long	 nr_lost_events;
	u64		 run_measurement_overhead;
	u64		 sleep_measurement_overhead;
	u64		 runavg_cpu_usage;
	u64		 parent_cpu_usage;
	u64		 runavg_parent_cpu_usage;
	u64		 cpu_last_switched[MAX_CPUS];
	struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
	struct list_head sort_list, cmp_pid;
	struct perf_sched_map map;

	/* options for timehist command */
	unsigned int	max_stack;
	bool		show_cpu_visual;
	bool		show_migrations;
	const char	*time_str;
	struct perf_time_interval ptime;
	struct perf_time_interval hist_time;
};
/* per thread run time data */
struct thread_runtime {
	u64 last_time;      /* time of previous sched in/out event */
	u64 dt_run;         /* run time */
	u64 dt_sleep;       /* time between CPU access by sleep (off cpu) */
	u64 dt_iowait;      /* time between CPU access by iowait (off cpu) */
	u64 dt_preempt;     /* time between CPU access by preempt (off cpu) */
	u64 dt_delay;       /* time between wakeup and sched-in */
	u64 ready_to_run;   /* time of wakeup */

	struct stats run_stats;

	u64 total_sleep_time;
	u64 total_iowait_time;
	u64 total_preempt_time;
	u64 total_delay_time;
};
/* per event run time data */
struct evsel_runtime {
	u64 *last_time; /* time this event was last seen per cpu */
	u32 ncpu;       /* highest cpu slot allocated */
};

/* per cpu idle time data */
struct idle_thread_runtime {
	struct thread_runtime	tr;
	struct thread		*last_thread;
	struct rb_root_cached	sorted_root;
	struct callchain_root	callchain;
	struct callchain_cursor	cursor;
};
/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";

static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec = nsecs / 999999999;

	nanosleep(&ts, NULL);
}
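/*
 * Replay reproduces recorded run times by spinning in burn_nsecs() and
 * sleeping in sleep_nsecs().  Both primitives have a measurable cost of
 * their own, so the calibration below estimates that overhead first and
 * burn_nsecs() compensates for it.
 */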
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(sched, 0);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1 - T0;
		min_delta = min(min_delta, delta);
	}
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}
static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}
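/*
 * The replayed workload is modelled as a per-task list of sched_atoms:
 * RUN atoms burn CPU for the recorded duration, SLEEP atoms wait on a
 * semaphore, and WAKEUP atoms post the semaphore of the wakee's pending
 * sleep, so the recorded wakeup dependencies are preserved at replay time.
 */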
static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one with it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
}
static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
}
static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, u64 task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;
}
static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)
{
	struct task_desc *task;
	static int pid_max;

	if (sched->pid_to_task == NULL) {
		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
			pid_max = MAX_PID;
		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
	}
	if (pid >= (unsigned long)pid_max) {
		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
			sizeof(struct task_desc *))) == NULL);
		while (pid >= (unsigned long)pid_max)
			sched->pid_to_task[pid_max++] = NULL;
	}

	task = sched->pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;
	sched->nr_tasks++;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	if (verbose > 0)
		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

	return task;
}
static void print_task_traces(struct perf_sched *sched)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}
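/*
 * Presumably to keep the replayed set of tasks from stalling, every task
 * is given a wakeup targeting the next task (wrapping around), so each
 * initial sleep has at least one waker.
 */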
static void add_cross_task_wakeups(struct perf_sched *sched)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		j = i + 1;
		if (j == sched->nr_tasks)
			j = 0;
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);
	}
}
static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(sched, atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		ret = sem_wait(atom->wait_sem);
		break;
	case SCHED_EVENT_WAKEUP:
		ret = sem_post(atom->wait_sem);
		break;
	case SCHED_EVENT_MIGRATION:
		break;
	}
}
static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum  = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
	sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;

	return sum;
}
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
	struct perf_event_attr attr;
	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
	int fd;
	struct rlimit limit;
	bool need_privilege = false;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

	if (fd < 0) {
		if (errno == EMFILE) {
			BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
			limit.rlim_cur += sched->nr_tasks - cur_task;
			if (limit.rlim_cur > limit.rlim_max) {
				limit.rlim_max = limit.rlim_cur;
				need_privilege = true;
			}
			if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
				if (need_privilege && errno == EPERM)
					strcpy(info, "Need privilege\n");
			} else
				strcpy(info, "Have a try with -f option\n");
		}
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n%s", fd,
		       str_error_r(errno, sbuf, sizeof(sbuf)), info);
	}

	return fd;
}
static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

struct sched_thread_parms {
	struct task_desc  *task;
	struct perf_sched *sched;
	int		   fd;
};
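/*
 * Worker thread protocol for replay: each thread signals ready_for_work,
 * then blocks on start_work_mutex, which the parent holds until every
 * thread is ready; after processing its atoms it posts work_done_sem and
 * blocks on work_done_wait_mutex until the parent has collected results.
 */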
static void *thread_func(void *ctx)
{
	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd = parms->fd;

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);

	ret = sem_post(&this_task->ready_for_work);
	ret = pthread_mutex_lock(&sched->start_work_mutex);
	ret = pthread_mutex_unlock(&sched->start_work_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		perf_sched__process_event(sched, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);

	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);

	return NULL;
}
static void create_tasks(struct perf_sched *sched)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
	err = pthread_mutex_lock(&sched->start_work_mutex);
	err = pthread_mutex_lock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		parms->fd = self_open_counters(sched, i);
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);
	}
}
static void wait_for_tasks(struct perf_sched *sched)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	pthread_mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
					  sched->parent_cpu_usage) / sched->replay_repeat;

	ret = pthread_mutex_lock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}
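/*
 * run_one_test() times one full replay pass and keeps running averages
 * over replay_repeat iterations, mirroring the runavg_cpu_usage
 * bookkeeping done in wait_for_tasks() above.
 */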
static void run_one_test(struct perf_sched *sched)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks(sched);
	T1 = get_nsecs();

	delta = T1 - T0;
	sched->sum_runtime += delta;
	sched->nr_runs++;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);

	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);

	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
		(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);

	printf("\n");

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
}
static void test_calibrations(struct perf_sched *sched)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(sched, NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}
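/*
 * 'perf sched replay' handlers: rebuild the task and atom model from the
 * recorded sched_wakeup/sched_switch/fork events of a perf.data session
 * instead of from live tracing.
 */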
static int replay_wakeup_event(struct perf_sched *sched,
			       struct evsel *evsel, struct perf_sample *sample,
			       struct machine *machine __maybe_unused)
{
	const char *comm = evsel__strval(evsel, sample, "comm");
	const u32 pid	 = evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

	if (verbose > 0) {
		printf("sched_wakeup event %p\n", evsel);

		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
	}

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);
	return 0;
}
static int replay_switch_event(struct perf_sched *sched,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)
{
	const char *prev_comm  = evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm  = evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose > 0)
		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = sched->cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp, prev_state);

	return 0;
}
static int replay_fork_event(struct perf_sched *sched,
			     union perf_event *event,
			     struct machine *machine)
{
	struct thread *child, *parent;

	child = machine__findnew_thread(machine, event->fork.pid,
					event->fork.tid);
	parent = machine__findnew_thread(machine, event->fork.ppid,
					 event->fork.ptid);

	if (child == NULL || parent == NULL) {
		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
			 child, parent);
		return 0;
	}

	if (verbose > 0) {
		printf("fork event\n");
		printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
		printf("... child: %s/%d\n", thread__comm_str(child), child->tid);
	}

	register_pid(sched, parent->tid, thread__comm_str(parent));
	register_pid(sched, child->tid, thread__comm_str(child));
	return 0;
}
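/*
 * 'perf sched latency' machinery: one work_atoms container per thread,
 * kept in an rbtree ordered by the user-selected sort keys (see
 * sort_dimension__add() and thread_lat_cmp() below).
 */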
struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

/*
 * handle runtime stats saved per thread
 */
static struct thread_runtime *thread__init_runtime(struct thread *thread)
{
	struct thread_runtime *r;

	r = zalloc(sizeof(struct thread_runtime));
	if (!r)
		return NULL;

	init_stats(&r->run_stats);
	thread__set_priv(thread, r);

	return r;
}

static struct thread_runtime *thread__get_runtime(struct thread *thread)
{
	struct thread_runtime *tr;

	tr = thread__priv(thread);
	if (tr == NULL) {
		tr = thread__init_runtime(thread);
		if (tr == NULL)
			pr_debug("Failed to malloc memory for runtime data.\n");
	}

	return tr;
}
*list
, struct work_atoms
*l
, struct work_atoms
*r
)
949 struct sort_dimension
*sort
;
952 BUG_ON(list_empty(list
));
954 list_for_each_entry(sort
, list
, list
) {
955 ret
= sort
->cmp(l
, r
);
963 static struct work_atoms
*
964 thread_atoms_search(struct rb_root_cached
*root
, struct thread
*thread
,
965 struct list_head
*sort_list
)
967 struct rb_node
*node
= root
->rb_root
.rb_node
;
968 struct work_atoms key
= { .thread
= thread
};
971 struct work_atoms
*atoms
;
974 atoms
= container_of(node
, struct work_atoms
, node
);
976 cmp
= thread_lat_cmp(sort_list
, &key
, atoms
);
978 node
= node
->rb_left
;
980 node
= node
->rb_right
;
982 BUG_ON(thread
!= atoms
->thread
);
static void
__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
	bool leftmost = true;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else {
			new = &((*new)->rb_right);
			leftmost = false;
		}
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color_cached(&data->node, root, leftmost);
}
*sched
, struct thread
*thread
)
1019 struct work_atoms
*atoms
= zalloc(sizeof(*atoms
));
1021 pr_err("No memory at %s\n", __func__
);
1025 atoms
->thread
= thread__get(thread
);
1026 INIT_LIST_HEAD(&atoms
->work_list
);
1027 __thread_latency_insert(&sched
->atom_root
, atoms
, &sched
->cmp_pid
);
1031 static char sched_out_state(u64 prev_state
)
1033 const char *str
= TASK_STATE_TO_CHAR_STR
;
1035 return str
[prev_state
];
static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}
static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}
static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_start = atom->wake_up_time;
		atoms->max_lat_end = timestamp;
	}
}
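/*
 * A context switch closes the current atom of the task being scheduled
 * out and opens/updates the atom of the task being scheduled in; the
 * scheduling latency is the distance between wake_up_time and
 * sched_in_time recorded above.
 */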
static int latency_switch_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu, err = -1;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, -1, prev_pid);
	sched_in = machine__findnew_thread(machine, -1, next_pid);
	if (sched_out == NULL || sched_in == NULL)
		goto out_put;

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched, sched_out))
			goto out_put;
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			goto out_put;
		}
	}
	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
		goto out_put;

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched, sched_in))
			goto out_put;
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		/*
		 * A task came in that we have not heard about yet -
		 * add in an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			goto out_put;
	}
	add_sched_in_event(in_events, timestamp);
	err = 0;
out_put:
	thread__put(sched_out);
	thread__put(sched_in);
	return err;
}
static int latency_runtime_event(struct perf_sched *sched,
				 struct evsel *evsel,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	const u32 pid	  = evsel__intval(evsel, sample, "pid");
	const u64 runtime = evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, -1, pid);
	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu, err = -1;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(sched, thread))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
		if (!atoms) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	add_runtime_event(atoms, runtime, timestamp);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}
static int latency_wakeup_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 pid = evsel__intval(evsel, sample, "pid");
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;

	wakee = machine__findnew_thread(machine, -1, pid);
	if (wakee == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, wakee))
			return -1;
		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
		if (!atoms) {
			pr_err("wakeup-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			return -1;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * As we do not guarantee the wakeup event happens when
	 * task is out of run queue, also may happen when task is
	 * on run queue and wakeup only change ->state to TASK_RUNNING,
	 * then we should not set the ->wake_up_time when wake up a
	 * task which is on run queue.
	 *
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at only one, so don't
	 * skip in this case.
	 */
	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		return 0;

	sched->nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		sched->nr_unordered_timestamps++;
		return 0;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;

	return 0;
}
static int latency_migrate_task_event(struct perf_sched *sched,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	const u32 pid = evsel__intval(evsel, sample, "pid");
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;
	int err = -1;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (sched->profile_cpu == -1)
		return 0;

	migrant = machine__findnew_thread(machine, -1, pid);
	if (migrant == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, migrant))
			goto out_put;
		register_pid(sched, migrant->tid, thread__comm_str(migrant));
		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
		if (!atoms) {
			pr_err("migration-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	sched->nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		sched->nr_unordered_timestamps++;

	err = 0;
out_put:
	thread__put(migrant);
	return err;
}
static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;
	char max_lat_start[32], max_lat_end[32];

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
		return;

	sched->all_runtime += work_list->total_runtime;
	sched->all_count   += work_list->nb_atoms;

	if (work_list->num_merged > 1)
		ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
	else
		ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;
	timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
	timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));

	printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
	       (double)work_list->total_runtime / NSEC_PER_MSEC,
	       work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
	       (double)work_list->max_lat / NSEC_PER_MSEC,
	       max_lat_start, max_lat_end);
}
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread == r->thread)
		return 0;
	if (l->thread->tid < r->thread->tid)
		return -1;
	if (l->thread->tid > r->thread->tid)
		return 1;
	return (int)(l->thread - r->thread);
}

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	size_t i;
	static struct sort_dimension avg_sort_dimension = {
		.name = "avg",
		.cmp  = avg_cmp,
	};
	static struct sort_dimension max_sort_dimension = {
		.name = "max",
		.cmp  = max_cmp,
	};
	static struct sort_dimension pid_sort_dimension = {
		.name = "pid",
		.cmp  = pid_cmp,
	};
	static struct sort_dimension runtime_sort_dimension = {
		.name = "runtime",
		.cmp  = runtime_cmp,
	};
	static struct sort_dimension switch_sort_dimension = {
		.name = "switch",
		.cmp  = switch_cmp,
	};
	struct sort_dimension *available_sorts[] = {
		&pid_sort_dimension,
		&avg_sort_dimension,
		&max_sort_dimension,
		&switch_sort_dimension,
		&runtime_sort_dimension,
	};

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);
			return 0;
		}
	}

	return -1;
}
static void perf_sched__sort_lat(struct perf_sched *sched)
{
	struct rb_node *node;
	struct rb_root_cached *root = &sched->atom_root;
again:
	for (;;) {
		struct work_atoms *data;
		node = rb_first_cached(root);
		if (!node)
			break;

		rb_erase_cached(node, root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
	}
	if (root == &sched->atom_root) {
		root = &sched->merged_atom_root;
		goto again;
	}
}
static int process_sched_wakeup_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->wakeup_event)
		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

	return 0;
}
static bool thread__has_color(struct thread *thread)
{
	union map_priv priv = {
		.ptr = thread__priv(thread),
	};

	return priv.color;
}

static struct thread *
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
{
	struct thread *thread = machine__findnew_thread(machine, pid, tid);
	union map_priv priv = {
		.color = true,
	};

	if (!sched->map.color_pids || !thread || thread__priv(thread))
		return thread;

	if (thread_map__has(sched->map.color_pids, tid))
		thread__set_priv(thread, priv.ptr);

	return thread;
}
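/*
 * 'perf sched map' output: one column per CPU, each running task shown
 * by a two-character shortname; the color_pids/color_cpus filters above
 * highlight selected tasks and CPUs.
 */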
static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine)
{
	const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
	struct thread *sched_in;
	struct thread_runtime *tr;
	int new_shortname;
	u64 timestamp0, timestamp = sample->time;
	s64 delta;
	int i, this_cpu = sample->cpu;
	int cpus_nr;
	bool new_cpu = false;
	const char *color = PERF_COLOR_NORMAL;
	char stimestamp[32];

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > sched->max_cpu)
		sched->max_cpu = this_cpu;

	if (sched->map.comp) {
		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
		if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
			sched->map.comp_cpus[cpus_nr++] = this_cpu;
			new_cpu = true;
		}
	} else
		cpus_nr = sched->max_cpu;

	timestamp0 = sched->cpu_last_switched[this_cpu];
	sched->cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
	if (sched_in == NULL)
		return -1;

	tr = thread__get_runtime(sched_in);
	if (tr == NULL) {
		thread__put(sched_in);
		return -1;
	}

	sched->curr_thread[this_cpu] = thread__get(sched_in);

	new_shortname = 0;
	if (!tr->shortname[0]) {
		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
			/*
			 * Don't allocate a letter-number for swapper:0
			 * as a shortname. Instead, we use '.' for it.
			 */
			tr->shortname[0] = '.';
			tr->shortname[1] = ' ';
		} else {
			tr->shortname[0] = sched->next_shortname1;
			tr->shortname[1] = sched->next_shortname2;

			if (sched->next_shortname1 < 'Z') {
				sched->next_shortname1++;
			} else {
				sched->next_shortname1 = 'A';
				if (sched->next_shortname2 < '9')
					sched->next_shortname2++;
				else
					sched->next_shortname2 = '0';
			}
		}
		new_shortname = 1;
	}

	for (i = 0; i < cpus_nr; i++) {
		int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
		struct thread *curr_thread = sched->curr_thread[cpu];
		struct thread_runtime *curr_tr;
		const char *pid_color = color;
		const char *cpu_color = color;

		if (curr_thread && thread__has_color(curr_thread))
			pid_color = COLOR_PIDS;

		if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
			continue;

		if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
			cpu_color = COLOR_CPUS;

		if (cpu != this_cpu)
			color_fprintf(stdout, color, " ");
		else
			color_fprintf(stdout, cpu_color, "*");

		if (sched->curr_thread[cpu]) {
			curr_tr = thread__get_runtime(sched->curr_thread[cpu]);
			if (curr_tr == NULL) {
				thread__put(sched_in);
				return -1;
			}
			color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
		} else
			color_fprintf(stdout, color, " ");
	}

	if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
		goto out;

	timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
	color_fprintf(stdout, color, " %12s secs ", stimestamp);
	if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) {
		const char *pid_color = color;

		if (thread__has_color(sched_in))
			pid_color = COLOR_PIDS;

		color_fprintf(stdout, pid_color, "%s => %s:%d",
			      tr->shortname, thread__comm_str(sched_in), sched_in->tid);
		tr->comm_changed = false;
	}

	if (sched->map.comp && new_cpu)
		color_fprintf(stdout, color, " (CPU %d)", this_cpu);

out:
	color_fprintf(stdout, color, "\n");

	thread__put(sched_in);

	return 0;
}
static int process_sched_switch_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int this_cpu = sample->cpu, err = 0;
	u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
	    next_pid = evsel__intval(evsel, sample, "next_pid");

	if (sched->curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (sched->curr_pid[this_cpu] != prev_pid)
			sched->nr_context_switch_bugs++;
	}

	if (sched->tp_handler->switch_event)
		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);

	sched->curr_pid[this_cpu] = next_pid;
	return err;
}
static int process_sched_runtime_event(struct perf_tool *tool,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->runtime_event)
		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);

	return 0;
}
static int perf_sched__process_fork_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct perf_sample *sample,
					  struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	/* run the fork event through the perf machinery */
	perf_event__process_fork(tool, event, sample, machine);

	/* and then run additional processing needed for this command */
	if (sched->tp_handler->fork_event)
		return sched->tp_handler->fork_event(sched, event, machine);

	return 0;
}
static int process_sched_migrate_task_event(struct perf_tool *tool,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->migrate_task_event)
		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);

	return 0;
}
typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct evsel *evsel,
						 struct machine *machine)
{
	int err = 0;

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(tool, evsel, sample, machine);
	}

	return err;
}
static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	struct thread *thread;
	struct thread_runtime *tr;
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	if (err)
		return err;

	thread = machine__find_thread(machine, sample->pid, sample->tid);
	if (!thread) {
		pr_err("Internal error: can't find thread\n");
		return -1;
	}

	tr = thread__get_runtime(thread);
	if (tr == NULL) {
		thread__put(thread);
		return -1;
	}

	tr->comm_changed = true;
	thread__put(thread);

	return 0;
}
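/*
 * Open the recorded perf.data session and route each sched tracepoint to
 * the handlers registered below; event and lost-event statistics are
 * copied out for the summary printed at the end of the run.
 */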
static int perf_sched__read_events(struct perf_sched *sched)
{
	const struct evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	};
	struct perf_session *session;
	struct perf_data data = {
		.mode  = PERF_DATA_MODE_READ,
		.force = sched->force,
	};
	int rc = -1;

	session = perf_session__new(&data, false, &sched->tool);
	if (IS_ERR(session)) {
		pr_debug("Error creating perf session");
		return PTR_ERR(session);
	}

	symbol__init(&session->header.env);

	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out_delete;

	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session);
		if (err) {
			pr_err("Failed to process events, error %d", err);
			goto out_delete;
		}

		sched->nr_events      = session->evlist->stats.nr_events[0];
		sched->nr_lost_events = session->evlist->stats.total_lost;
		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
	}

	rc = 0;
out_delete:
	perf_session__delete(session);
	return rc;
}
/*
 * scheduling times are printed as msec.usec
 */
static inline void print_sched_time(unsigned long long nsecs, int width)
{
	unsigned long msecs;
	unsigned long usecs;

	msecs  = nsecs / NSEC_PER_MSEC;
	nsecs -= msecs * NSEC_PER_MSEC;
	usecs  = nsecs / NSEC_PER_USEC;
	printf("%*lu.%03lu ", width, msecs, usecs);
}
/*
 * returns runtime data for event, allocating memory for it the
 * first time it is used.
 */
static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
{
	struct evsel_runtime *r = evsel->priv;

	if (r == NULL) {
		r = zalloc(sizeof(struct evsel_runtime));
		evsel->priv = r;
	}

	return r;
}
/*
 * save last time event was seen per cpu
 */
static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
{
	struct evsel_runtime *r = evsel__get_runtime(evsel);

	if (r == NULL)
		return;

	if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
		int i, n = __roundup_pow_of_two(cpu+1);
		void *p = r->last_time;

		p = realloc(r->last_time, n * sizeof(u64));
		if (!p)
			return;

		r->last_time = p;
		for (i = r->ncpu; i < n; ++i)
			r->last_time[i] = (u64) 0;

		r->ncpu = n;
	}

	r->last_time[cpu] = timestamp;
}
/* returns last time this event was seen on the given cpu */
static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
{
	struct evsel_runtime *r = evsel__get_runtime(evsel);

	if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
		return 0;

	return r->last_time[cpu];
}
static int comm_width = 30;

static char *timehist_get_commstr(struct thread *thread)
{
	static char str[32];
	const char *comm = thread__comm_str(thread);
	pid_t tid = thread->tid;
	pid_t pid = thread->pid_;
	int n;

	if (pid == 0)
		n = scnprintf(str, sizeof(str), "%s", comm);

	else if (tid != pid)
		n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);

	else
		n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);

	if (n > comm_width)
		comm_width = n;

	return str;
}
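/*
 * timehist output: one row per sched_switch with wait time, scheduling
 * delay and run time columns (msec.usec), optionally preceded by a
 * per-CPU visual column and followed by the callchain.
 */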
static void timehist_header(struct perf_sched *sched)
{
	u32 ncpus = sched->max_cpu + 1;
	u32 i, j;

	printf("%15s %6s ", "time", "cpu");

	if (sched->show_cpu_visual) {
		printf(" ");
		for (i = 0, j = 0; i < ncpus; ++i) {
			printf("%x", j++);
			if (j > 15)
				j = 0;
		}
		printf(" ");
	}

	printf(" %-*s %9s %9s %9s", comm_width,
		"task name", "wait time", "sch delay", "run time");

	if (sched->show_state)
		printf(" %s", "state");

	printf("\n");

	printf("%15s %-6s ", "", "");

	if (sched->show_cpu_visual)
		printf(" %*s ", ncpus, "");

	printf(" %-*s %9s %9s %9s", comm_width,
	       "[tid/pid]", "(msec)", "(msec)", "(msec)");

	if (sched->show_state)
		printf(" %5s", "");

	printf("\n");

	printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);

	if (sched->show_cpu_visual)
		printf(" %.*s ", ncpus, graph_dotted_line);

	printf(" %.*s %.9s %.9s %.9s", comm_width,
		graph_dotted_line, graph_dotted_line, graph_dotted_line,
		graph_dotted_line);

	if (sched->show_state)
		printf(" %.5s", graph_dotted_line);

	printf("\n");
}
static char task_state_char(struct thread *thread, int state)
{
	static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
	unsigned bit = state ? ffs(state) : 0;

	/* 'I' for the idle task */
	if (thread->tid == 0)
		return 'I';

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
static void timehist_print_sample(struct perf_sched *sched,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct addr_location *al,
				  struct thread *thread,
				  u64 t, int state)
{
	struct thread_runtime *tr = thread__priv(thread);
	const char *next_comm = evsel__strval(evsel, sample, "next_comm");
	const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
	u32 max_cpus = sched->max_cpu + 1;
	char tstr[64];
	char nstr[30];
	u64 wait_time;

	if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
		return;

	timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);

	if (sched->show_cpu_visual) {
		u32 i;
		char c;

		printf(" ");
		for (i = 0; i < max_cpus; ++i) {
			/* flag idle times with 'i'; others are sched events */
			if (i == sample->cpu)
				c = (thread->tid == 0) ? 'i' : 's';
			else
				c = ' ';
			printf("%c", c);
		}
		printf(" ");
	}

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
	print_sched_time(wait_time, 6);

	print_sched_time(tr->dt_delay, 6);
	print_sched_time(tr->dt_run, 6);

	if (sched->show_state)
		printf(" %5c ", task_state_char(thread, state));

	if (sched->show_next) {
		snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
		printf(" %-*s", comm_width, nstr);
	}

	if (sched->show_wakeups && !sched->show_next)
		printf(" %-*s", comm_width, "");

	if (thread->tid == 0)
		goto out;

	if (sched->show_callchain)
		printf("  ");

	sample__fprintf_sym(sample, al, 0,
			    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
			    EVSEL__PRINT_CALLCHAIN_ARROW |
			    EVSEL__PRINT_SKIP_IGNORED,
			    &callchain_cursor, symbol_conf.bt_stop_list, stdout);

out:
	printf("\n");
}
/*
 * Explanation of delta-time stats:
 *
 *            t = time of current schedule out event
 *        tprev = time of previous sched out event
 *                also time of schedule-in event for current task
 *    last_time = time of last sched change event for current task
 *                (i.e, time process was last scheduled out)
 * ready_to_run = time of wakeup for current task
 *
 * -----|------------|------------|------------|------
 *    last         ready        tprev            t
 *    time         to run
 *
 *      |-------- dt_wait --------|
 *                   |- dt_delay -|-- dt_run --|
 *
 *   dt_run = run time of current task
 *  dt_wait = time between last schedule out event for task and tprev
 *            represents time spent off the cpu
 * dt_delay = time between wakeup and schedule-in of task
 */
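/*
 * timehist_update_runtime_stats() below implements this accounting: the
 * off-cpu interval (dt_wait) is attributed to dt_preempt, dt_iowait or
 * dt_sleep depending on the state the task was in when it was last
 * switched out (last_state).
 */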
static void timehist_update_runtime_stats(struct thread_runtime *r,
					  u64 t, u64 tprev)
{
	r->dt_run = t - tprev;
	if (r->ready_to_run) {
		if (r->ready_to_run > tprev)
			pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
		else
			r->dt_delay = tprev - r->ready_to_run;
	}

	if (r->last_time > tprev)
		pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
	else if (r->last_time) {
		u64 dt_wait = tprev - r->last_time;

		if (r->last_state == TASK_RUNNING)
			r->dt_preempt = dt_wait;
		else if (r->last_state == TASK_UNINTERRUPTIBLE)
			r->dt_iowait = dt_wait;
		else
			r->dt_sleep = dt_wait;
	}

	update_stats(&r->run_stats, r->dt_run);

	r->total_run_time     += r->dt_run;
	r->total_delay_time   += r->dt_delay;
	r->total_sleep_time   += r->dt_sleep;
	r->total_iowait_time  += r->dt_iowait;
	r->total_preempt_time += r->dt_preempt;
}
static bool is_idle_sample(struct perf_sample *sample,
			   struct evsel *evsel)
{
	/* pid 0 == swapper == idle task */
	if (strcmp(evsel__name(evsel), "sched:sched_switch") == 0)
		return evsel__intval(evsel, sample, "prev_pid") == 0;

	return sample->pid == 0;
}
static void save_task_callchain(struct perf_sched *sched,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct callchain_cursor *cursor = &callchain_cursor;
	struct thread *thread;

	/* want main thread for process - has maps */
	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
	if (thread == NULL) {
		pr_debug("Failed to get thread for pid %d.\n", sample->pid);
		return;
	}

	if (!sched->show_callchain || sample->callchain == NULL)
		return;

	if (thread__resolve_callchain(thread, cursor, evsel, sample,
				      NULL, NULL, sched->max_stack + 2) != 0) {
		if (verbose > 0)
			pr_err("Failed to resolve callchain. Skipping\n");

		return;
	}

	callchain_cursor_commit(cursor);

	while (true) {
		struct callchain_cursor_node *node;
		struct symbol *sym;

		node = callchain_cursor_current(cursor);
		if (node == NULL)
			break;

		sym = node->ms.sym;
		if (sym) {
			if (!strcmp(sym->name, "schedule") ||
			    !strcmp(sym->name, "__schedule") ||
			    !strcmp(sym->name, "preempt_schedule"))
				sym->ignore = 1;
		}

		callchain_cursor_advance(cursor);
	}
}
static int init_idle_thread(struct thread *thread)
{
	struct idle_thread_runtime *itr;

	thread__set_comm(thread, idle_comm, 0);

	itr = zalloc(sizeof(*itr));
	if (itr == NULL)
		return -ENOMEM;

	init_stats(&itr->tr.run_stats);
	callchain_init(&itr->callchain);
	callchain_cursor_reset(&itr->cursor);
	thread__set_priv(thread, itr);

	return 0;
}
/*
 * Track idle stats per cpu by maintaining a local thread
 * struct for the idle task on each cpu.
 */
static int init_idle_threads(int ncpu)
{
	int i, ret;

	idle_threads = zalloc(ncpu * sizeof(struct thread *));
	if (!idle_threads)
		return -ENOMEM;

	idle_max_cpu = ncpu;

	/* allocate the actual thread struct if needed */
	for (i = 0; i < ncpu; ++i) {
		idle_threads[i] = thread__new(0, 0);
		if (idle_threads[i] == NULL)
			return -ENOMEM;

		ret = init_idle_thread(idle_threads[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}
static void free_idle_threads(void)
{
	int i;

	if (idle_threads == NULL)
		return;

	for (i = 0; i < idle_max_cpu; ++i) {
		if ((idle_threads[i]))
			thread__delete(idle_threads[i]);
	}

	free(idle_threads);
}
static struct thread *get_idle_thread(int cpu)
{
	/*
	 * expand/allocate array of pointers to local thread
	 * structs if needed
	 */
	if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
		int i, j = __roundup_pow_of_two(cpu+1);
		void *p;

		p = realloc(idle_threads, j * sizeof(struct thread *));
		if (!p)
			return NULL;

		idle_threads = (struct thread **) p;
		for (i = idle_max_cpu; i < j; ++i)
			idle_threads[i] = NULL;

		idle_max_cpu = j;
	}

	/* allocate a new thread struct if needed */
	if (idle_threads[cpu] == NULL) {
		idle_threads[cpu] = thread__new(0, 0);

		if (idle_threads[cpu]) {
			if (init_idle_thread(idle_threads[cpu]) < 0)
				return NULL;
		}
	}

	return idle_threads[cpu];
}
static void save_idle_callchain(struct perf_sched *sched,
				struct idle_thread_runtime *itr,
				struct perf_sample *sample)
{
	if (!sched->show_callchain || sample->callchain == NULL)
		return;

	callchain_cursor__copy(&itr->cursor, &callchain_cursor);
}
static struct thread *timehist_get_thread(struct perf_sched *sched,
					  struct perf_sample *sample,
					  struct machine *machine,
					  struct evsel *evsel)
{
	struct thread *thread;

	if (is_idle_sample(sample, evsel)) {
		thread = get_idle_thread(sample->cpu);
		if (thread == NULL)
			pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);

	} else {
		/* there were samples with tid 0 but non-zero pid */
		thread = machine__findnew_thread(machine, sample->pid,
						 sample->tid ?: sample->pid);
		if (thread == NULL) {
			pr_debug("Failed to get thread for tid %d. skipping sample.\n",
				 sample->tid);
		}

		save_task_callchain(sched, sample, evsel, machine);
		if (sched->idle_hist) {
			struct thread *idle;
			struct idle_thread_runtime *itr;

			idle = get_idle_thread(sample->cpu);
			if (idle == NULL) {
				pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
				return NULL;
			}

			itr = thread__priv(idle);
			if (itr == NULL)
				return NULL;

			itr->last_thread = thread;

			/* copy task callchain when entering to idle */
			if (evsel__intval(evsel, sample, "next_pid") == 0)
				save_idle_callchain(sched, itr, sample);
		}
	}

	return thread;
}
static bool timehist_skip_sample(struct perf_sched *sched,
				 struct thread *thread,
				 struct evsel *evsel,
				 struct perf_sample *sample)
{
	bool rc = false;

	if (thread__is_filtered(thread)) {
		rc = true;
		sched->skipped_samples++;
	}

	if (sched->idle_hist) {
		if (strcmp(evsel__name(evsel), "sched:sched_switch"))
			rc = true;
		else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
			 evsel__intval(evsel, sample, "next_pid") != 0)
			rc = true;
	}

	return rc;
}
static void timehist_print_wakeup_event(struct perf_sched *sched,
					struct evsel *evsel,
					struct perf_sample *sample,
					struct machine *machine,
					struct thread *awakened)
{
	struct thread *thread;
	char tstr[64];

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL)
		return;

	/* show wakeup unless both awakee and awaker are filtered */
	if (timehist_skip_sample(sched, thread, evsel, sample) &&
	    timehist_skip_sample(sched, awakened, evsel, sample)) {
		return;
	}

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);
	if (sched->show_cpu_visual)
		printf(" %*s ", sched->max_cpu + 1, "");

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	/* spacer for the dt columns */
	printf(" %9s %9s %9s ", "", "", "");

	printf("awakened: %s", timehist_get_commstr(awakened));

	printf("\n");
}
static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
					union perf_event *event __maybe_unused,
					struct evsel *evsel __maybe_unused,
					struct perf_sample *sample __maybe_unused,
					struct machine *machine __maybe_unused)
{
	return 0;
}
static int timehist_sched_wakeup_event(struct perf_tool *tool,
				       union perf_event *event __maybe_unused,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	/* want pid of awakened task not pid in sample */
	const u32 pid = evsel__intval(evsel, sample, "pid");

	thread = machine__findnew_thread(machine, 0, pid);
	if (thread == NULL)
		return -1;

	tr = thread__get_runtime(thread);
	if (tr == NULL)
		return -1;

	if (tr->ready_to_run == 0)
		tr->ready_to_run = sample->time;

	/* show wakeups if requested */
	if (sched->show_wakeups &&
	    !perf_time__skip_sample(&sched->ptime, sample->time))
		timehist_print_wakeup_event(sched, evsel, sample, machine, thread);

	return 0;
}
static void timehist_print_migration_event(struct perf_sched *sched,
					   struct evsel *evsel,
					   struct perf_sample *sample,
					   struct machine *machine,
					   struct thread *migrated)
{
	struct thread *thread;
	char tstr[64];
	u32 max_cpus = sched->max_cpu + 1;
	u32 ocpu, dcpu;

	if (sched->summary_only)
		return;

	max_cpus = sched->max_cpu + 1;
	ocpu = evsel__intval(evsel, sample, "orig_cpu");
	dcpu = evsel__intval(evsel, sample, "dest_cpu");

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL)
		return;

	if (timehist_skip_sample(sched, thread, evsel, sample) &&
	    timehist_skip_sample(sched, migrated, evsel, sample)) {
		return;
	}

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s [%04d] ", tstr, sample->cpu);

	if (sched->show_cpu_visual) {
		u32 i;
		char c;

		printf(" ");
		for (i = 0; i < max_cpus; ++i) {
			c = (i == sample->cpu) ? 'm' : ' ';
			printf("%c", c);
		}
		printf(" ");
	}

	printf(" %-*s ", comm_width, timehist_get_commstr(thread));

	/* spacer for the dt columns */
	printf(" %9s %9s %9s ", "", "", "");

	printf("migrated: %s", timehist_get_commstr(migrated));
	printf(" cpu %d => %d", ocpu, dcpu);

	printf("\n");
}
static int timehist_migrate_task_event(struct perf_tool *tool,
				       union perf_event *event __maybe_unused,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	/* want pid of migrated task not pid in sample */
	const u32 pid = evsel__intval(evsel, sample, "pid");

	thread = machine__findnew_thread(machine, 0, pid);
	if (thread == NULL)
		return -1;

	tr = thread__get_runtime(thread);
	if (tr == NULL)
		return -1;

	/* show migrations if requested */
	timehist_print_migration_event(sched, evsel, sample, machine, thread);

	return 0;
}
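/*
 * Core of 'perf sched timehist': every sched_switch ends one interval on
 * the sample's CPU, updates the per-thread (or per-idle-cpu) runtime
 * stats for the time window of interest, and optionally prints the
 * per-event row.
 */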
static int timehist_sched_change_event(struct perf_tool *tool,
				       union perf_event *event,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	struct perf_time_interval *ptime = &sched->ptime;
	struct addr_location al;
	struct thread *thread;
	struct thread_runtime *tr = NULL;
	u64 tprev, t = sample->time;
	int rc = 0;
	int state = evsel__intval(evsel, sample, "prev_state");

	if (machine__resolve(machine, &al, sample) < 0) {
		pr_err("problem processing %d event. skipping it\n",
		       event->header.type);
		rc = -1;
		goto out;
	}

	thread = timehist_get_thread(sched, sample, machine, evsel);
	if (thread == NULL) {
		rc = -1;
		goto out;
	}

	if (timehist_skip_sample(sched, thread, evsel, sample))
		goto out;

	tr = thread__get_runtime(thread);
	if (tr == NULL) {
		rc = -1;
		goto out;
	}

	tprev = evsel__get_time(evsel, sample->cpu);

	/*
	 * If start time given:
	 * - sample time is under window user cares about - skip sample
	 * - tprev is under window user cares about - reset to start of window
	 */
	if (ptime->start && ptime->start > t)
		goto out;

	if (tprev && ptime->start > tprev)
		tprev = ptime->start;
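	/*
	 * Worked example (illustrative): with --time 100,200 a switch seen at
	 * t=150 whose previous event was at tprev=90 is accounted as 100..150,
	 * i.e. only the part of the interval inside the analysis window.
	 */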
	/*
	 * If end time given:
	 * - previous sched event is out of window - we are done
	 * - sample time is beyond window user cares about - reset it
	 *   to close out stats for time window interest
	 */
	if (ptime->end) {
		if (tprev > ptime->end)
			goto out;

		if (t > ptime->end)
			t = ptime->end;
	}

	if (!sched->idle_hist || thread->tid == 0) {
		if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
			timehist_update_runtime_stats(tr, t, tprev);

		if (sched->idle_hist) {
			struct idle_thread_runtime *itr = (void *)tr;
			struct thread_runtime *last_tr;

			BUG_ON(thread->tid != 0);

			if (itr->last_thread == NULL)
				goto out;

			/* add current idle time as last thread's runtime */
			last_tr = thread__get_runtime(itr->last_thread);
			if (last_tr == NULL)
				goto out;

			timehist_update_runtime_stats(last_tr, t, tprev);
			/*
			 * remove delta time of last thread as it's not updated
			 * and otherwise it will show an invalid value next
			 * time. we only care total run time and run stat.
			 */
			last_tr->dt_run = 0;
			last_tr->dt_delay = 0;
			last_tr->dt_sleep = 0;
			last_tr->dt_iowait = 0;
			last_tr->dt_preempt = 0;

			callchain_append(&itr->callchain, &itr->cursor, t - tprev);

			itr->last_thread = NULL;
		}
	}

	if (!sched->summary_only)
		timehist_print_sample(sched, evsel, sample, &al, thread, t, state);

out:
	if (sched->hist_time.start == 0 && t >= ptime->start)
		sched->hist_time.start = t;
	if (ptime->end == 0 || t <= ptime->end)
		sched->hist_time.end = t;

	if (tr) {
		/* time of this sched_switch event becomes last time task seen */
		tr->last_time = sample->time;

		/* last state is used to determine where to account wait time */
		tr->last_state = state;

		/* sched out event for task so reset ready to run time */
		tr->ready_to_run = 0;
	}

	evsel__save_time(evsel, sample->time, sample->cpu);

	return rc;
}
static int timehist_sched_switch_event(struct perf_tool *tool,
				       union perf_event *event,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine __maybe_unused)
{
	return timehist_sched_change_event(tool, event, evsel, sample, machine);
}
static int process_lost(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine __maybe_unused)
{
	char tstr[64];

	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
	printf("%15s ", tstr);
	printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);

	return 0;
}
static void print_thread_runtime(struct thread *t,
				 struct thread_runtime *r)
{
	double mean = avg_stats(&r->run_stats);
	float stddev;

	printf("%*s %5d %9" PRIu64 " ",
	       comm_width, timehist_get_commstr(t), t->ppid,
	       (u64) r->run_stats.n);

	print_sched_time(r->total_run_time, 8);
	stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
	print_sched_time(r->run_stats.min, 6);
	printf(" ");
	print_sched_time((u64) mean, 6);
	printf(" ");
	print_sched_time(r->run_stats.max, 6);
	printf(" ");
	printf("%5.2f", stddev);
	printf(" %5" PRIu64 "\n", r->migrations);
}
static void print_thread_waittime(struct thread *t,
				  struct thread_runtime *r)
{
	printf("%*s %5d %9" PRIu64 " ",
	       comm_width, timehist_get_commstr(t), t->ppid,
	       (u64) r->run_stats.n);

	print_sched_time(r->total_run_time, 8);
	print_sched_time(r->total_sleep_time, 6);
	printf(" ");
	print_sched_time(r->total_iowait_time, 6);
	printf(" ");
	print_sched_time(r->total_preempt_time, 6);
	printf(" ");
	print_sched_time(r->total_delay_time, 6);
	printf("\n");
}
struct total_run_stats {
	struct perf_sched *sched;
	u64 sched_count;
	u64 task_count;
	u64 total_run_time;
};
static int __show_thread_runtime(struct thread *t, void *priv)
{
	struct total_run_stats *stats = priv;
	struct thread_runtime *r;

	if (thread__is_filtered(t))
		return 0;

	r = thread__priv(t);
	if (r && r->run_stats.n) {
		stats->task_count++;
		stats->sched_count += r->run_stats.n;
		stats->total_run_time += r->total_run_time;

		if (stats->sched->show_state)
			print_thread_waittime(t, r);
		else
			print_thread_runtime(t, r);
	}

	return 0;
}
static int show_thread_runtime(struct thread *t, void *priv)
{
	if (t->dead)
		return 0;

	return __show_thread_runtime(t, priv);
}

static int show_deadthread_runtime(struct thread *t, void *priv)
{
	if (!t->dead)
		return 0;

	return __show_thread_runtime(t, priv);
}
static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = " <- ";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (node == NULL)
		return 0;

	ret = callchain__fprintf_folded(fp, node->parent);
	first = (ret == 0);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym && chain->ms.sym->ignore)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain, bf, sizeof(bf),
							false));
		first = false;
	}

	return ret;
}
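/*
 * Folded output puts a whole chain on one line separated by " <- ", e.g.
 * (illustrative only):
 *
 *   __schedule <- schedule <- schedule_timeout <- rcu_gp_kthread <- kthread <- ret_from_fork
 */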
static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
{
	size_t ret = 0;
	FILE *fp = stdout;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first_cached(root);

	printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
	printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
	       graph_dotted_line);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		rb_node = rb_next(rb_node);

		ret += fprintf(fp, " ");
		print_sched_time(chain->hit, 12);
		ret += 16; /* print_sched_time returns 2nd arg + 4 */
		ret += fprintf(fp, " %8d ", chain->count);
		ret += callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
	}

	return ret;
}
static void timehist_print_summary(struct perf_sched *sched,
				   struct perf_session *session)
{
	struct machine *m = &session->machines.host;
	struct total_run_stats totals;
	u64 task_count;
	struct thread *t;
	struct thread_runtime *r;
	int i;
	u64 hist_time = sched->hist_time.end - sched->hist_time.start;

	memset(&totals, 0, sizeof(totals));
	totals.sched = sched;

	if (sched->idle_hist) {
		printf("\nIdle-time summary\n");
		printf("%*s parent sched-out ", comm_width, "comm");
		printf(" idle-time min-idle avg-idle max-idle stddev migrations\n");
	} else if (sched->show_state) {
		printf("\nWait-time summary\n");
		printf("%*s parent sched-in ", comm_width, "comm");
		printf(" run-time sleep iowait preempt delay\n");
	} else {
		printf("\nRuntime summary\n");
		printf("%*s parent sched-in ", comm_width, "comm");
		printf(" run-time min-run avg-run max-run stddev migrations\n");
	}
	printf("%*s (count) ", comm_width, "");
	printf(" (msec) (msec) (msec) (msec) %s\n",
	       sched->show_state ? "(msec)" : "%");
	printf("%.117s\n", graph_dotted_line);

	machine__for_each_thread(m, show_thread_runtime, &totals);
	task_count = totals.task_count;
	if (!task_count)
		printf("<no still running tasks>\n");

	printf("\nTerminated tasks:\n");
	machine__for_each_thread(m, show_deadthread_runtime, &totals);
	if (task_count == totals.task_count)
		printf("<no terminated tasks>\n");

	/* CPU idle stats not tracked when samples were skipped */
	if (sched->skipped_samples && !sched->idle_hist)
		return;

	printf("\nIdle stats:\n");
	for (i = 0; i < idle_max_cpu; ++i) {
		if (cpu_list && !test_bit(i, cpu_bitmap))
			continue;

		t = idle_threads[i];
		if (!t)
			continue;

		r = thread__priv(t);
		if (r && r->run_stats.n) {
			totals.sched_count += r->run_stats.n;
			printf(" CPU %2d idle for ", i);
			print_sched_time(r->total_run_time, 6);
			printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
		} else
			printf(" CPU %2d idle entire time window\n", i);
	}

	if (sched->idle_hist && sched->show_callchain) {
		callchain_param.mode = CHAIN_FOLDED;
		callchain_param.value = CCVAL_PERIOD;

		callchain_register_param(&callchain_param);

		printf("\nIdle stats by callchain:\n");
		for (i = 0; i < idle_max_cpu; ++i) {
			struct idle_thread_runtime *itr;

			t = idle_threads[i];
			if (!t)
				continue;

			itr = thread__priv(t);
			if (itr == NULL)
				continue;

			callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
					     0, &callchain_param);

			printf(" CPU %2d:", i);
			print_sched_time(itr->tr.total_run_time, 6);
			printf(" msec\n");
			timehist_print_idlehist_callchain(&itr->sorted_root);
			printf("\n");
		}
	}

	printf("\n"
	       " Total number of unique tasks: %" PRIu64 "\n"
	       "Total number of context switches: %" PRIu64 "\n",
	       totals.task_count, totals.sched_count);

	printf(" Total run time (msec): ");
	print_sched_time(totals.total_run_time, 2);
	printf("\n");

	printf(" Total scheduling time (msec): ");
	print_sched_time(hist_time, 2);
	printf(" (x %d)\n", sched->max_cpu);
}
typedef int (*sched_handler)(struct perf_tool *tool,
			     union perf_event *event,
			     struct evsel *evsel,
			     struct perf_sample *sample,
			     struct machine *machine);
static int perf_timehist__process_sample(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct evsel *evsel,
					 struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int err = 0;
	int this_cpu = sample->cpu;

	if (this_cpu > sched->max_cpu)
		sched->max_cpu = this_cpu;

	if (evsel->handler != NULL) {
		sched_handler f = evsel->handler;

		err = f(tool, event, evsel, sample, machine);
	}

	return err;
}
static int timehist_check_attr(struct perf_sched *sched,
			       struct evlist *evlist)
{
	struct evsel *evsel;
	struct evsel_runtime *er;

	list_for_each_entry(evsel, &evlist->core.entries, core.node) {
		er = evsel__get_runtime(evsel);
		if (er == NULL) {
			pr_err("Failed to allocate memory for evsel runtime data\n");
			return -1;
		}

		if (sched->show_callchain && !evsel__has_callchain(evsel)) {
			pr_info("Samples do not have callchains.\n");
			sched->show_callchain = 0;
			symbol_conf.use_callchain = 0;
		}
	}

	return 0;
}
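/*
 * 'perf sched timehist' analyzes a previously recorded session; the typical
 * flow is e.g.:
 *
 *   perf sched record -- sleep 1
 *   perf sched timehist
 */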
static int perf_sched__timehist(struct perf_sched *sched)
{
	struct evsel_str_handler handlers[] = {
		{ "sched:sched_switch",       timehist_sched_switch_event, },
		{ "sched:sched_wakeup",       timehist_sched_wakeup_event, },
		{ "sched:sched_waking",       timehist_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   timehist_sched_wakeup_event, },
	};
	const struct evsel_str_handler migrate_handlers[] = {
		{ "sched:sched_migrate_task", timehist_migrate_task_event, },
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = sched->force,
	};

	struct perf_session *session;
	struct evlist *evlist;
	int err = -1;

	/*
	 * event handlers for timehist option
	 */
	sched->tool.sample	 = perf_timehist__process_sample;
	sched->tool.mmap	 = perf_event__process_mmap;
	sched->tool.comm	 = perf_event__process_comm;
	sched->tool.exit	 = perf_event__process_exit;
	sched->tool.fork	 = perf_event__process_fork;
	sched->tool.lost	 = process_lost;
	sched->tool.attr	 = perf_event__process_attr;
	sched->tool.tracing_data = perf_event__process_tracing_data;
	sched->tool.build_id	 = perf_event__process_build_id;

	sched->tool.ordered_events = true;
	sched->tool.ordering_requires_timestamps = true;

	symbol_conf.use_callchain = sched->show_callchain;

	session = perf_session__new(&data, false, &sched->tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	if (cpu_list) {
		err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
		if (err < 0)
			goto out;
	}

	evlist = session->evlist;

	symbol__init(&session->header.env);

	if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
		pr_err("Invalid time string\n");
		return -EINVAL;
	}

	if (timehist_check_attr(sched, evlist) != 0)
		goto out;

	setup_pager();

	/* prefer sched_waking if it is captured */
	if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
		handlers[1].handler = timehist_sched_wakeup_ignore;

	/* setup per-evsel handlers */
	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out;

	/* sched_switch event at a minimum needs to exist */
	if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) {
		pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
		goto out;
	}

	if (sched->show_migrations &&
	    perf_session__set_tracepoints_handlers(session, migrate_handlers))
		goto out;

	/* pre-allocate struct for per-CPU idle stats */
	sched->max_cpu = session->header.env.nr_cpus_online;
	if (sched->max_cpu == 0)
		sched->max_cpu = 4;
	if (init_idle_threads(sched->max_cpu))
		goto out;

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (sched->summary_only)
		sched->summary = sched->summary_only;

	if (!sched->summary_only)
		timehist_header(sched);

	err = perf_session__process_events(session);
	if (err) {
		pr_err("Failed to process events, error %d", err);
		goto out;
	}

	sched->nr_events      = evlist->stats.nr_events[0];
	sched->nr_lost_events = evlist->stats.total_lost;
	sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];

	if (sched->summary)
		timehist_print_summary(sched, session);

out:
	free_idle_threads();
	perf_session__delete(session);

	return err;
}
static void print_bad_events(struct perf_sched *sched)
{
	if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
		printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
		       (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
		       sched->nr_unordered_timestamps, sched->nr_timestamps);
	}
	if (sched->nr_lost_events && sched->nr_events) {
		printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
		       (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
		       sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
	}
	if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
		printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
		       (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
		       sched->nr_context_switch_bugs, sched->nr_timestamps);
		if (sched->nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}
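/*
 * Merge one task's work atoms into the per-comm tree: nodes are keyed by the
 * thread's comm string, so threads sharing a comm are folded into a single
 * entry (skipped when -p/--pids asks for per-pid latency stats).
 */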
static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
{
	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
	struct work_atoms *this;
	const char *comm = thread__comm_str(data->thread), *this_comm;
	bool leftmost = true;

	while (*new) {
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		this_comm = thread__comm_str(this->thread);
		cmp = strcmp(comm, this_comm);
		if (cmp > 0) {
			new = &((*new)->rb_left);
		} else if (cmp < 0) {
			new = &((*new)->rb_right);
			leftmost = false;
		} else {
			this->total_runtime += data->total_runtime;
			this->nb_atoms += data->nb_atoms;
			this->total_lat += data->total_lat;
			list_splice(&data->work_list, &this->work_list);
			if (this->max_lat < data->max_lat) {
				this->max_lat = data->max_lat;
				this->max_lat_start = data->max_lat_start;
				this->max_lat_end = data->max_lat_end;
			}
			zfree(&data);
			return;
		}
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color_cached(&data->node, root, leftmost);
}
static void perf_sched__merge_lat(struct perf_sched *sched)
{
	struct work_atoms *data;
	struct rb_node *node;

	if (sched->skip_merge)
		return;

	while ((node = rb_first_cached(&sched->atom_root))) {
		rb_erase_cached(node, &sched->atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__merge_work_atoms(&sched->merged_atom_root, data);
	}
}
static int perf_sched__lat(struct perf_sched *sched)
{
	struct rb_node *next;

	setup_pager();

	if (perf_sched__read_events(sched))
		return -1;

	perf_sched__merge_lat(sched);
	perf_sched__sort_lat(sched);

	printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
	printf(" Task | Runtime ms | Switches | Avg delay ms | Max delay ms | Max delay start | Max delay end |\n");
	printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");

	next = rb_first_cached(&sched->sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(sched, work_list);
		next = rb_next(next);
		thread__zput(work_list->thread);
	}

	printf(" -----------------------------------------------------------------------------------------------------------------\n");
	printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
	       (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events(sched);
	printf("\n");

	return 0;
}
static int setup_map_cpus(struct perf_sched *sched)
{
	struct perf_cpu_map *map;

	sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	if (sched->map.comp) {
		sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
		if (!sched->map.comp_cpus)
			return -1;
	}

	if (!sched->map.cpus_str)
		return 0;

	map = perf_cpu_map__new(sched->map.cpus_str);
	if (!map) {
		pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
		return -1;
	}

	sched->map.cpus = map;
	return 0;
}
static int setup_color_pids(struct perf_sched *sched)
{
	struct perf_thread_map *map;

	if (!sched->map.color_pids_str)
		return 0;

	map = thread_map__new_by_tid_str(sched->map.color_pids_str);
	if (!map) {
		pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
		return -1;
	}

	sched->map.color_pids = map;
	return 0;
}

static int setup_color_cpus(struct perf_sched *sched)
{
	struct perf_cpu_map *map;

	if (!sched->map.color_cpus_str)
		return 0;

	map = perf_cpu_map__new(sched->map.color_cpus_str);
	if (!map) {
		pr_err("failed to get cpus map from %s\n", sched->map.color_cpus_str);
		return -1;
	}

	sched->map.color_cpus = map;
	return 0;
}
static int perf_sched__map(struct perf_sched *sched)
{
	if (setup_map_cpus(sched))
		return -1;

	if (setup_color_pids(sched))
		return -1;

	if (setup_color_cpus(sched))
		return -1;

	setup_pager();
	if (perf_sched__read_events(sched))
		return -1;
	print_bad_events(sched);
	return 0;
}
static int perf_sched__replay(struct perf_sched *sched)
{
	unsigned long i;

	calibrate_run_measurement_overhead(sched);
	calibrate_sleep_measurement_overhead(sched);

	test_calibrations(sched);

	if (perf_sched__read_events(sched))
		return -1;

	printf("nr_run_events: %ld\n", sched->nr_run_events);
	printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
	printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);

	if (sched->targetless_wakeups)
		printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
	if (sched->multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
	if (sched->nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
		       sched->nr_run_events_optimized);

	print_task_traces(sched);
	add_cross_task_wakeups(sched);

	create_tasks(sched);
	printf("------------------------------------------------------------\n");
	for (i = 0; i < sched->replay_repeat; i++)
		run_one_test(sched);

	return 0;
}
static void setup_sorting(struct perf_sched *sched, const struct option *options,
			  const char * const usage_msg[])
{
	char *tmp, *tok, *str = strdup(sched->sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sched->sort_list) < 0) {
			usage_with_options_msg(usage_msg, options,
					       "Unknown --sort key: `%s'", tok);
		}
	}

	free(str);

	sort_dimension__add("pid", &sched->cmp_pid);
}
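/*
 * Build the argv passed to cmd_record(): the fixed tracepoint list below,
 * plus "-e sched:sched_waking" when that tracepoint exists (falling back to
 * "-e sched:sched_wakeup"), plus whatever the user appended after
 * 'perf sched record'.
 */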
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-a",
		"-R",
		"-m", "1024",
		"-c", "1",
		"-e", "sched:sched_switch",
		"-e", "sched:sched_stat_wait",
		"-e", "sched:sched_stat_sleep",
		"-e", "sched:sched_stat_iowait",
		"-e", "sched:sched_stat_runtime",
		"-e", "sched:sched_process_fork",
		"-e", "sched:sched_wakeup_new",
		"-e", "sched:sched_migrate_task",
	};
	struct tep_event *waking_event;

	/*
	 * +2 for either "-e", "sched:sched_wakeup" or
	 * "-e", "sched:sched_waking"
	 */
	rec_argc = ARRAY_SIZE(record_args) + 2 + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	rec_argv[i++] = "-e";
	waking_event = trace_event__tp_format("sched", "sched_waking");
	if (!IS_ERR(waking_event))
		rec_argv[i++] = strdup("sched:sched_waking");
	else
		rec_argv[i++] = strdup("sched:sched_wakeup");

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv);
}
int cmd_sched(int argc, const char **argv)
{
	static const char default_sort_order[] = "avg, max, switch, runtime";
	struct perf_sched sched = {
		.tool = {
			.sample		 = perf_sched__process_tracepoint_sample,
			.comm		 = perf_sched__process_comm,
			.namespaces	 = perf_event__process_namespaces,
			.lost		 = perf_event__process_lost,
			.fork		 = perf_sched__process_fork_event,
			.ordered_events	 = true,
		},
		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
		.start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
		.work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
		.sort_order	      = default_sort_order,
		.replay_repeat	      = 10,
		.profile_cpu	      = -1,
		.next_shortname1      = 'A',
		.next_shortname2      = '0',
		.show_callchain	      = 1,
		.max_stack	      = 5,
	};
	const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
	OPT_END()
	};
	const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INTEGER('C', "CPU", &sched.profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('p', "pids", &sched.skip_merge,
		    "latency stats per pid instead of per comm"),
	OPT_PARENT(sched_options)
	};
	const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
		     "repeat the workload replay N times (-1: infinite)"),
	OPT_PARENT(sched_options)
	};
	const struct option map_options[] = {
	OPT_BOOLEAN(0, "compact", &sched.map.comp,
		    "map output in compact mode"),
	OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
		   "highlight given pids in map"),
	OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
		   "highlight given CPUs in map"),
	OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
		   "display given CPUs in map"),
	OPT_PARENT(sched_options)
	};
	const struct option timehist_options[] = {
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
		    "Display call chains if present (default on)"),
	OPT_UINTEGER(0, "max-stack", &sched.max_stack,
		     "Maximum number of functions to display backtrace."),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		   "Look for files with symbols relative to this directory"),
	OPT_BOOLEAN('s', "summary", &sched.summary_only,
		    "Show only syscall summary with statistics"),
	OPT_BOOLEAN('S', "with-summary", &sched.summary,
		    "Show all syscalls and summary with statistics"),
	OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
	OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
	OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
	OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
	OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
	OPT_STRING(0, "time", &sched.time_str, "str",
		   "Time span for analysis (start,stop)"),
	OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
	OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
		   "analyze events only for given process id(s)"),
	OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
		   "analyze events only for given thread id(s)"),
	OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
	OPT_PARENT(sched_options)
	};

	const char * const latency_usage[] = {
		"perf sched latency [<options>]",
		NULL
	};
	const char * const replay_usage[] = {
		"perf sched replay [<options>]",
		NULL
	};
	const char * const map_usage[] = {
		"perf sched map [<options>]",
		NULL
	};
	const char * const timehist_usage[] = {
		"perf sched timehist [<options>]",
		NULL
	};
	const char *const sched_subcommands[] = { "record", "latency", "map",
						  "replay", "script",
						  "timehist", NULL };
	const char *sched_usage[] = {
		NULL,
		NULL
	};
	struct trace_sched_handler lat_ops = {
		.wakeup_event	    = latency_wakeup_event,
		.switch_event	    = latency_switch_event,
		.runtime_event	    = latency_runtime_event,
		.migrate_task_event = latency_migrate_task_event,
	};
	struct trace_sched_handler map_ops = {
		.switch_event	    = map_switch_event,
	};
	struct trace_sched_handler replay_ops = {
		.wakeup_event	    = replay_wakeup_event,
		.switch_event	    = replay_switch_event,
		.fork_event	    = replay_fork_event,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
		sched.curr_pid[i] = -1;

	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script"))
		return cmd_script(argc, argv);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		sched.tp_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__lat(&sched);
	} else if (!strcmp(argv[0], "map")) {
		if (argc) {
			argc = parse_options(argc, argv, map_options, map_usage, 0);
			if (argc)
				usage_with_options(map_usage, map_options);
		}
		sched.tp_handler = &map_ops;
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__map(&sched);
	} else if (!strncmp(argv[0], "rep", 3)) {
		sched.tp_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		return perf_sched__replay(&sched);
	} else if (!strcmp(argv[0], "timehist")) {
		if (argc) {
			argc = parse_options(argc, argv, timehist_options,
					     timehist_usage, 0);
			if (argc)
				usage_with_options(timehist_usage, timehist_options);
		}

		if ((sched.show_wakeups || sched.show_next) &&
		    sched.summary_only) {
			pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
			parse_options_usage(timehist_usage, timehist_options, "s", true);
			if (sched.show_wakeups)
				parse_options_usage(NULL, timehist_options, "w", true);
			if (sched.show_next)
				parse_options_usage(NULL, timehist_options, "n", true);
			return -EINVAL;
		}

		return perf_sched__timehist(&sched);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}