#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <linux/rbtree.h>
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
static const char *input_name;

static int alloc_flag;
static int caller_flag;

static int alloc_lines = -1;
static int caller_lines = -1;

/* set by the --raw-ip option below */
static int raw_ip;

static char default_sort_order[] = "frag,hit,bytes";

static int *cpunode_map;
static int max_cpu_num;
static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;
#define PATH_SYS_NODE	"/sys/devices/system/node"

/* wrapper implied by the container_of(tool, struct perf_kmem, tool) use below */
struct perf_kmem {
	struct perf_tool	tool;
	struct perf_session	*session;
};
static void init_cpunode_map(void)
{
	FILE *fp;
	int i;

	fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
	if (!fp) {
		max_cpu_num = 4096;
		return;
	}

	if (fscanf(fp, "%d", &max_cpu_num) < 1)
		die("Failed to read 'kernel_max' from sysfs");

	/* kernel_max is the highest possible CPU index, hence the +1 */
	max_cpu_num++;

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map)
		die("calloc");
	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;
	fclose(fp);
}
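/*
 * Fill cpunode_map[cpu] with the NUMA node each CPU belongs to by
 * scanning the node%u/cpu%u symlink layout under PATH_SYS_NODE.
 */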
static void setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];

	init_cpunode_map();

	dir1 = opendir(PATH_SYS_NODE);
	if (!dir1)
		return;

	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR ||
		    sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK ||
			    sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
}
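/*
 * Record one allocation in the pointer-keyed tree: an existing entry
 * for the same ptr has its counters bumped, otherwise a new node is
 * linked at the leaf found by the walk and rebalanced in.
 */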
static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			      int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
}
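/*
 * Same walk as insert_alloc_stat(), but keyed by call site so the
 * caller report can aggregate every allocation made from one place.
 */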
static void insert_caller_stat(unsigned long call_site,
			       int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}
}
static void process_alloc_event(void *data,
				struct event_format *event,
				int cpu,
				u64 timestamp __used,
				struct thread *thread __used,
				int node)
{
	unsigned long call_site;
	unsigned long ptr;
	int bytes_req;
	int bytes_alloc;

	ptr = raw_field_value(event, "ptr", data);
	call_site = raw_field_value(event, "call_site", data);
	bytes_req = raw_field_value(event, "bytes_req", data);
	bytes_alloc = raw_field_value(event, "bytes_alloc", data);

	insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
	insert_caller_stat(call_site, bytes_req, bytes_alloc);

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	if (node) {
		int node1, node2;

		node1 = cpunode_map[cpu];
		node2 = raw_field_value(event, "node", data);
		if (node1 != node2)
			nr_cross_allocs++;
	}
	nr_allocs++;
}
static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);
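/*
 * Binary-search a stat tree with the same comparator used to build it:
 * negative goes left, positive goes right, zero is a match.
 */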
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);
		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}
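/*
 * A "ping-pong" is an object freed on a different CPU than the one
 * that allocated it; both the allocation entry and its call site get
 * charged when that happens.
 */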
static void process_free_event(void *data,
			       struct event_format *event,
			       int cpu,
			       u64 timestamp __used,
			       struct thread *thread __used)
{
	unsigned long ptr;
	struct alloc_stat *s_alloc, *s_caller;

	ptr = raw_field_value(event, "ptr", data);

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return;

	if (cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		if (s_caller)
			s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;
}
static void process_raw_event(struct perf_tool *tool,
			      union perf_event *raw_event __used, void *data,
			      int cpu, u64 timestamp, struct thread *thread)
{
	struct perf_kmem *kmem = container_of(tool, struct perf_kmem, tool);
	struct event_format *event;
	int type;

	type = trace_parse_common_type(kmem->session->pevent, data);
	event = pevent_find_event(kmem->session->pevent, type);

	if (!strcmp(event->name, "kmalloc") ||
	    !strcmp(event->name, "kmem_cache_alloc")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 0);
		return;
	}

	if (!strcmp(event->name, "kmalloc_node") ||
	    !strcmp(event->name, "kmem_cache_alloc_node")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 1);
		return;
	}

	if (!strcmp(event->name, "kfree") ||
	    !strcmp(event->name, "kmem_cache_free")) {
		process_free_event(data, event, cpu, timestamp, thread);
		return;
	}
}
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel __used,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	process_raw_event(tool, event, sample->raw_data, sample->cpu,
			  sample->time, thread);

	return 0;
}
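/*
 * Tool callbacks: ordered_samples makes the session layer deliver
 * samples in timestamp order, so events are replayed as they occurred.
 */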
static struct perf_kmem perf_kmem = {
	.tool = {
		.sample		 = process_sample_event,
		.comm		 = perf_event__process_comm,
		.ordered_samples = true,
	},
};
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}
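/*
 * Worked example (hypothetical numbers): a 1000-byte request served
 * from a 1024-byte slab object wastes 24 bytes, i.e.
 * fragmentation(1000, 1024) == 100.0 - (100.0 * 1000 / 1024) ~= 2.34%.
 */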
static void __print_result(struct rb_root *root, struct perf_session *session,
			   int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine;

	printf("%.102s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.102s\n", graph_dotted_line);

	next = rb_first(root);

	machine = perf_session__find_host_machine(session);
	if (!machine) {
		pr_err("__print_result: couldn't find kernel information\n");
		return;
	}

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_function(machine,
							addr, &map, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.102s\n", graph_dotted_line);
}
static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}
static void print_result(struct perf_session *session)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_summary();
}
struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);
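/*
 * Multi-key sort: the comparators are tried in the order the user
 * listed them; the first one returning non-zero decides the ordering.
 */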
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp < 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
			  struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_insert(root_sorted, data, sort_list);
	}
}
static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}
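/*
 * Stat mode: open the recorded data file, replay its trace events
 * through the tool callbacks, then sort and print both reports.
 */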
static int __cmd_kmem(void)
{
	int err = -EINVAL;
	struct perf_session *session;

	session = perf_session__new(input_name, O_RDONLY, 0, false,
				    &perf_kmem.tool);
	if (session == NULL)
		return -ENOMEM;

	perf_kmem.session = session;

	if (perf_session__create_kernel_maps(session) < 0)
		goto out_delete;

	if (!perf_session__has_traces(session, "kmem record"))
		goto out_delete;

	setup_pager();
	err = perf_session__process_events(session, &perf_kmem.tool);
	if (err != 0)
		goto out_delete;
	sort_result();
	print_result(session);
out_delete:
	perf_session__delete(session);
	return err;
}
static const char * const kmem_usage[] = {
	"perf kmem [<options>] {record|stat}",
	NULL
};
static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};
static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "call_site",
	.cmp	= callsite_cmp,
};
static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};
static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};
static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	double x, y;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};
static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};
static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

#define NUM_AVAIL_SORTS \
	(int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			sort = malloc(sizeof(*sort));
			if (!sort)
				die("malloc");
			memcpy(sort, avail_sorts[i], sizeof(*sort));
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);

	if (!str)
		die("strdup");

	while (true) {
		tok = strsep(&str, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}
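/*
 * For example, the default sort order "frag,hit,bytes" splits into
 * three sort_dimension entries, ordering rows by fragmentation first,
 * then by hit count, then by allocated bytes.
 */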
static int parse_sort_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	if (!arg)
		return -1;

	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);
}
static int parse_caller_opt(const struct option *opt __used,
			    const char *arg __used, int unset __used)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}
static int parse_alloc_opt(const struct option *opt __used,
			   const char *arg __used, int unset __used)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}
static int parse_line_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}
static const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics",
			   parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics",
			   parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, call_site, bytes, hit, pingpong, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num",
		     "show n lines",
		     parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_END()
};
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
};
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}
int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	symbol__init();

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strcmp(argv[0], "stat")) {
		setup_cpunode_map();

		if (list_empty(&caller_sort))
			setup_sorting(&caller_sort, default_sort_order);
		if (list_empty(&alloc_sort))
			setup_sorting(&alloc_sort, default_sort_order);

		return __cmd_kmem();
	} else
		usage_with_options(kmem_usage, kmem_options);

	return 0;
}