// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "builtin.h"
#include "perf.h"

#include "util/evlist.h" // for struct evsel_str_handler
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>

#include "util/trace-event.h"

#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"

#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <limits.h>

#include <linux/list.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <linux/err.h>
35 static struct perf_session
*session
;
37 /* based on kernel/lockdep.c */
38 #define LOCKHASH_BITS 12
39 #define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
41 static struct list_head lockhash_table
[LOCKHASH_SIZE
];
43 #define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
44 #define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
47 struct list_head hash_entry
;
48 struct rb_node rb
; /* used for sorting */
51 * FIXME: perf_evsel__intval() returns u64,
52 * so address of lockdep_map should be dealed as 64bit.
53 * Is there more better solution?
55 void *addr
; /* address of lockdep_map, used as ID */
56 char *name
; /* for strcpy(), we cannot use const */
58 unsigned int nr_acquire
;
59 unsigned int nr_acquired
;
60 unsigned int nr_contended
;
61 unsigned int nr_release
;
63 unsigned int nr_readlock
;
64 unsigned int nr_trylock
;
66 /* these times are in nano sec. */
72 int discard
; /* flag of blacklist */
/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting first event of acquire.
 * As the nature of lock events, there is no guarantee
 * that the first event for the locks are acquire,
 * it can be acquired, contended or release.
 */
#define SEQ_STATE_UNINITIALIZED      0	       /* initial state */
#define SEQ_STATE_RELEASED	1
#define SEQ_STATE_ACQUIRING	2
#define SEQ_STATE_ACQUIRED	3
#define SEQ_STATE_READ_ACQUIRED	4
#define SEQ_STATE_CONTENDED	5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48

/*
 * struct lock_seq_stat:
 * Place to put on state of one lock sequence
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
105 struct lock_seq_stat
{
106 struct list_head list
;
118 struct list_head seq_list
;
121 static struct rb_root thread_stats
;
123 static struct thread_stat
*thread_stat_find(u32 tid
)
125 struct rb_node
*node
;
126 struct thread_stat
*st
;
128 node
= thread_stats
.rb_node
;
130 st
= container_of(node
, struct thread_stat
, rb
);
133 else if (tid
< st
->tid
)
134 node
= node
->rb_left
;
136 node
= node
->rb_right
;
142 static void thread_stat_insert(struct thread_stat
*new)
144 struct rb_node
**rb
= &thread_stats
.rb_node
;
145 struct rb_node
*parent
= NULL
;
146 struct thread_stat
*p
;
149 p
= container_of(*rb
, struct thread_stat
, rb
);
152 if (new->tid
< p
->tid
)
153 rb
= &(*rb
)->rb_left
;
154 else if (new->tid
> p
->tid
)
155 rb
= &(*rb
)->rb_right
;
157 BUG_ON("inserting invalid thread_stat\n");
160 rb_link_node(&new->rb
, parent
, rb
);
161 rb_insert_color(&new->rb
, &thread_stats
);
164 static struct thread_stat
*thread_stat_findnew_after_first(u32 tid
)
166 struct thread_stat
*st
;
168 st
= thread_stat_find(tid
);
172 st
= zalloc(sizeof(struct thread_stat
));
174 pr_err("memory allocation failed\n");
179 INIT_LIST_HEAD(&st
->seq_list
);
181 thread_stat_insert(st
);
186 static struct thread_stat
*thread_stat_findnew_first(u32 tid
);
187 static struct thread_stat
*(*thread_stat_findnew
)(u32 tid
) =
188 thread_stat_findnew_first
;
190 static struct thread_stat
*thread_stat_findnew_first(u32 tid
)
192 struct thread_stat
*st
;
194 st
= zalloc(sizeof(struct thread_stat
));
196 pr_err("memory allocation failed\n");
200 INIT_LIST_HEAD(&st
->seq_list
);
202 rb_link_node(&st
->rb
, NULL
, &thread_stats
.rb_node
);
203 rb_insert_color(&st
->rb
, &thread_stats
);
205 thread_stat_findnew
= thread_stat_findnew_after_first
;
209 /* build simple key function one is bigger than two */
210 #define SINGLE_KEY(member) \
211 static int lock_stat_key_ ## member(struct lock_stat *one, \
212 struct lock_stat *two) \
214 return one->member > two->member; \
217 SINGLE_KEY(nr_acquired
)
218 SINGLE_KEY(nr_contended
)
219 SINGLE_KEY(avg_wait_time
)
220 SINGLE_KEY(wait_time_total
)
221 SINGLE_KEY(wait_time_max
)
223 static int lock_stat_key_wait_time_min(struct lock_stat
*one
,
224 struct lock_stat
*two
)
226 u64 s1
= one
->wait_time_min
;
227 u64 s2
= two
->wait_time_min
;
228 if (s1
== ULLONG_MAX
)
230 if (s2
== ULLONG_MAX
)
237 * name: the value for specify by user
238 * this should be simpler than raw name of member
239 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
242 int (*key
)(struct lock_stat
*, struct lock_stat
*);
245 static const char *sort_key
= "acquired";
247 static int (*compare
)(struct lock_stat
*, struct lock_stat
*);
249 static struct rb_root result
; /* place to store sorted data */
251 #define DEF_KEY_LOCK(name, fn_suffix) \
252 { #name, lock_stat_key_ ## fn_suffix }
253 struct lock_key keys
[] = {
254 DEF_KEY_LOCK(acquired
, nr_acquired
),
255 DEF_KEY_LOCK(contended
, nr_contended
),
256 DEF_KEY_LOCK(avg_wait
, avg_wait_time
),
257 DEF_KEY_LOCK(wait_total
, wait_time_total
),
258 DEF_KEY_LOCK(wait_min
, wait_time_min
),
259 DEF_KEY_LOCK(wait_max
, wait_time_max
),
261 /* extra comparisons much complicated should be here */
266 static int select_key(void)
270 for (i
= 0; keys
[i
].name
; i
++) {
271 if (!strcmp(keys
[i
].name
, sort_key
)) {
272 compare
= keys
[i
].key
;
277 pr_err("Unknown compare key: %s\n", sort_key
);
282 static void insert_to_result(struct lock_stat
*st
,
283 int (*bigger
)(struct lock_stat
*, struct lock_stat
*))
285 struct rb_node
**rb
= &result
.rb_node
;
286 struct rb_node
*parent
= NULL
;
290 p
= container_of(*rb
, struct lock_stat
, rb
);
294 rb
= &(*rb
)->rb_left
;
296 rb
= &(*rb
)->rb_right
;
299 rb_link_node(&st
->rb
, parent
, rb
);
300 rb_insert_color(&st
->rb
, &result
);
303 /* returns left most element of result, and erase it */
304 static struct lock_stat
*pop_from_result(void)
306 struct rb_node
*node
= result
.rb_node
;
311 while (node
->rb_left
)
312 node
= node
->rb_left
;
314 rb_erase(node
, &result
);
315 return container_of(node
, struct lock_stat
, rb
);
318 static struct lock_stat
*lock_stat_findnew(void *addr
, const char *name
)
320 struct list_head
*entry
= lockhashentry(addr
);
321 struct lock_stat
*ret
, *new;
323 list_for_each_entry(ret
, entry
, hash_entry
) {
324 if (ret
->addr
== addr
)
328 new = zalloc(sizeof(struct lock_stat
));
333 new->name
= zalloc(sizeof(char) * strlen(name
) + 1);
339 strcpy(new->name
, name
);
340 new->wait_time_min
= ULLONG_MAX
;
342 list_add(&new->hash_entry
, entry
);
346 pr_err("memory allocation failed\n");
/* callbacks for the four lock tracepoints; one implementation per view */
struct trace_lock_handler {
	int (*acquire_event)(struct evsel *evsel,
			     struct perf_sample *sample);

	int (*acquired_event)(struct evsel *evsel,
			      struct perf_sample *sample);

	int (*contended_event)(struct evsel *evsel,
			       struct perf_sample *sample);

	int (*release_event)(struct evsel *evsel,
			     struct perf_sample *sample);
};
364 static struct lock_seq_stat
*get_seq(struct thread_stat
*ts
, void *addr
)
366 struct lock_seq_stat
*seq
;
368 list_for_each_entry(seq
, &ts
->seq_list
, list
) {
369 if (seq
->addr
== addr
)
373 seq
= zalloc(sizeof(struct lock_seq_stat
));
375 pr_err("memory allocation failed\n");
378 seq
->state
= SEQ_STATE_UNINITIALIZED
;
381 list_add(&seq
->list
, &ts
->seq_list
);
/*
 * Classes of broken lock sequences, used to index bad_hist[].
 * NOTE(review): the enum definitions were dropped by the corruption;
 * names and READ/TRY flag values reconstructed from their uses below.
 */
enum broken_state {
	BROKEN_ACQUIRE,
	BROKEN_ACQUIRED,
	BROKEN_CONTENDED,
	BROKEN_RELEASE,
	BROKEN_MAX,
};

static int bad_hist[BROKEN_MAX];

/* bits of the lock_acquire tracepoint's "flag" field */
enum acquire_flags {
	TRY_LOCK = 1,
	READ_LOCK = 2,
};
400 static int report_lock_acquire_event(struct evsel
*evsel
,
401 struct perf_sample
*sample
)
404 struct lock_stat
*ls
;
405 struct thread_stat
*ts
;
406 struct lock_seq_stat
*seq
;
407 const char *name
= perf_evsel__strval(evsel
, sample
, "name");
408 u64 tmp
= perf_evsel__intval(evsel
, sample
, "lockdep_addr");
409 int flag
= perf_evsel__intval(evsel
, sample
, "flag");
411 memcpy(&addr
, &tmp
, sizeof(void *));
413 ls
= lock_stat_findnew(addr
, name
);
419 ts
= thread_stat_findnew(sample
->tid
);
423 seq
= get_seq(ts
, addr
);
427 switch (seq
->state
) {
428 case SEQ_STATE_UNINITIALIZED
:
429 case SEQ_STATE_RELEASED
:
431 seq
->state
= SEQ_STATE_ACQUIRING
;
435 if (flag
& READ_LOCK
)
437 seq
->state
= SEQ_STATE_READ_ACQUIRED
;
442 case SEQ_STATE_READ_ACQUIRED
:
443 if (flag
& READ_LOCK
) {
451 case SEQ_STATE_ACQUIRED
:
452 case SEQ_STATE_ACQUIRING
:
453 case SEQ_STATE_CONTENDED
:
455 /* broken lock sequence, discard it */
457 bad_hist
[BROKEN_ACQUIRE
]++;
458 list_del_init(&seq
->list
);
462 BUG_ON("Unknown state of lock sequence found!\n");
467 seq
->prev_event_time
= sample
->time
;
472 static int report_lock_acquired_event(struct evsel
*evsel
,
473 struct perf_sample
*sample
)
476 struct lock_stat
*ls
;
477 struct thread_stat
*ts
;
478 struct lock_seq_stat
*seq
;
480 const char *name
= perf_evsel__strval(evsel
, sample
, "name");
481 u64 tmp
= perf_evsel__intval(evsel
, sample
, "lockdep_addr");
483 memcpy(&addr
, &tmp
, sizeof(void *));
485 ls
= lock_stat_findnew(addr
, name
);
491 ts
= thread_stat_findnew(sample
->tid
);
495 seq
= get_seq(ts
, addr
);
499 switch (seq
->state
) {
500 case SEQ_STATE_UNINITIALIZED
:
501 /* orphan event, do nothing */
503 case SEQ_STATE_ACQUIRING
:
505 case SEQ_STATE_CONTENDED
:
506 contended_term
= sample
->time
- seq
->prev_event_time
;
507 ls
->wait_time_total
+= contended_term
;
508 if (contended_term
< ls
->wait_time_min
)
509 ls
->wait_time_min
= contended_term
;
510 if (ls
->wait_time_max
< contended_term
)
511 ls
->wait_time_max
= contended_term
;
513 case SEQ_STATE_RELEASED
:
514 case SEQ_STATE_ACQUIRED
:
515 case SEQ_STATE_READ_ACQUIRED
:
516 /* broken lock sequence, discard it */
518 bad_hist
[BROKEN_ACQUIRED
]++;
519 list_del_init(&seq
->list
);
523 BUG_ON("Unknown state of lock sequence found!\n");
527 seq
->state
= SEQ_STATE_ACQUIRED
;
529 ls
->avg_wait_time
= ls
->nr_contended
? ls
->wait_time_total
/ls
->nr_contended
: 0;
530 seq
->prev_event_time
= sample
->time
;
535 static int report_lock_contended_event(struct evsel
*evsel
,
536 struct perf_sample
*sample
)
539 struct lock_stat
*ls
;
540 struct thread_stat
*ts
;
541 struct lock_seq_stat
*seq
;
542 const char *name
= perf_evsel__strval(evsel
, sample
, "name");
543 u64 tmp
= perf_evsel__intval(evsel
, sample
, "lockdep_addr");
545 memcpy(&addr
, &tmp
, sizeof(void *));
547 ls
= lock_stat_findnew(addr
, name
);
553 ts
= thread_stat_findnew(sample
->tid
);
557 seq
= get_seq(ts
, addr
);
561 switch (seq
->state
) {
562 case SEQ_STATE_UNINITIALIZED
:
563 /* orphan event, do nothing */
565 case SEQ_STATE_ACQUIRING
:
567 case SEQ_STATE_RELEASED
:
568 case SEQ_STATE_ACQUIRED
:
569 case SEQ_STATE_READ_ACQUIRED
:
570 case SEQ_STATE_CONTENDED
:
571 /* broken lock sequence, discard it */
573 bad_hist
[BROKEN_CONTENDED
]++;
574 list_del_init(&seq
->list
);
578 BUG_ON("Unknown state of lock sequence found!\n");
582 seq
->state
= SEQ_STATE_CONTENDED
;
584 ls
->avg_wait_time
= ls
->wait_time_total
/ls
->nr_contended
;
585 seq
->prev_event_time
= sample
->time
;
590 static int report_lock_release_event(struct evsel
*evsel
,
591 struct perf_sample
*sample
)
594 struct lock_stat
*ls
;
595 struct thread_stat
*ts
;
596 struct lock_seq_stat
*seq
;
597 const char *name
= perf_evsel__strval(evsel
, sample
, "name");
598 u64 tmp
= perf_evsel__intval(evsel
, sample
, "lockdep_addr");
600 memcpy(&addr
, &tmp
, sizeof(void *));
602 ls
= lock_stat_findnew(addr
, name
);
608 ts
= thread_stat_findnew(sample
->tid
);
612 seq
= get_seq(ts
, addr
);
616 switch (seq
->state
) {
617 case SEQ_STATE_UNINITIALIZED
:
619 case SEQ_STATE_ACQUIRED
:
621 case SEQ_STATE_READ_ACQUIRED
:
623 BUG_ON(seq
->read_count
< 0);
624 if (!seq
->read_count
) {
629 case SEQ_STATE_ACQUIRING
:
630 case SEQ_STATE_CONTENDED
:
631 case SEQ_STATE_RELEASED
:
632 /* broken lock sequence, discard it */
634 bad_hist
[BROKEN_RELEASE
]++;
637 BUG_ON("Unknown state of lock sequence found!\n");
643 list_del_init(&seq
->list
);
649 /* lock oriented handlers */
650 /* TODO: handlers for CPU oriented, thread oriented */
651 static struct trace_lock_handler report_lock_ops
= {
652 .acquire_event
= report_lock_acquire_event
,
653 .acquired_event
= report_lock_acquired_event
,
654 .contended_event
= report_lock_contended_event
,
655 .release_event
= report_lock_release_event
,
658 static struct trace_lock_handler
*trace_handler
;
660 static int perf_evsel__process_lock_acquire(struct evsel
*evsel
,
661 struct perf_sample
*sample
)
663 if (trace_handler
->acquire_event
)
664 return trace_handler
->acquire_event(evsel
, sample
);
668 static int perf_evsel__process_lock_acquired(struct evsel
*evsel
,
669 struct perf_sample
*sample
)
671 if (trace_handler
->acquired_event
)
672 return trace_handler
->acquired_event(evsel
, sample
);
676 static int perf_evsel__process_lock_contended(struct evsel
*evsel
,
677 struct perf_sample
*sample
)
679 if (trace_handler
->contended_event
)
680 return trace_handler
->contended_event(evsel
, sample
);
684 static int perf_evsel__process_lock_release(struct evsel
*evsel
,
685 struct perf_sample
*sample
)
687 if (trace_handler
->release_event
)
688 return trace_handler
->release_event(evsel
, sample
);
692 static void print_bad_events(int bad
, int total
)
694 /* Output for debug, this have to be removed */
696 const char *name
[4] =
697 { "acquire", "acquired", "contended", "release" };
699 pr_info("\n=== output for debug===\n\n");
700 pr_info("bad: %d, total: %d\n", bad
, total
);
701 pr_info("bad rate: %.2f %%\n", (double)bad
/ (double)total
* 100);
702 pr_info("histogram of events caused bad sequence\n");
703 for (i
= 0; i
< BROKEN_MAX
; i
++)
704 pr_info(" %10s: %d\n", name
[i
], bad_hist
[i
]);
707 /* TODO: various way to print, coloring, nano or milli sec */
708 static void print_result(void)
710 struct lock_stat
*st
;
714 pr_info("%20s ", "Name");
715 pr_info("%10s ", "acquired");
716 pr_info("%10s ", "contended");
718 pr_info("%15s ", "avg wait (ns)");
719 pr_info("%15s ", "total wait (ns)");
720 pr_info("%15s ", "max wait (ns)");
721 pr_info("%15s ", "min wait (ns)");
726 while ((st
= pop_from_result())) {
734 if (strlen(st
->name
) < 16) {
735 /* output raw name */
736 pr_info("%20s ", st
->name
);
738 strncpy(cut_name
, st
->name
, 16);
743 /* cut off name for saving output style */
744 pr_info("%20s ", cut_name
);
747 pr_info("%10u ", st
->nr_acquired
);
748 pr_info("%10u ", st
->nr_contended
);
750 pr_info("%15" PRIu64
" ", st
->avg_wait_time
);
751 pr_info("%15" PRIu64
" ", st
->wait_time_total
);
752 pr_info("%15" PRIu64
" ", st
->wait_time_max
);
753 pr_info("%15" PRIu64
" ", st
->wait_time_min
== ULLONG_MAX
?
754 0 : st
->wait_time_min
);
758 print_bad_events(bad
, total
);
761 static bool info_threads
, info_map
;
763 static void dump_threads(void)
765 struct thread_stat
*st
;
766 struct rb_node
*node
;
769 pr_info("%10s: comm\n", "Thread ID");
771 node
= rb_first(&thread_stats
);
773 st
= container_of(node
, struct thread_stat
, rb
);
774 t
= perf_session__findnew(session
, st
->tid
);
775 pr_info("%10d: %s\n", st
->tid
, thread__comm_str(t
));
776 node
= rb_next(node
);
781 static void dump_map(void)
784 struct lock_stat
*st
;
786 pr_info("Address of instance: name of class\n");
787 for (i
= 0; i
< LOCKHASH_SIZE
; i
++) {
788 list_for_each_entry(st
, &lockhash_table
[i
], hash_entry
) {
789 pr_info(" %p: %s\n", st
->addr
, st
->name
);
794 static int dump_info(void)
804 pr_err("Unknown type of information\n");
810 typedef int (*tracepoint_handler
)(struct evsel
*evsel
,
811 struct perf_sample
*sample
);
813 static int process_sample_event(struct perf_tool
*tool __maybe_unused
,
814 union perf_event
*event
,
815 struct perf_sample
*sample
,
817 struct machine
*machine
)
820 struct thread
*thread
= machine__findnew_thread(machine
, sample
->pid
,
823 if (thread
== NULL
) {
824 pr_debug("problem processing %d event, skipping it.\n",
829 if (evsel
->handler
!= NULL
) {
830 tracepoint_handler f
= evsel
->handler
;
831 err
= f(evsel
, sample
);
839 static void sort_result(void)
842 struct lock_stat
*st
;
844 for (i
= 0; i
< LOCKHASH_SIZE
; i
++) {
845 list_for_each_entry(st
, &lockhash_table
[i
], hash_entry
) {
846 insert_to_result(st
, compare
);
851 static const struct evsel_str_handler lock_tracepoints
[] = {
852 { "lock:lock_acquire", perf_evsel__process_lock_acquire
, }, /* CONFIG_LOCKDEP */
853 { "lock:lock_acquired", perf_evsel__process_lock_acquired
, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
854 { "lock:lock_contended", perf_evsel__process_lock_contended
, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
855 { "lock:lock_release", perf_evsel__process_lock_release
, }, /* CONFIG_LOCKDEP */
860 static int __cmd_report(bool display_info
)
863 struct perf_tool eops
= {
864 .sample
= process_sample_event
,
865 .comm
= perf_event__process_comm
,
866 .namespaces
= perf_event__process_namespaces
,
867 .ordered_events
= true,
869 struct perf_data data
= {
871 .mode
= PERF_DATA_MODE_READ
,
875 session
= perf_session__new(&data
, false, &eops
);
876 if (IS_ERR(session
)) {
877 pr_err("Initializing perf session failed\n");
878 return PTR_ERR(session
);
881 symbol__init(&session
->header
.env
);
883 if (!perf_session__has_traces(session
, "lock record"))
886 if (perf_session__set_tracepoints_handlers(session
, lock_tracepoints
)) {
887 pr_err("Initializing perf session tracepoint handlers failed\n");
894 err
= perf_session__process_events(session
);
899 if (display_info
) /* used for info subcommand */
907 perf_session__delete(session
);
911 static int __cmd_record(int argc
, const char **argv
)
913 const char *record_args
[] = {
914 "record", "-R", "-m", "1024", "-c", "1",
916 unsigned int rec_argc
, i
, j
, ret
;
917 const char **rec_argv
;
919 for (i
= 0; i
< ARRAY_SIZE(lock_tracepoints
); i
++) {
920 if (!is_valid_tracepoint(lock_tracepoints
[i
].name
)) {
921 pr_err("tracepoint %s is not enabled. "
922 "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
923 lock_tracepoints
[i
].name
);
928 rec_argc
= ARRAY_SIZE(record_args
) + argc
- 1;
929 /* factor of 2 is for -e in front of each tracepoint */
930 rec_argc
+= 2 * ARRAY_SIZE(lock_tracepoints
);
932 rec_argv
= calloc(rec_argc
+ 1, sizeof(char *));
936 for (i
= 0; i
< ARRAY_SIZE(record_args
); i
++)
937 rec_argv
[i
] = strdup(record_args
[i
]);
939 for (j
= 0; j
< ARRAY_SIZE(lock_tracepoints
); j
++) {
940 rec_argv
[i
++] = "-e";
941 rec_argv
[i
++] = strdup(lock_tracepoints
[j
].name
);
944 for (j
= 1; j
< (unsigned int)argc
; j
++, i
++)
945 rec_argv
[i
] = argv
[j
];
947 BUG_ON(i
!= rec_argc
);
949 ret
= cmd_record(i
, rec_argv
);
954 int cmd_lock(int argc
, const char **argv
)
956 const struct option lock_options
[] = {
957 OPT_STRING('i', "input", &input_name
, "file", "input file name"),
958 OPT_INCR('v', "verbose", &verbose
, "be more verbose (show symbol address, etc)"),
959 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace
, "dump raw trace in ASCII"),
960 OPT_BOOLEAN('f', "force", &force
, "don't complain, do it"),
964 const struct option info_options
[] = {
965 OPT_BOOLEAN('t', "threads", &info_threads
,
966 "dump thread list in perf.data"),
967 OPT_BOOLEAN('m', "map", &info_map
,
968 "map of lock instances (address:name table)"),
969 OPT_PARENT(lock_options
)
972 const struct option report_options
[] = {
973 OPT_STRING('k', "key", &sort_key
, "acquired",
974 "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
976 OPT_PARENT(lock_options
)
979 const char * const info_usage
[] = {
980 "perf lock info [<options>]",
983 const char *const lock_subcommands
[] = { "record", "report", "script",
985 const char *lock_usage
[] = {
989 const char * const report_usage
[] = {
990 "perf lock report [<options>]",
996 for (i
= 0; i
< LOCKHASH_SIZE
; i
++)
997 INIT_LIST_HEAD(lockhash_table
+ i
);
999 argc
= parse_options_subcommand(argc
, argv
, lock_options
, lock_subcommands
,
1000 lock_usage
, PARSE_OPT_STOP_AT_NON_OPTION
);
1002 usage_with_options(lock_usage
, lock_options
);
1004 if (!strncmp(argv
[0], "rec", 3)) {
1005 return __cmd_record(argc
, argv
);
1006 } else if (!strncmp(argv
[0], "report", 6)) {
1007 trace_handler
= &report_lock_ops
;
1009 argc
= parse_options(argc
, argv
,
1010 report_options
, report_usage
, 0);
1012 usage_with_options(report_usage
, report_options
);
1014 rc
= __cmd_report(false);
1015 } else if (!strcmp(argv
[0], "script")) {
1016 /* Aliased to 'perf script' */
1017 return cmd_script(argc
, argv
);
1018 } else if (!strcmp(argv
[0], "info")) {
1020 argc
= parse_options(argc
, argv
,
1021 info_options
, info_usage
, 0);
1023 usage_with_options(info_usage
, info_options
);
1025 /* recycling report_lock_ops */
1026 trace_handler
= &report_lock_ops
;
1027 rc
= __cmd_report(true);
1029 usage_with_options(lock_usage
, lock_options
);