/*
 * Performance event support for s390x
 *
 * Copyright IBM Corp. 2012
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */
#define KMSG_COMPONENT	"perf"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/export.h>

#include <asm/cpu_mf.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
const char *perf_pmu_name(void)
{
	if (cpum_cf_avail() || cpum_sf_avail())
		return "CPU-measurement facilities (CPUMF)";
	return "pmu";
}
EXPORT_SYMBOL(perf_pmu_name);
int perf_num_counters(void)
{
	int num = 0;

	if (cpum_cf_avail())
		num += PERF_CPUM_CF_MAX_CTR;

	return num;
}
EXPORT_SYMBOL(perf_num_counters);
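/*
 * The SIE control block pointer is read from the interrupted context's
 * stack frame: the s390 KVM entry code is assumed to save it in the
 * empty1 slot of the stack frame before issuing SIE.
 */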
static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
{
	struct stack_frame *stack = (struct stack_frame *) regs->gprs[15];

	if (!stack)
		return NULL;

	return (struct kvm_s390_sie_block *) stack->empty1[0];
}
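/*
 * A sample is attributed to a KVM guest if the interrupted instruction
 * address is the sie_exit label, i.e. the CPU was executing the SIE
 * instruction when the measurement interrupt was delivered.
 */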
static bool is_in_guest(struct pt_regs *regs)
{
	unsigned long ip = instruction_pointer(regs);

	if (user_mode(regs))
		return false;

	return ip == (unsigned long) &sie_exit;
}
static unsigned long guest_is_user_mode(struct pt_regs *regs)
{
	return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE;
}
static unsigned long instruction_pointer_guest(struct pt_regs *regs)
{
	return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN;
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	return is_in_guest(regs) ? instruction_pointer_guest(regs)
				 : instruction_pointer(regs);
}
static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
{
	return guest_is_user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
					: PERF_RECORD_MISC_GUEST_KERNEL;
}
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	if (is_in_guest(regs))
		return perf_misc_guest_flags(regs);

	return user_mode(regs) ? PERF_RECORD_MISC_USER
			       : PERF_RECORD_MISC_KERNEL;
}
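/*
 * Dump the state of the CPU-measurement counter facility for the
 * current CPU: query the counter information block (qctri) and print
 * its version numbers and its authorization, enable and activation
 * controls, followed by a hex dump of the raw information block.
 */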
void perf_event_print_debug(void)
{
	struct cpumf_ctr_info cf_info;
	unsigned long flags;
	int cpu;

	if (!cpum_cf_avail())
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	memset(&cf_info, 0, sizeof(cf_info));
	if (!qctri(&cf_info)) {
		pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
			cpu, cf_info.cfvn, cf_info.csvn,
			cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
		print_hex_dump_bytes("CPUMF Query: ", DUMP_PREFIX_OFFSET,
				     &cf_info, sizeof(cf_info));
	}

	local_irq_restore(flags);
}
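/*
 * Walk one stack area between low and high: record the return address
 * (saved r14, gprs[8]) of each stack frame, follow the backchain until
 * it reaches zero, then check whether a pt_regs interrupt frame follows
 * so the walk can continue on the interrupted context's stack.
 */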
/* See also arch/s390/kernel/traps.c */
static unsigned long __store_trace(struct perf_callchain_entry *entry,
				   unsigned long sp,
				   unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			perf_callchain_store(entry,
					     sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}
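/*
 * Record the kernel call chain of the sampled context: walk the async
 * (interrupt) stack first, then continue on the task's kernel stack.
 */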
void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	unsigned long head;
	struct stack_frame *head_sf;

	if (user_mode(regs))
		return;

	head = regs->gprs[15];
	head_sf = (struct stack_frame *) head;

	if (!head_sf || !head_sf->back_chain)
		return;

	head = head_sf->back_chain;
	head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE,
			     S390_lowcore.async_stack);

	__store_trace(entry, head, S390_lowcore.thread_info,
		      S390_lowcore.thread_info + THREAD_SIZE);
}