1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Hypervisor supplied "gpci" ("get performance counter info") performance
6 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
7 * Copyright 2014 IBM Corporation.
10 #define pr_fmt(fmt) "hv-gpci: " fmt
12 #include <linux/init.h>
13 #include <linux/perf_event.h>
14 #include <asm/firmware.h>
15 #include <asm/hvcall.h>
19 #include "hv-common.h"
/*
 * Example raw-event usage:
 * perf stat -e 'hv_gpci/counter_info_version=3,offset=0,length=8,
 * secondary_index=0,starting_index=0xffffffff,request=0x10/' ...
 */

/* u32: the counter-info request number to pass to the hcall, config bits 0-31 */
EVENT_DEFINE_RANGE_FORMAT(request, config, 0, 31);
/*
 * Note that starting_index, phys_processor_idx, sibling_part_id,
 * hw_chip_id, partition_id all refer to the same bit range. They
 * are basically aliases for the starting_index. The specific alias
 * used depends on the event. See REQUEST_IDX_KIND in hv-gpci-requests.h
 */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 32, 63);
/*
 * Aliases of starting_index, all mapping config bits 32-63.
 * NOTE(review): "_LITE" presumably skips some helper generated by the
 * full macro — confirm against EVENT_DEFINE_RANGE_FORMAT* in hv-common.h.
 */
EVENT_DEFINE_RANGE_FORMAT_LITE(phys_processor_idx, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(sibling_part_id, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(hw_chip_id, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(partition_id, config, 32, 63);
/* u16: secondary index for requests that take one, config1 bits 0-15 */
EVENT_DEFINE_RANGE_FORMAT(secondary_index, config1, 0, 15);
/* u8: counter-info layout version to request, config1 bits 16-23 */
EVENT_DEFINE_RANGE_FORMAT(counter_info_version, config1, 16, 23);
/* u8, bytes of data (1-8) */
EVENT_DEFINE_RANGE_FORMAT(length, config1, 24, 31);
/* u32, byte offset */
EVENT_DEFINE_RANGE_FORMAT(offset, config1, 32, 63);
/* sysfs "format" attributes: one entry per event field defined above */
static struct attribute *format_attrs[] = {
	&format_attr_request.attr,
	&format_attr_starting_index.attr,
	&format_attr_phys_processor_idx.attr,
	&format_attr_sibling_part_id.attr,
	&format_attr_hw_chip_id.attr,
	&format_attr_partition_id.attr,
	&format_attr_secondary_index.attr,
	&format_attr_counter_info_version.attr,
	&format_attr_offset.attr,
	&format_attr_length.attr,
/* published as the PMU's "format" directory in sysfs */
static struct attribute_group format_group = {
	.attrs = format_attrs,
/* named events; hv_gpci_event_attrs is defined outside this file */
static struct attribute_group event_group = {
	.attrs = hv_gpci_event_attrs,
/*
 * HV_CAPS_ATTR(_name, _format): generate a read-only device attribute
 * named "_name" whose show() fetches the hypervisor perf capabilities
 * via hv_perf_caps_get() and prints caps._name with _format.
 */
#define HV_CAPS_ATTR(_name, _format)				\
static ssize_t _name##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
	struct hv_perf_caps caps;				\
	unsigned long hret = hv_perf_caps_get(&caps);		\
	return sprintf(page, _format, caps._name);		\
static struct device_attribute hv_caps_attr_##_name = __ATTR_RO(_name)
/* sysfs: report the counter-info version this kernel was built against */
static ssize_t kernel_version_show(struct device *dev,
				   struct device_attribute *attr,
	return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
static DEVICE_ATTR_RO(kernel_version);
/* one read-only sysfs attribute per capability field (see HV_CAPS_ATTR) */
HV_CAPS_ATTR(version, "0x%x\n");
HV_CAPS_ATTR(ga, "%d\n");
HV_CAPS_ATTR(expanded, "%d\n");
HV_CAPS_ATTR(lab, "%d\n");
HV_CAPS_ATTR(collect_privileged, "%d\n");
/* attributes exposed under the PMU's "interface" sysfs directory */
static struct attribute *interface_attrs[] = {
	&dev_attr_kernel_version.attr,
	&hv_caps_attr_version.attr,
	&hv_caps_attr_ga.attr,
	&hv_caps_attr_expanded.attr,
	&hv_caps_attr_lab.attr,
	&hv_caps_attr_collect_privileged.attr,
/* groups the capability/version attributes above */
static struct attribute_group interface_group = {
	.attrs = interface_attrs,
/* all sysfs groups handed to the pmu via .attr_groups below */
static const struct attribute_group *attr_groups[] = {
/* fixed size of the buffer passed to H_GET_PERF_COUNTER_INFO */
#define HGPCI_REQ_BUFFER_SIZE 4096
/* result bytes that fit after the request-parameter header */
#define HGPCI_MAX_DATA_BYTES \
	(HGPCI_REQ_BUFFER_SIZE - sizeof(struct hv_get_perf_counter_info_params))

/* per-CPU hcall argument/result buffer, 64-bit aligned */
static DEFINE_PER_CPU(char, hv_gpci_reqb[HGPCI_REQ_BUFFER_SIZE]) __aligned(sizeof(uint64_t));
/* layout overlaid on hv_gpci_reqb: request header followed by result bytes */
struct hv_gpci_request_buffer {
	struct hv_get_perf_counter_info_params params;
	uint8_t bytes[HGPCI_MAX_DATA_BYTES];
/*
 * Issue one H_GET_PERF_COUNTER_INFO hcall and extract @length bytes of
 * the returned counter data starting at byte @offset.
 *
 * Uses the per-CPU hv_gpci_reqb buffer for both the request parameters
 * and the hypervisor's reply; get_cpu_var()/put_cpu_var() bracket the
 * whole operation.  Returns the hcall return code (unsigned long).
 */
static unsigned long single_gpci_request(u32 req, u32 starting_index,
		u16 secondary_index, u8 version_in, u32 offset, u8 length,

	struct hv_gpci_request_buffer *arg;

	arg = (void *)get_cpu_var(hv_gpci_reqb);
	memset(arg, 0, HGPCI_REQ_BUFFER_SIZE);

	/* request header fields are big-endian on the hypervisor side */
	arg->params.counter_request = cpu_to_be32(req);
	arg->params.starting_index = cpu_to_be32(starting_index);
	arg->params.secondary_index = cpu_to_be16(secondary_index);
	arg->params.counter_info_version_in = version_in;

	ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
			virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
		pr_devel("hcall failed: 0x%lx\n", ret);

	/*
	 * we verify offset and length are within the zeroed buffer at event
	 * init.
	 */
	/*
	 * FIXME(review): the shift count is (i - offset), i.e. 0..7 — a byte
	 * *index*, not a bit count.  For length > 1 this cannot assemble a
	 * multi-byte counter value; it should presumably widen bytes[i] to
	 * u64 and shift by a multiple of 8.  This matches a bug fixed in
	 * mainline ("powerpc/perf/hv-gpci: fix counter value parsing") —
	 * confirm against current upstream hv-gpci.c before relying on
	 * multi-byte counters.
	 */
	for (i = offset; i < offset + length; i++)
		count |= arg->bytes[i] << (i - offset);

	put_cpu_var(hv_gpci_reqb);
/*
 * Read the event's current counter value by issuing the gpci hcall with
 * the parameters unpacked from the event's config/config1 fields.
 */
static u64 h_gpci_get_value(struct perf_event *event)
	unsigned long ret = single_gpci_request(event_get_request(event),
			event_get_starting_index(event),
			event_get_secondary_index(event),
			event_get_counter_info_version(event),
			event_get_offset(event),
			event_get_length(event),
/* perf .read callback: accumulate the delta since the previous snapshot */
static void h_gpci_event_update(struct perf_event *event)
	u64 now = h_gpci_get_value(event);
	/* atomically swap in the new snapshot, then credit the difference */
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
/* perf .start callback: snapshot the current value as the baseline */
static void h_gpci_event_start(struct perf_event *event, int flags)
	local64_set(&event->hw.prev_count, h_gpci_get_value(event));
/* perf .stop (and .del) callback: fold in the final delta */
static void h_gpci_event_stop(struct perf_event *event, int flags)
	h_gpci_event_update(event);
/* perf .add callback: begin counting immediately when PERF_EF_START is set */
static int h_gpci_event_add(struct perf_event *event, int flags)
	if (flags & PERF_EF_START)
		h_gpci_event_start(event, flags);
/*
 * Validate a new event: correct PMU type, no reserved config bits, no
 * branch sampling, sane length (1-8 bytes) and offset within the result
 * buffer (so single_gpci_request() need not bounds-check), plus one
 * trial hcall to prove the request itself works.
 */
static int h_gpci_event_init(struct perf_event *event)

	/* not an event of this PMU */
	if (event->attr.type != event->pmu->type)

	/* config2 is unused */
	if (event->attr.config2) {
		pr_devel("config2 set when reserved\n");

	/* no branch sampling */
	if (has_branch_stack(event))

	length = event_get_length(event);
	if (length < 1 || length > 8) {
		pr_devel("length invalid\n");

	/* last byte within the buffer? */
	if ((event_get_offset(event) + length) > HGPCI_MAX_DATA_BYTES) {
		pr_devel("request outside of buffer: %zu > %zu\n",
				(size_t)event_get_offset(event) + length,
				HGPCI_MAX_DATA_BYTES);

	/* check if the request works... */
	if (single_gpci_request(event_get_request(event),
				event_get_starting_index(event),
				event_get_secondary_index(event),
				event_get_counter_info_version(event),
				event_get_offset(event),
		pr_devel("gpci hcall failed\n");
/* counting-only PMU; perf_invalid_context => no per-task context */
static struct pmu h_gpci_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.attr_groups = attr_groups,
	.event_init  = h_gpci_event_init,
	.add         = h_gpci_event_add,
	/* del reuses stop: just fold in the final delta */
	.del         = h_gpci_event_stop,
	.start       = h_gpci_event_start,
	.stop        = h_gpci_event_stop,
	.read        = h_gpci_event_update,
	.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
/*
 * Init: register the gpci PMU, but only when running as an LPAR guest
 * and the hypervisor reports perf capabilities.
 */
static int hv_gpci_init(void)

	struct hv_perf_caps caps;

	/* compile/run-time layout sanity checks for the hcall structures */
	hv_gpci_assert_offsets_correct();

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		pr_debug("not a virtualized system, not enabling\n");

	hret = hv_perf_caps_get(&caps);
		pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",

	/* sampling not supported */
	h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);

device_initcall(hv_gpci_init);