// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC CPA(Coherency Protocol Agent) hardware event counters support
 *
 * Copyright (C) 2022 HiSilicon Limited
 * Author: Qi Liu <liuqi115@huawei.com>
 *
 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
 */
#define pr_fmt(fmt) "cpa pmu: " fmt
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/smp.h>

#include "hisi_uncore_pmu.h"
/* CPA register definition */
#define CPA_PERF_CTRL		0x1c00
#define CPA_EVENT_CTRL		0x1c04
#define CPA_INT_MASK		0x1c70
#define CPA_INT_STATUS		0x1c78
#define CPA_INT_CLEAR		0x1c7c
#define CPA_EVENT_TYPE0		0x1c80
#define CPA_VERSION		0x1cf0
#define CPA_CNT0_LOWER		0x1d00
#define CPA_CFG_REG		0x0534

/* CPA operation command */
#define CPA_PERF_CTRL_EN	BIT_ULL(0)
#define CPA_EVTYPE_MASK		0xffUL
#define CPA_PM_CTRL		BIT_ULL(9)

/* CPA has 8-counters */
#define CPA_NR_COUNTERS		0x8
#define CPA_COUNTER_BITS	64
#define CPA_NR_EVENTS		0xff
#define CPA_REG_OFFSET		0x8
44 static u32
hisi_cpa_pmu_get_counter_offset(int idx
)
46 return (CPA_CNT0_LOWER
+ idx
* CPA_REG_OFFSET
);
49 static u64
hisi_cpa_pmu_read_counter(struct hisi_pmu
*cpa_pmu
,
50 struct hw_perf_event
*hwc
)
52 return readq(cpa_pmu
->base
+ hisi_cpa_pmu_get_counter_offset(hwc
->idx
));
55 static void hisi_cpa_pmu_write_counter(struct hisi_pmu
*cpa_pmu
,
56 struct hw_perf_event
*hwc
, u64 val
)
58 writeq(val
, cpa_pmu
->base
+ hisi_cpa_pmu_get_counter_offset(hwc
->idx
));
61 static void hisi_cpa_pmu_write_evtype(struct hisi_pmu
*cpa_pmu
, int idx
,
64 u32 reg
, reg_idx
, shift
, val
;
67 * Select the appropriate event select register(CPA_EVENT_TYPE0/1).
68 * There are 2 event select registers for the 8 hardware counters.
69 * Event code is 8-bits and for the former 4 hardware counters,
70 * CPA_EVENT_TYPE0 is chosen. For the latter 4 hardware counters,
71 * CPA_EVENT_TYPE1 is chosen.
73 reg
= CPA_EVENT_TYPE0
+ (idx
/ 4) * 4;
75 shift
= CPA_REG_OFFSET
* reg_idx
;
77 /* Write event code to CPA_EVENT_TYPEx Register */
78 val
= readl(cpa_pmu
->base
+ reg
);
79 val
&= ~(CPA_EVTYPE_MASK
<< shift
);
81 writel(val
, cpa_pmu
->base
+ reg
);
84 static void hisi_cpa_pmu_start_counters(struct hisi_pmu
*cpa_pmu
)
88 val
= readl(cpa_pmu
->base
+ CPA_PERF_CTRL
);
89 val
|= CPA_PERF_CTRL_EN
;
90 writel(val
, cpa_pmu
->base
+ CPA_PERF_CTRL
);
93 static void hisi_cpa_pmu_stop_counters(struct hisi_pmu
*cpa_pmu
)
97 val
= readl(cpa_pmu
->base
+ CPA_PERF_CTRL
);
98 val
&= ~(CPA_PERF_CTRL_EN
);
99 writel(val
, cpa_pmu
->base
+ CPA_PERF_CTRL
);
102 static void hisi_cpa_pmu_disable_pm(struct hisi_pmu
*cpa_pmu
)
106 val
= readl(cpa_pmu
->base
+ CPA_CFG_REG
);
108 writel(val
, cpa_pmu
->base
+ CPA_CFG_REG
);
111 static void hisi_cpa_pmu_enable_pm(struct hisi_pmu
*cpa_pmu
)
115 val
= readl(cpa_pmu
->base
+ CPA_CFG_REG
);
116 val
&= ~(CPA_PM_CTRL
);
117 writel(val
, cpa_pmu
->base
+ CPA_CFG_REG
);
120 static void hisi_cpa_pmu_enable_counter(struct hisi_pmu
*cpa_pmu
,
121 struct hw_perf_event
*hwc
)
125 /* Enable counter index in CPA_EVENT_CTRL register */
126 val
= readl(cpa_pmu
->base
+ CPA_EVENT_CTRL
);
127 val
|= 1 << hwc
->idx
;
128 writel(val
, cpa_pmu
->base
+ CPA_EVENT_CTRL
);
131 static void hisi_cpa_pmu_disable_counter(struct hisi_pmu
*cpa_pmu
,
132 struct hw_perf_event
*hwc
)
136 /* Clear counter index in CPA_EVENT_CTRL register */
137 val
= readl(cpa_pmu
->base
+ CPA_EVENT_CTRL
);
138 val
&= ~(1UL << hwc
->idx
);
139 writel(val
, cpa_pmu
->base
+ CPA_EVENT_CTRL
);
142 static void hisi_cpa_pmu_enable_counter_int(struct hisi_pmu
*cpa_pmu
,
143 struct hw_perf_event
*hwc
)
147 /* Write 0 to enable interrupt */
148 val
= readl(cpa_pmu
->base
+ CPA_INT_MASK
);
149 val
&= ~(1UL << hwc
->idx
);
150 writel(val
, cpa_pmu
->base
+ CPA_INT_MASK
);
153 static void hisi_cpa_pmu_disable_counter_int(struct hisi_pmu
*cpa_pmu
,
154 struct hw_perf_event
*hwc
)
158 /* Write 1 to mask interrupt */
159 val
= readl(cpa_pmu
->base
+ CPA_INT_MASK
);
160 val
|= 1 << hwc
->idx
;
161 writel(val
, cpa_pmu
->base
+ CPA_INT_MASK
);
164 static u32
hisi_cpa_pmu_get_int_status(struct hisi_pmu
*cpa_pmu
)
166 return readl(cpa_pmu
->base
+ CPA_INT_STATUS
);
169 static void hisi_cpa_pmu_clear_int_status(struct hisi_pmu
*cpa_pmu
, int idx
)
171 writel(1 << idx
, cpa_pmu
->base
+ CPA_INT_CLEAR
);
174 static const struct acpi_device_id hisi_cpa_pmu_acpi_match
[] = {
178 MODULE_DEVICE_TABLE(acpi
, hisi_cpa_pmu_acpi_match
);
180 static int hisi_cpa_pmu_init_data(struct platform_device
*pdev
,
181 struct hisi_pmu
*cpa_pmu
)
183 if (device_property_read_u32(&pdev
->dev
, "hisilicon,scl-id",
184 &cpa_pmu
->sicl_id
)) {
185 dev_err(&pdev
->dev
, "Can not read sicl-id\n");
189 if (device_property_read_u32(&pdev
->dev
, "hisilicon,idx-id",
190 &cpa_pmu
->index_id
)) {
191 dev_err(&pdev
->dev
, "Cannot read idx-id\n");
195 cpa_pmu
->ccl_id
= -1;
196 cpa_pmu
->sccl_id
= -1;
197 cpa_pmu
->base
= devm_platform_ioremap_resource(pdev
, 0);
198 if (IS_ERR(cpa_pmu
->base
))
199 return PTR_ERR(cpa_pmu
->base
);
201 cpa_pmu
->identifier
= readl(cpa_pmu
->base
+ CPA_VERSION
);
206 static struct attribute
*hisi_cpa_pmu_format_attr
[] = {
207 HISI_PMU_FORMAT_ATTR(event
, "config:0-15"),
211 static const struct attribute_group hisi_cpa_pmu_format_group
= {
213 .attrs
= hisi_cpa_pmu_format_attr
,
216 static struct attribute
*hisi_cpa_pmu_events_attr
[] = {
217 HISI_PMU_EVENT_ATTR(cpa_cycles
, 0x00),
218 HISI_PMU_EVENT_ATTR(cpa_p1_wr_dat
, 0x61),
219 HISI_PMU_EVENT_ATTR(cpa_p1_rd_dat
, 0x62),
220 HISI_PMU_EVENT_ATTR(cpa_p0_wr_dat
, 0xE1),
221 HISI_PMU_EVENT_ATTR(cpa_p0_rd_dat
, 0xE2),
225 static const struct attribute_group hisi_cpa_pmu_events_group
= {
227 .attrs
= hisi_cpa_pmu_events_attr
,
230 static DEVICE_ATTR(cpumask
, 0444, hisi_cpumask_sysfs_show
, NULL
);
232 static struct attribute
*hisi_cpa_pmu_cpumask_attrs
[] = {
233 &dev_attr_cpumask
.attr
,
237 static const struct attribute_group hisi_cpa_pmu_cpumask_attr_group
= {
238 .attrs
= hisi_cpa_pmu_cpumask_attrs
,
241 static struct device_attribute hisi_cpa_pmu_identifier_attr
=
242 __ATTR(identifier
, 0444, hisi_uncore_pmu_identifier_attr_show
, NULL
);
244 static struct attribute
*hisi_cpa_pmu_identifier_attrs
[] = {
245 &hisi_cpa_pmu_identifier_attr
.attr
,
249 static const struct attribute_group hisi_cpa_pmu_identifier_group
= {
250 .attrs
= hisi_cpa_pmu_identifier_attrs
,
253 static const struct attribute_group
*hisi_cpa_pmu_attr_groups
[] = {
254 &hisi_cpa_pmu_format_group
,
255 &hisi_cpa_pmu_events_group
,
256 &hisi_cpa_pmu_cpumask_attr_group
,
257 &hisi_cpa_pmu_identifier_group
,
261 static const struct hisi_uncore_ops hisi_uncore_cpa_pmu_ops
= {
262 .write_evtype
= hisi_cpa_pmu_write_evtype
,
263 .get_event_idx
= hisi_uncore_pmu_get_event_idx
,
264 .start_counters
= hisi_cpa_pmu_start_counters
,
265 .stop_counters
= hisi_cpa_pmu_stop_counters
,
266 .enable_counter
= hisi_cpa_pmu_enable_counter
,
267 .disable_counter
= hisi_cpa_pmu_disable_counter
,
268 .enable_counter_int
= hisi_cpa_pmu_enable_counter_int
,
269 .disable_counter_int
= hisi_cpa_pmu_disable_counter_int
,
270 .write_counter
= hisi_cpa_pmu_write_counter
,
271 .read_counter
= hisi_cpa_pmu_read_counter
,
272 .get_int_status
= hisi_cpa_pmu_get_int_status
,
273 .clear_int_status
= hisi_cpa_pmu_clear_int_status
,
276 static int hisi_cpa_pmu_dev_probe(struct platform_device
*pdev
,
277 struct hisi_pmu
*cpa_pmu
)
281 ret
= hisi_cpa_pmu_init_data(pdev
, cpa_pmu
);
285 ret
= hisi_uncore_pmu_init_irq(cpa_pmu
, pdev
);
289 cpa_pmu
->counter_bits
= CPA_COUNTER_BITS
;
290 cpa_pmu
->check_event
= CPA_NR_EVENTS
;
291 cpa_pmu
->pmu_events
.attr_groups
= hisi_cpa_pmu_attr_groups
;
292 cpa_pmu
->ops
= &hisi_uncore_cpa_pmu_ops
;
293 cpa_pmu
->num_counters
= CPA_NR_COUNTERS
;
294 cpa_pmu
->dev
= &pdev
->dev
;
295 cpa_pmu
->on_cpu
= -1;
300 static int hisi_cpa_pmu_probe(struct platform_device
*pdev
)
302 struct hisi_pmu
*cpa_pmu
;
306 cpa_pmu
= devm_kzalloc(&pdev
->dev
, sizeof(*cpa_pmu
), GFP_KERNEL
);
310 ret
= hisi_cpa_pmu_dev_probe(pdev
, cpa_pmu
);
314 name
= devm_kasprintf(&pdev
->dev
, GFP_KERNEL
, "hisi_sicl%d_cpa%u",
315 cpa_pmu
->sicl_id
, cpa_pmu
->index_id
);
319 hisi_pmu_init(cpa_pmu
, THIS_MODULE
);
321 /* Power Management should be disabled before using CPA PMU. */
322 hisi_cpa_pmu_disable_pm(cpa_pmu
);
323 ret
= cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE
,
326 dev_err(&pdev
->dev
, "Error %d registering hotplug\n", ret
);
327 hisi_cpa_pmu_enable_pm(cpa_pmu
);
331 ret
= perf_pmu_register(&cpa_pmu
->pmu
, name
, -1);
333 dev_err(cpa_pmu
->dev
, "PMU register failed\n");
334 cpuhp_state_remove_instance_nocalls(
335 CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE
, &cpa_pmu
->node
);
336 hisi_cpa_pmu_enable_pm(cpa_pmu
);
340 platform_set_drvdata(pdev
, cpa_pmu
);
344 static void hisi_cpa_pmu_remove(struct platform_device
*pdev
)
346 struct hisi_pmu
*cpa_pmu
= platform_get_drvdata(pdev
);
348 perf_pmu_unregister(&cpa_pmu
->pmu
);
349 cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE
,
351 hisi_cpa_pmu_enable_pm(cpa_pmu
);
354 static struct platform_driver hisi_cpa_pmu_driver
= {
356 .name
= "hisi_cpa_pmu",
357 .acpi_match_table
= ACPI_PTR(hisi_cpa_pmu_acpi_match
),
358 .suppress_bind_attrs
= true,
360 .probe
= hisi_cpa_pmu_probe
,
361 .remove
= hisi_cpa_pmu_remove
,
364 static int __init
hisi_cpa_pmu_module_init(void)
368 ret
= cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE
,
369 "AP_PERF_ARM_HISI_CPA_ONLINE",
370 hisi_uncore_pmu_online_cpu
,
371 hisi_uncore_pmu_offline_cpu
);
373 pr_err("setup hotplug failed: %d\n", ret
);
377 ret
= platform_driver_register(&hisi_cpa_pmu_driver
);
379 cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE
);
383 module_init(hisi_cpa_pmu_module_init
);
385 static void __exit
hisi_cpa_pmu_module_exit(void)
387 platform_driver_unregister(&hisi_cpa_pmu_driver
);
388 cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE
);
390 module_exit(hisi_cpa_pmu_module_exit
);
392 MODULE_DESCRIPTION("HiSilicon SoC CPA PMU driver");
393 MODULE_LICENSE("GPL v2");
394 MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");