// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC HHA uncore Hardware event counters support
 *
 * Copyright (C) 2017 Hisilicon Limited
 * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
 *         Anurup M <anurup.m@huawei.com>
 *
 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
 */
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/smp.h>

#include "hisi_uncore_pmu.h"

/* HHA register definition */
#define HHA_INT_MASK		0x0804
#define HHA_INT_STATUS		0x0808
#define HHA_INT_CLEAR		0x080C
#define HHA_PERF_CTRL		0x1E00
#define HHA_EVENT_CTRL		0x1E04
#define HHA_EVENT_TYPE0		0x1E80

/*
 * Each counter is 48 bits wide; bits [63:48] are reserved,
 * Read-As-Zero and Writes-Ignored.
 */
#define HHA_CNT0_LOWER		0x1F00

/* HHA has 16 counters */
#define HHA_NR_COUNTERS		0x10

#define HHA_PERF_CTRL_EN	0x1
#define HHA_EVTYPE_NONE		0xff

/*
 * Select the counter register offset from the counter index;
 * each counter is 48 bits wide.
 */
static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx)
{
	return (HHA_CNT0_LOWER + (cntr_idx * 8));
}
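
/*
 * For example: counter 0 maps to offset 0x1F00, counter 1 to 0x1F08,
 * and counter 15 to 0x1F78; each 8-byte slot holds one 48-bit counter.
 */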

static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu,
				     struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;

	if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
		dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
		return 0;
	}

	/* Read 64 bits; as in L3C, the top 16 bits are RAZ */
	return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
}

static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu,
				       struct hw_perf_event *hwc, u64 val)
{
	u32 idx = hwc->idx;

	if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
		dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
		return;
	}

	/* Write 64 bits; as in L3C, the top 16 bits are WI */
	writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
}

static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
				      u32 type)
{
	u32 reg, reg_idx, shift, val;

	/*
	 * Select the appropriate event select register (HHA_EVENT_TYPEx).
	 * There are 4 event select registers for the 16 hardware counters.
	 * The event code is 8 bits: the first 4 hardware counters use
	 * HHA_EVENT_TYPE0, the next 4 use HHA_EVENT_TYPE1, and so on.
	 */
	reg = HHA_EVENT_TYPE0 + 4 * (idx / 4);
	reg_idx = idx % 4;
	shift = 8 * reg_idx;
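
	/*
	 * For example, idx = 9 with type = 0x1c selects HHA_EVENT_TYPE2
	 * (0x1E80 + 4 * 2 = 0x1E88), reg_idx = 1 and shift = 8, so the
	 * event code lands in bits [15:8] of that register.
	 */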

	/* Write event code to HHA_EVENT_TYPEx register */
	val = readl(hha_pmu->base + reg);
	val &= ~(HHA_EVTYPE_NONE << shift);
	val |= (type << shift);
	writel(val, hha_pmu->base + reg);
}

static void hisi_hha_pmu_start_counters(struct hisi_pmu *hha_pmu)
{
	u32 val;

	/*
	 * Set the perf_enable bit in HHA_PERF_CTRL to start event
	 * counting for all enabled counters.
	 */
	val = readl(hha_pmu->base + HHA_PERF_CTRL);
	val |= HHA_PERF_CTRL_EN;
	writel(val, hha_pmu->base + HHA_PERF_CTRL);
}

static void hisi_hha_pmu_stop_counters(struct hisi_pmu *hha_pmu)
{
	u32 val;

	/*
	 * Clear the perf_enable bit in HHA_PERF_CTRL to stop event
	 * counting for all enabled counters.
	 */
	val = readl(hha_pmu->base + HHA_PERF_CTRL);
	val &= ~(HHA_PERF_CTRL_EN);
	writel(val, hha_pmu->base + HHA_PERF_CTRL);
}

static void hisi_hha_pmu_enable_counter(struct hisi_pmu *hha_pmu,
					struct hw_perf_event *hwc)
{
	u32 val;

	/* Enable counter index in HHA_EVENT_CTRL register */
	val = readl(hha_pmu->base + HHA_EVENT_CTRL);
	val |= (1 << hwc->idx);
	writel(val, hha_pmu->base + HHA_EVENT_CTRL);
}

static void hisi_hha_pmu_disable_counter(struct hisi_pmu *hha_pmu,
					 struct hw_perf_event *hwc)
{
	u32 val;

	/* Clear counter index in HHA_EVENT_CTRL register */
	val = readl(hha_pmu->base + HHA_EVENT_CTRL);
	val &= ~(1 << hwc->idx);
	writel(val, hha_pmu->base + HHA_EVENT_CTRL);
}

static void hisi_hha_pmu_enable_counter_int(struct hisi_pmu *hha_pmu,
					    struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 0 to enable the interrupt */
	val = readl(hha_pmu->base + HHA_INT_MASK);
	val &= ~(1 << hwc->idx);
	writel(val, hha_pmu->base + HHA_INT_MASK);
}

static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu,
					     struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 1 to mask the interrupt */
	val = readl(hha_pmu->base + HHA_INT_MASK);
	val |= (1 << hwc->idx);
	writel(val, hha_pmu->base + HHA_INT_MASK);
}

static irqreturn_t hisi_hha_pmu_isr(int irq, void *dev_id)
{
	struct hisi_pmu *hha_pmu = dev_id;
	struct perf_event *event;
	unsigned long overflown;
	int idx;

	/* Read HHA_INT_STATUS register */
	overflown = readl(hha_pmu->base + HHA_INT_STATUS);
	if (!overflown)
		return IRQ_NONE;

	/*
	 * Find the counter index which overflowed if the bit was set,
	 * and handle it.
	 */
	for_each_set_bit(idx, &overflown, HHA_NR_COUNTERS) {
		/* Write 1 to clear the IRQ status flag */
		writel((1 << idx), hha_pmu->base + HHA_INT_CLEAR);

		/* Get the corresponding event struct */
		event = hha_pmu->pmu_events.hw_events[idx];
		if (!event)
			continue;

		hisi_uncore_pmu_event_update(event);
		hisi_uncore_pmu_set_event_period(event);
	}

	return IRQ_HANDLED;
}
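
/*
 * hisi_uncore_pmu_event_update() folds the delta since the last read into
 * the perf event count; hisi_uncore_pmu_set_event_period() (in the shared
 * HiSilicon uncore PMU code) then restarts the counter from the middle of
 * its 48-bit range, leaving headroom before the next overflow interrupt.
 */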

static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
				 struct platform_device *pdev)
{
	int irq, ret;

	/* Read and init IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       dev_name(&pdev->dev), hha_pmu);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Fail to request IRQ:%d ret:%d\n", irq, ret);
		return ret;
	}

	hha_pmu->irq = irq;

	return 0;
}

static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = {
	{ "HISI0243", },
	{}
};
MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match);

static int hisi_hha_pmu_init_data(struct platform_device *pdev,
				  struct hisi_pmu *hha_pmu)
{
	unsigned long long id;
	struct resource *res;
	acpi_status status;

	status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
				       "_UID", NULL, &id);
	if (ACPI_FAILURE(status))
		return -EINVAL;

	hha_pmu->index_id = id;

	/*
	 * Use SCCL_ID and UID to identify the HHA PMU, while
	 * SCCL_ID is in MPIDR[aff2].
	 */
	if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
				     &hha_pmu->sccl_id)) {
		dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
		return -EINVAL;
	}

	/* An HHA PMU is shared by the whole SCCL, so it has no CCL ID */
	hha_pmu->ccl_id = -1;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hha_pmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(hha_pmu->base)) {
		dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n");
		return PTR_ERR(hha_pmu->base);
	}

	return 0;
}
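
/*
 * The sccl_id and index_id read above are combined below into the name
 * registered with perf (e.g. "hisi_sccl1_hha2" for SCCL 1, UID 2), which
 * is also the directory name under /sys/bus/event_source/devices/.
 */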

static struct attribute *hisi_hha_pmu_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	NULL,
};

static const struct attribute_group hisi_hha_pmu_format_group = {
	.name = "format",
	.attrs = hisi_hha_pmu_format_attr,
};

static struct attribute *hisi_hha_pmu_events_attr[] = {
	HISI_PMU_EVENT_ATTR(rx_ops_num,		0x00),
	HISI_PMU_EVENT_ATTR(rx_outer,		0x01),
	HISI_PMU_EVENT_ATTR(rx_sccl,		0x02),
	HISI_PMU_EVENT_ATTR(rx_ccix,		0x03),
	HISI_PMU_EVENT_ATTR(rx_wbi,		0x04),
	HISI_PMU_EVENT_ATTR(rx_wbip,		0x05),
	HISI_PMU_EVENT_ATTR(rx_wtistash,	0x11),
	HISI_PMU_EVENT_ATTR(rd_ddr_64b,		0x1c),
	HISI_PMU_EVENT_ATTR(wr_ddr_64b,		0x1d),
	HISI_PMU_EVENT_ATTR(rd_ddr_128b,	0x1e),
	HISI_PMU_EVENT_ATTR(wr_ddr_128b,	0x1f),
	HISI_PMU_EVENT_ATTR(spill_num,		0x20),
	HISI_PMU_EVENT_ATTR(spill_success,	0x21),
	HISI_PMU_EVENT_ATTR(bi_num,		0x23),
	HISI_PMU_EVENT_ATTR(mediated_num,	0x32),
	HISI_PMU_EVENT_ATTR(tx_snp_num,		0x33),
	HISI_PMU_EVENT_ATTR(tx_snp_outer,	0x34),
	HISI_PMU_EVENT_ATTR(tx_snp_ccix,	0x35),
	HISI_PMU_EVENT_ATTR(rx_snprspdata,	0x38),
	HISI_PMU_EVENT_ATTR(rx_snprsp_outer,	0x3c),
	HISI_PMU_EVENT_ATTR(sdir-lookup,	0x40),
	HISI_PMU_EVENT_ATTR(edir-lookup,	0x41),
	HISI_PMU_EVENT_ATTR(sdir-hit,		0x42),
	HISI_PMU_EVENT_ATTR(edir-hit,		0x43),
	HISI_PMU_EVENT_ATTR(sdir-home-migrate,	0x4c),
	HISI_PMU_EVENT_ATTR(edir-home-migrate,	0x4d),
	NULL,
};

static const struct attribute_group hisi_hha_pmu_events_group = {
	.name = "events",
	.attrs = hisi_hha_pmu_events_attr,
};
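
/*
 * These names appear under the PMU's "events" directory in sysfs, so an
 * event can be counted by name or by raw event code, for example
 * (the instance name depends on the platform):
 *
 *   perf stat -a -e hisi_sccl1_hha2/rx_ops_num/ sleep 1
 *   perf stat -a -e hisi_sccl1_hha2/event=0x1c/ sleep 1
 */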

static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);

static struct attribute *hisi_hha_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
	.attrs = hisi_hha_pmu_cpumask_attrs,
};
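
/*
 * The "cpumask" attribute reports the single CPU this uncore PMU is
 * currently bound to; the perf tool reads it to decide which CPU to open
 * the event on, since uncore events are not per-task.
 */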

static const struct attribute_group *hisi_hha_pmu_attr_groups[] = {
	&hisi_hha_pmu_format_group,
	&hisi_hha_pmu_events_group,
	&hisi_hha_pmu_cpumask_attr_group,
	NULL,
};

static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
	.write_evtype		= hisi_hha_pmu_write_evtype,
	.get_event_idx		= hisi_uncore_pmu_get_event_idx,
	.start_counters		= hisi_hha_pmu_start_counters,
	.stop_counters		= hisi_hha_pmu_stop_counters,
	.enable_counter		= hisi_hha_pmu_enable_counter,
	.disable_counter	= hisi_hha_pmu_disable_counter,
	.enable_counter_int	= hisi_hha_pmu_enable_counter_int,
	.disable_counter_int	= hisi_hha_pmu_disable_counter_int,
	.write_counter		= hisi_hha_pmu_write_counter,
	.read_counter		= hisi_hha_pmu_read_counter,
};

static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
				  struct hisi_pmu *hha_pmu)
{
	int ret;

	ret = hisi_hha_pmu_init_data(pdev, hha_pmu);
	if (ret)
		return ret;

	ret = hisi_hha_pmu_init_irq(hha_pmu, pdev);
	if (ret)
		return ret;

	hha_pmu->num_counters = HHA_NR_COUNTERS;
	hha_pmu->counter_bits = 48;
	hha_pmu->ops = &hisi_uncore_hha_ops;
	hha_pmu->dev = &pdev->dev;
	hha_pmu->on_cpu = -1;
	hha_pmu->check_event = 0x65;

	return 0;
}
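
/*
 * check_event is the highest event code the common event_init path will
 * accept: hisi_uncore_pmu_event_init() rejects any config value above it,
 * so event codes greater than 0x65 fail at perf_event_open() time.
 */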

static int hisi_hha_pmu_probe(struct platform_device *pdev)
{
	struct hisi_pmu *hha_pmu;
	char *name;
	int ret;

	hha_pmu = devm_kzalloc(&pdev->dev, sizeof(*hha_pmu), GFP_KERNEL);
	if (!hha_pmu)
		return -ENOMEM;

	platform_set_drvdata(pdev, hha_pmu);

	ret = hisi_hha_pmu_dev_probe(pdev, hha_pmu);
	if (ret)
		return ret;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
				       &hha_pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		return ret;
	}

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
			      hha_pmu->sccl_id, hha_pmu->index_id);
	hha_pmu->pmu = (struct pmu) {
		.name		= name,
		.module		= THIS_MODULE,
		.task_ctx_nr	= perf_invalid_context,
		.event_init	= hisi_uncore_pmu_event_init,
		.pmu_enable	= hisi_uncore_pmu_enable,
		.pmu_disable	= hisi_uncore_pmu_disable,
		.add		= hisi_uncore_pmu_add,
		.del		= hisi_uncore_pmu_del,
		.start		= hisi_uncore_pmu_start,
		.stop		= hisi_uncore_pmu_stop,
		.read		= hisi_uncore_pmu_read,
		.attr_groups	= hisi_hha_pmu_attr_groups,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};
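
	/*
	 * PERF_PMU_CAP_NO_EXCLUDE tells the perf core that this PMU cannot
	 * filter by privilege level, so events requesting exclude_user,
	 * exclude_kernel, etc. are rejected rather than silently miscounted.
	 */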

	ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
	if (ret) {
		dev_err(hha_pmu->dev, "HHA PMU register failed!\n");
		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
					    &hha_pmu->node);
	}

	return ret;
}

static int hisi_hha_pmu_remove(struct platform_device *pdev)
{
	struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&hha_pmu->pmu);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
				    &hha_pmu->node);

	return 0;
}

static struct platform_driver hisi_hha_pmu_driver = {
	.driver = {
		.name = "hisi_hha_pmu",
		.acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = hisi_hha_pmu_probe,
	.remove = hisi_hha_pmu_remove,
};

static int __init hisi_hha_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
				      "AP_PERF_ARM_HISI_HHA_ONLINE",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("HHA PMU: Error setup hotplug, ret = %d;\n", ret);
		return ret;
	}

	ret = platform_driver_register(&hisi_hha_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);

	return ret;
}
module_init(hisi_hha_pmu_module_init);
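
/*
 * The online/offline callbacks registered above come from the shared
 * HiSilicon uncore PMU code: the first online CPU in the PMU's SCCL is
 * chosen as hha_pmu->on_cpu, and when that CPU goes offline the perf
 * context and interrupt affinity migrate to another CPU in the SCCL.
 */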

static void __exit hisi_hha_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_hha_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
}
module_exit(hisi_hha_pmu_module_exit);

MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");