Merge tag 'block-5.11-2021-01-10' of git://git.kernel.dk/linux-block
[linux/fpc-iii.git] / drivers / perf / hisilicon / hisi_uncore_ddrc_pmu.c
blob5ac6c9113767eae6a4c64bb66035ecc06439caa0
// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC DDRC uncore Hardware event counters support
 *
 * Copyright (C) 2017 Hisilicon Limited
 * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
 *         Anurup M <anurup.m@huawei.com>
 *
 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
 */
11 #include <linux/acpi.h>
12 #include <linux/bug.h>
13 #include <linux/cpuhotplug.h>
14 #include <linux/interrupt.h>
15 #include <linux/irq.h>
16 #include <linux/list.h>
17 #include <linux/platform_device.h>
18 #include <linux/smp.h>
20 #include "hisi_uncore_pmu.h"
22 /* DDRC register definition */
23 #define DDRC_PERF_CTRL 0x010
24 #define DDRC_FLUX_WR 0x380
25 #define DDRC_FLUX_RD 0x384
26 #define DDRC_FLUX_WCMD 0x388
27 #define DDRC_FLUX_RCMD 0x38c
28 #define DDRC_PRE_CMD 0x3c0
29 #define DDRC_ACT_CMD 0x3c4
30 #define DDRC_RNK_CHG 0x3cc
31 #define DDRC_RW_CHG 0x3d0
32 #define DDRC_EVENT_CTRL 0x6C0
33 #define DDRC_INT_MASK 0x6c8
34 #define DDRC_INT_STATUS 0x6cc
35 #define DDRC_INT_CLEAR 0x6d0
36 #define DDRC_VERSION 0x710
38 /* DDRC has 8-counters */
39 #define DDRC_NR_COUNTERS 0x8
40 #define DDRC_PERF_CTRL_EN 0x2
43 * For DDRC PMU, there are eight-events and every event has been mapped
44 * to fixed-purpose counters which register offset is not consistent.
45 * Therefore there is no write event type and we assume that event
46 * code (0 to 7) is equal to counter index in PMU driver.
48 #define GET_DDRC_EVENTID(hwc) (hwc->config_base & 0x7)
50 static const u32 ddrc_reg_off[] = {
51 DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
52 DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
56 * Select the counter register offset using the counter index.
57 * In DDRC there are no programmable counter, the count
58 * is readed form the statistics counter register itself.
60 static u32 hisi_ddrc_pmu_get_counter_offset(int cntr_idx)
62 return ddrc_reg_off[cntr_idx];
65 static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu,
66 struct hw_perf_event *hwc)
68 /* Use event code as counter index */
69 u32 idx = GET_DDRC_EVENTID(hwc);
71 if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
72 dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
73 return 0;
76 return readl(ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
79 static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu,
80 struct hw_perf_event *hwc, u64 val)
82 u32 idx = GET_DDRC_EVENTID(hwc);
84 if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
85 dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
86 return;
89 writel((u32)val,
90 ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
94 * For DDRC PMU, event has been mapped to fixed-purpose counter by hardware,
95 * so there is no need to write event type.
97 static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
98 u32 type)
102 static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu)
104 u32 val;
106 /* Set perf_enable in DDRC_PERF_CTRL to start event counting */
107 val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
108 val |= DDRC_PERF_CTRL_EN;
109 writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
112 static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu)
114 u32 val;
116 /* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
117 val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
118 val &= ~DDRC_PERF_CTRL_EN;
119 writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
122 static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
123 struct hw_perf_event *hwc)
125 u32 val;
127 /* Set counter index(event code) in DDRC_EVENT_CTRL register */
128 val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
129 val |= (1 << GET_DDRC_EVENTID(hwc));
130 writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
133 static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
134 struct hw_perf_event *hwc)
136 u32 val;
138 /* Clear counter index(event code) in DDRC_EVENT_CTRL register */
139 val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
140 val &= ~(1 << GET_DDRC_EVENTID(hwc));
141 writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
144 static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
146 struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
147 unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
148 struct hw_perf_event *hwc = &event->hw;
149 /* For DDRC PMU, we use event code as counter index */
150 int idx = GET_DDRC_EVENTID(hwc);
152 if (test_bit(idx, used_mask))
153 return -EAGAIN;
155 set_bit(idx, used_mask);
157 return idx;
160 static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu,
161 struct hw_perf_event *hwc)
163 u32 val;
165 /* Write 0 to enable interrupt */
166 val = readl(ddrc_pmu->base + DDRC_INT_MASK);
167 val &= ~(1 << GET_DDRC_EVENTID(hwc));
168 writel(val, ddrc_pmu->base + DDRC_INT_MASK);
171 static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu,
172 struct hw_perf_event *hwc)
174 u32 val;
176 /* Write 1 to mask interrupt */
177 val = readl(ddrc_pmu->base + DDRC_INT_MASK);
178 val |= (1 << GET_DDRC_EVENTID(hwc));
179 writel(val, ddrc_pmu->base + DDRC_INT_MASK);
182 static irqreturn_t hisi_ddrc_pmu_isr(int irq, void *dev_id)
184 struct hisi_pmu *ddrc_pmu = dev_id;
185 struct perf_event *event;
186 unsigned long overflown;
187 int idx;
189 /* Read the DDRC_INT_STATUS register */
190 overflown = readl(ddrc_pmu->base + DDRC_INT_STATUS);
191 if (!overflown)
192 return IRQ_NONE;
195 * Find the counter index which overflowed if the bit was set
196 * and handle it
198 for_each_set_bit(idx, &overflown, DDRC_NR_COUNTERS) {
199 /* Write 1 to clear the IRQ status flag */
200 writel((1 << idx), ddrc_pmu->base + DDRC_INT_CLEAR);
202 /* Get the corresponding event struct */
203 event = ddrc_pmu->pmu_events.hw_events[idx];
204 if (!event)
205 continue;
207 hisi_uncore_pmu_event_update(event);
208 hisi_uncore_pmu_set_event_period(event);
211 return IRQ_HANDLED;
214 static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
215 struct platform_device *pdev)
217 int irq, ret;
219 /* Read and init IRQ */
220 irq = platform_get_irq(pdev, 0);
221 if (irq < 0)
222 return irq;
224 ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
225 IRQF_NOBALANCING | IRQF_NO_THREAD,
226 dev_name(&pdev->dev), ddrc_pmu);
227 if (ret < 0) {
228 dev_err(&pdev->dev,
229 "Fail to request IRQ:%d ret:%d\n", irq, ret);
230 return ret;
233 ddrc_pmu->irq = irq;
235 return 0;
238 static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
239 { "HISI0233", },
242 MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
244 static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
245 struct hisi_pmu *ddrc_pmu)
248 * Use the SCCL_ID and DDRC channel ID to identify the
249 * DDRC PMU, while SCCL_ID is in MPIDR[aff2].
251 if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
252 &ddrc_pmu->index_id)) {
253 dev_err(&pdev->dev, "Can not read ddrc channel-id!\n");
254 return -EINVAL;
257 if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
258 &ddrc_pmu->sccl_id)) {
259 dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n");
260 return -EINVAL;
262 /* DDRC PMUs only share the same SCCL */
263 ddrc_pmu->ccl_id = -1;
265 ddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
266 if (IS_ERR(ddrc_pmu->base)) {
267 dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n");
268 return PTR_ERR(ddrc_pmu->base);
271 ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION);
273 return 0;
276 static struct attribute *hisi_ddrc_pmu_format_attr[] = {
277 HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
278 NULL,
281 static const struct attribute_group hisi_ddrc_pmu_format_group = {
282 .name = "format",
283 .attrs = hisi_ddrc_pmu_format_attr,
286 static struct attribute *hisi_ddrc_pmu_events_attr[] = {
287 HISI_PMU_EVENT_ATTR(flux_wr, 0x00),
288 HISI_PMU_EVENT_ATTR(flux_rd, 0x01),
289 HISI_PMU_EVENT_ATTR(flux_wcmd, 0x02),
290 HISI_PMU_EVENT_ATTR(flux_rcmd, 0x03),
291 HISI_PMU_EVENT_ATTR(pre_cmd, 0x04),
292 HISI_PMU_EVENT_ATTR(act_cmd, 0x05),
293 HISI_PMU_EVENT_ATTR(rnk_chg, 0x06),
294 HISI_PMU_EVENT_ATTR(rw_chg, 0x07),
295 NULL,
298 static const struct attribute_group hisi_ddrc_pmu_events_group = {
299 .name = "events",
300 .attrs = hisi_ddrc_pmu_events_attr,
303 static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
305 static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = {
306 &dev_attr_cpumask.attr,
307 NULL,
310 static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
311 .attrs = hisi_ddrc_pmu_cpumask_attrs,
314 static struct device_attribute hisi_ddrc_pmu_identifier_attr =
315 __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
317 static struct attribute *hisi_ddrc_pmu_identifier_attrs[] = {
318 &hisi_ddrc_pmu_identifier_attr.attr,
319 NULL
322 static struct attribute_group hisi_ddrc_pmu_identifier_group = {
323 .attrs = hisi_ddrc_pmu_identifier_attrs,
326 static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = {
327 &hisi_ddrc_pmu_format_group,
328 &hisi_ddrc_pmu_events_group,
329 &hisi_ddrc_pmu_cpumask_attr_group,
330 &hisi_ddrc_pmu_identifier_group,
331 NULL,
334 static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = {
335 .write_evtype = hisi_ddrc_pmu_write_evtype,
336 .get_event_idx = hisi_ddrc_pmu_get_event_idx,
337 .start_counters = hisi_ddrc_pmu_start_counters,
338 .stop_counters = hisi_ddrc_pmu_stop_counters,
339 .enable_counter = hisi_ddrc_pmu_enable_counter,
340 .disable_counter = hisi_ddrc_pmu_disable_counter,
341 .enable_counter_int = hisi_ddrc_pmu_enable_counter_int,
342 .disable_counter_int = hisi_ddrc_pmu_disable_counter_int,
343 .write_counter = hisi_ddrc_pmu_write_counter,
344 .read_counter = hisi_ddrc_pmu_read_counter,
347 static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
348 struct hisi_pmu *ddrc_pmu)
350 int ret;
352 ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu);
353 if (ret)
354 return ret;
356 ret = hisi_ddrc_pmu_init_irq(ddrc_pmu, pdev);
357 if (ret)
358 return ret;
360 ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
361 ddrc_pmu->counter_bits = 32;
362 ddrc_pmu->ops = &hisi_uncore_ddrc_ops;
363 ddrc_pmu->dev = &pdev->dev;
364 ddrc_pmu->on_cpu = -1;
365 ddrc_pmu->check_event = 7;
367 return 0;
370 static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
372 struct hisi_pmu *ddrc_pmu;
373 char *name;
374 int ret;
376 ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL);
377 if (!ddrc_pmu)
378 return -ENOMEM;
380 platform_set_drvdata(pdev, ddrc_pmu);
382 ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu);
383 if (ret)
384 return ret;
386 ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
387 &ddrc_pmu->node);
388 if (ret) {
389 dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret);
390 return ret;
393 name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_ddrc%u",
394 ddrc_pmu->sccl_id, ddrc_pmu->index_id);
395 ddrc_pmu->pmu = (struct pmu) {
396 .name = name,
397 .module = THIS_MODULE,
398 .task_ctx_nr = perf_invalid_context,
399 .event_init = hisi_uncore_pmu_event_init,
400 .pmu_enable = hisi_uncore_pmu_enable,
401 .pmu_disable = hisi_uncore_pmu_disable,
402 .add = hisi_uncore_pmu_add,
403 .del = hisi_uncore_pmu_del,
404 .start = hisi_uncore_pmu_start,
405 .stop = hisi_uncore_pmu_stop,
406 .read = hisi_uncore_pmu_read,
407 .attr_groups = hisi_ddrc_pmu_attr_groups,
408 .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
411 ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
412 if (ret) {
413 dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
414 cpuhp_state_remove_instance_nocalls(
415 CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node);
416 irq_set_affinity_hint(ddrc_pmu->irq, NULL);
419 return ret;
422 static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
424 struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);
426 perf_pmu_unregister(&ddrc_pmu->pmu);
427 cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
428 &ddrc_pmu->node);
429 irq_set_affinity_hint(ddrc_pmu->irq, NULL);
431 return 0;
434 static struct platform_driver hisi_ddrc_pmu_driver = {
435 .driver = {
436 .name = "hisi_ddrc_pmu",
437 .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
438 .suppress_bind_attrs = true,
440 .probe = hisi_ddrc_pmu_probe,
441 .remove = hisi_ddrc_pmu_remove,
444 static int __init hisi_ddrc_pmu_module_init(void)
446 int ret;
448 ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
449 "AP_PERF_ARM_HISI_DDRC_ONLINE",
450 hisi_uncore_pmu_online_cpu,
451 hisi_uncore_pmu_offline_cpu);
452 if (ret) {
453 pr_err("DDRC PMU: setup hotplug, ret = %d\n", ret);
454 return ret;
457 ret = platform_driver_register(&hisi_ddrc_pmu_driver);
458 if (ret)
459 cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
461 return ret;
463 module_init(hisi_ddrc_pmu_module_init);
465 static void __exit hisi_ddrc_pmu_module_exit(void)
467 platform_driver_unregister(&hisi_ddrc_pmu_driver);
468 cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
471 module_exit(hisi_ddrc_pmu_module_exit);
473 MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
474 MODULE_LICENSE("GPL v2");
475 MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
476 MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");