// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2011,2016 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 */

#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#include "common.h"

#define MMDC_MAPSR		0x404
#define BP_MMDC_MAPSR_PSD	0
#define BP_MMDC_MAPSR_PSS	4

#define MMDC_MDMISC		0x18
#define BM_MMDC_MDMISC_DDR_TYPE	0x18
#define BP_MMDC_MDMISC_DDR_TYPE	0x3
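
/*
 * DDR_TYPE occupies MDMISC bits [4:3]: BM_MMDC_MDMISC_DDR_TYPE is the
 * field mask and BP_MMDC_MDMISC_DDR_TYPE the shift, so the probe below
 * computes ddr_type = (MDMISC & 0x18) >> 3.
 */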

#define TOTAL_CYCLES		0x0
#define BUSY_CYCLES		0x1
#define READ_ACCESSES		0x2
#define WRITE_ACCESSES		0x3
#define READ_BYTES		0x4
#define WRITE_BYTES		0x5

/* Enables, resets, freezes, overflow profiling */
#define DBG_DIS			0x0
#define DBG_EN			0x1
#define DBG_RST			0x2
#define PRF_FRZ			0x4
#define CYC_OVF			0x8
#define PROFILE_SEL		0x10

#define MMDC_MADPCR0	0x410
#define MMDC_MADPCR1	0x414
#define MMDC_MADPSR0	0x418
#define MMDC_MADPSR1	0x41C
#define MMDC_MADPSR2	0x420
#define MMDC_MADPSR3	0x424
#define MMDC_MADPSR4	0x428
#define MMDC_MADPSR5	0x42C

#define MMDC_NUM_COUNTERS	6

#define MMDC_FLAG_PROFILE_SEL	0x1
#define MMDC_PRF_AXI_ID_CLEAR	0x0

#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)

static int ddr_type;

struct fsl_mmdc_devtype_data {
	unsigned int flags;
};

static const struct fsl_mmdc_devtype_data imx6q_data = {
};

static const struct fsl_mmdc_devtype_data imx6qp_data = {
	.flags = MMDC_FLAG_PROFILE_SEL,
};

static const struct of_device_id imx_mmdc_dt_ids[] = {
	{ .compatible = "fsl,imx6q-mmdc", .data = (void *)&imx6q_data},
	{ .compatible = "fsl,imx6qp-mmdc", .data = (void *)&imx6qp_data},
	{ /* sentinel */ }
};

#ifdef CONFIG_PERF_EVENTS

static enum cpuhp_state cpuhp_mmdc_state;
static DEFINE_IDA(mmdc_ida);

PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, "event=0x01")
PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02")
PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03")
PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04")
PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001");
PMU_EVENT_ATTR_STRING(write-bytes, mmdc_pmu_write_bytes, "event=0x05")
PMU_EVENT_ATTR_STRING(write-bytes.unit, mmdc_pmu_write_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(write-bytes.scale, mmdc_pmu_write_bytes_scale, "0.000001");

struct mmdc_pmu {
	struct pmu pmu;
	void __iomem *mmdc_base;
	cpumask_t cpu;
	struct hrtimer hrtimer;
	unsigned int active_events;
	int id;
	struct device *dev;
	struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
	struct hlist_node node;
	const struct fsl_mmdc_devtype_data *devtype_data;
	struct clk *mmdc_ipg_clk;
};

/*
 * Polling period is set to one second, overflow of total-cycles (the fastest
 * increasing counter) takes ten seconds so one second is safe
 */
static unsigned int mmdc_pmu_poll_period_us = 1000000;

module_param_named(pmu_pmu_poll_period_us, mmdc_pmu_poll_period_us, uint,
		S_IRUGO | S_IWUSR);
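
/*
 * Rough numbers behind the comment above: total-cycles ticks at the MMDC
 * clock (a few hundred MHz on i.MX6, e.g. roughly 400-528 MHz), so a
 * 32-bit counter wraps after about 2^32 / 400e6 ~= 10.7 seconds; polling
 * once per second samples each counter well before it can wrap.
 */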

static ktime_t mmdc_pmu_timer_period(void)
{
	return ns_to_ktime((u64)mmdc_pmu_poll_period_us * 1000);
}

static ssize_t mmdc_pmu_cpumask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mmdc_pmu *pmu_mmdc = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu);
}

static struct device_attribute mmdc_pmu_cpumask_attr =
	__ATTR(cpumask, S_IRUGO, mmdc_pmu_cpumask_show, NULL);

static struct attribute *mmdc_pmu_cpumask_attrs[] = {
	&mmdc_pmu_cpumask_attr.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_cpumask_attr_group = {
	.attrs = mmdc_pmu_cpumask_attrs,
};

static struct attribute *mmdc_pmu_events_attrs[] = {
	&mmdc_pmu_total_cycles.attr.attr,
	&mmdc_pmu_busy_cycles.attr.attr,
	&mmdc_pmu_read_accesses.attr.attr,
	&mmdc_pmu_write_accesses.attr.attr,
	&mmdc_pmu_read_bytes.attr.attr,
	&mmdc_pmu_read_bytes_unit.attr.attr,
	&mmdc_pmu_read_bytes_scale.attr.attr,
	&mmdc_pmu_write_bytes.attr.attr,
	&mmdc_pmu_write_bytes_unit.attr.attr,
	&mmdc_pmu_write_bytes_scale.attr.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_events_attr_group = {
	.name = "events",
	.attrs = mmdc_pmu_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");
PMU_FORMAT_ATTR(axi_id, "config1:0-63");

static struct attribute *mmdc_pmu_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_axi_id.attr,
	NULL,
};

static struct attribute_group mmdc_pmu_format_attr_group = {
	.name = "format",
	.attrs = mmdc_pmu_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&mmdc_pmu_events_attr_group,
	&mmdc_pmu_format_attr_group,
	&mmdc_pmu_cpumask_attr_group,
	NULL,
};

static u32 mmdc_pmu_read_counter(struct mmdc_pmu *pmu_mmdc, int cfg)
{
	void __iomem *mmdc_base, *reg;

	mmdc_base = pmu_mmdc->mmdc_base;

	switch (cfg) {
	case TOTAL_CYCLES:
		reg = mmdc_base + MMDC_MADPSR0;
		break;
	case BUSY_CYCLES:
		reg = mmdc_base + MMDC_MADPSR1;
		break;
	case READ_ACCESSES:
		reg = mmdc_base + MMDC_MADPSR2;
		break;
	case WRITE_ACCESSES:
		reg = mmdc_base + MMDC_MADPSR3;
		break;
	case READ_BYTES:
		reg = mmdc_base + MMDC_MADPSR4;
		break;
	case WRITE_BYTES:
		reg = mmdc_base + MMDC_MADPSR5;
		break;
	default:
		return WARN_ONCE(1,
			"invalid configuration %d for mmdc counter", cfg);
	}
	return readl(reg);
}
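
/*
 * As with other uncore PMUs, all events are counted on a single
 * nominated CPU; the hotplug callback below migrates the perf context
 * to another online CPU when that CPU goes offline, so counting is not
 * lost across hotplug.
 */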

static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct mmdc_pmu *pmu_mmdc = hlist_entry_safe(node, struct mmdc_pmu, node);
	int target;

	if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu))
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
	cpumask_set_cpu(target, &pmu_mmdc->cpu);

	return 0;
}

static bool mmdc_pmu_group_event_is_valid(struct perf_event *event,
					  struct pmu *pmu,
					  unsigned long *used_counters)
{
	int cfg = event->attr.config;

	if (is_software_event(event))
		return true;

	if (event->pmu != pmu)
		return false;

	return !test_and_set_bit(cfg, used_counters);
}

/*
 * Each event has a single fixed-purpose counter, so we can only have a
 * single active event for each at any point in time. Here we just check
 * for duplicates, and rely on mmdc_pmu_event_init to verify that the HW
 * event numbers are valid.
 */
static bool mmdc_pmu_group_is_valid(struct perf_event *event)
{
	struct pmu *pmu = event->pmu;
	struct perf_event *leader = event->group_leader;
	struct perf_event *sibling;
	unsigned long counter_mask = 0;

	set_bit(leader->attr.config, &counter_mask);

	if (event != leader) {
		if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask))
			return false;
	}

	for_each_sibling_event(sibling, leader) {
		if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
			return false;
	}

	return true;
}
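
/*
 * For example, a group such as {read-bytes,write-bytes} passes the
 * duplicate check above, while {read-bytes,read-bytes} is rejected
 * because both members would need the same fixed-purpose counter.
 */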

static int mmdc_pmu_event_init(struct perf_event *event)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	int cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu_mmdc->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	if (event->attr.sample_period)
		return -EINVAL;

	if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS)
		return -EINVAL;

	if (!mmdc_pmu_group_is_valid(event))
		return -EINVAL;

	event->cpu = cpumask_first(&pmu_mmdc->cpu);
	return 0;
}

static void mmdc_pmu_event_update(struct perf_event *event)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = mmdc_pmu_read_counter(pmu_mmdc,
						      event->attr.config);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		new_raw_count) != prev_raw_count);
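
	/*
	 * The hardware counters are 32 bits wide, so the 64-bit
	 * subtraction is masked back down to 32 bits to stay correct
	 * across wraparound: e.g. prev = 0xFFFFFFF0, new = 0x10 gives
	 * (0x10 - 0xFFFFFFF0) & 0xFFFFFFFF = 0x20.
	 */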
	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

	local64_add(delta, &event->count);
}

static void mmdc_pmu_event_start(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	void __iomem *mmdc_base, *reg;
	u32 val;

	mmdc_base = pmu_mmdc->mmdc_base;
	reg = mmdc_base + MMDC_MADPCR0;

	/*
	 * hrtimer is required because mmdc does not provide an interrupt so
	 * polling is necessary
	 */
	hrtimer_start(&pmu_mmdc->hrtimer, mmdc_pmu_timer_period(),
			HRTIMER_MODE_REL_PINNED);

	local64_set(&hwc->prev_count, 0);

	writel(DBG_RST, reg);

	/*
	 * Write the AXI id parameter to MADPCR1.
	 */
	val = event->attr.config1;
	reg = mmdc_base + MMDC_MADPCR1;
	writel(val, reg);

	reg = mmdc_base + MMDC_MADPCR0;
	val = DBG_EN;
	if (pmu_mmdc->devtype_data->flags & MMDC_FLAG_PROFILE_SEL)
		val |= PROFILE_SEL;

	writel(val, reg);
}

static int mmdc_pmu_event_add(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	int cfg = event->attr.config;

	if (flags & PERF_EF_START)
		mmdc_pmu_event_start(event, flags);

	if (pmu_mmdc->mmdc_events[cfg] != NULL)
		return -EAGAIN;

	pmu_mmdc->mmdc_events[cfg] = event;
	pmu_mmdc->active_events++;

	local64_set(&hwc->prev_count, mmdc_pmu_read_counter(pmu_mmdc, cfg));

	return 0;
}

static void mmdc_pmu_event_stop(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	void __iomem *mmdc_base, *reg;

	mmdc_base = pmu_mmdc->mmdc_base;
	reg = mmdc_base + MMDC_MADPCR0;

	writel(PRF_FRZ, reg);

	reg = mmdc_base + MMDC_MADPCR1;
	writel(MMDC_PRF_AXI_ID_CLEAR, reg);

	mmdc_pmu_event_update(event);
}

static void mmdc_pmu_event_del(struct perf_event *event, int flags)
{
	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
	int cfg = event->attr.config;

	pmu_mmdc->mmdc_events[cfg] = NULL;
	pmu_mmdc->active_events--;

	if (pmu_mmdc->active_events == 0)
		hrtimer_cancel(&pmu_mmdc->hrtimer);

	mmdc_pmu_event_stop(event, PERF_EF_UPDATE);
}

static void mmdc_pmu_overflow_handler(struct mmdc_pmu *pmu_mmdc)
{
	int i;

	for (i = 0; i < MMDC_NUM_COUNTERS; i++) {
		struct perf_event *event = pmu_mmdc->mmdc_events[i];

		if (event)
			mmdc_pmu_event_update(event);
	}
}

static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
{
	struct mmdc_pmu *pmu_mmdc = container_of(hrtimer, struct mmdc_pmu,
			hrtimer);

	mmdc_pmu_overflow_handler(pmu_mmdc);
	hrtimer_forward_now(hrtimer, mmdc_pmu_timer_period());

	return HRTIMER_RESTART;
}

static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
		void __iomem *mmdc_base, struct device *dev)
{
	*pmu_mmdc = (struct mmdc_pmu) {
		.pmu = (struct pmu) {
			.parent		= dev,
			.task_ctx_nr	= perf_invalid_context,
			.attr_groups	= attr_groups,
			.event_init	= mmdc_pmu_event_init,
			.add		= mmdc_pmu_event_add,
			.del		= mmdc_pmu_event_del,
			.start		= mmdc_pmu_event_start,
			.stop		= mmdc_pmu_event_stop,
			.read		= mmdc_pmu_event_update,
			.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
		},
		.mmdc_base = mmdc_base,
		.dev = dev,
		.active_events = 0,
	};

	pmu_mmdc->id = ida_alloc(&mmdc_ida, GFP_KERNEL);

	return pmu_mmdc->id;
}

static void imx_mmdc_remove(struct platform_device *pdev)
{
	struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);

	ida_free(&mmdc_ida, pmu_mmdc->id);
	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
	perf_pmu_unregister(&pmu_mmdc->pmu);
	iounmap(pmu_mmdc->mmdc_base);
	clk_disable_unprepare(pmu_mmdc->mmdc_ipg_clk);
	kfree(pmu_mmdc);
}

static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base,
			      struct clk *mmdc_ipg_clk)
{
	struct mmdc_pmu *pmu_mmdc;
	char *name;
	int ret;

	pmu_mmdc = kzalloc(sizeof(*pmu_mmdc), GFP_KERNEL);
	if (!pmu_mmdc) {
		pr_err("failed to allocate PMU device!\n");
		return -ENOMEM;
	}

	/* The first instance registers the hotplug state */
	if (!cpuhp_mmdc_state) {
		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					      "perf/arm/mmdc:online", NULL,
					      mmdc_pmu_offline_cpu);
		if (ret < 0) {
			pr_err("cpuhp_setup_state_multi failed\n");
			goto pmu_free;
		}
		cpuhp_mmdc_state = ret;
	}

	ret = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
	if (ret < 0)
		goto pmu_free;

	name = devm_kasprintf(&pdev->dev,
				GFP_KERNEL, "mmdc%d", ret);
	if (!name) {
		ret = -ENOMEM;
		goto pmu_release_id;
	}

	pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
	pmu_mmdc->devtype_data = device_get_match_data(&pdev->dev);

	hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
			HRTIMER_MODE_REL);
	pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler;

	cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu);

	/* Register the pmu instance for cpu hotplug */
	cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);

	ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1);
	if (ret)
		goto pmu_register_err;

	platform_set_drvdata(pdev, pmu_mmdc);
	return 0;

pmu_register_err:
	pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
	hrtimer_cancel(&pmu_mmdc->hrtimer);
pmu_release_id:
	ida_free(&mmdc_ida, pmu_mmdc->id);
pmu_free:
	kfree(pmu_mmdc);
	return ret;
}

#else
#define imx_mmdc_remove NULL
#define imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk) 0
#endif

static int imx_mmdc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	void __iomem *mmdc_base, *reg;
	struct clk *mmdc_ipg_clk;
	u32 val;
	int err;

	/* the ipg clock is optional */
	mmdc_ipg_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mmdc_ipg_clk))
		mmdc_ipg_clk = NULL;

	err = clk_prepare_enable(mmdc_ipg_clk);
	if (err) {
		dev_err(&pdev->dev, "Unable to enable mmdc ipg clock.\n");
		return err;
	}

	mmdc_base = of_iomap(np, 0);
	WARN_ON(!mmdc_base);

	reg = mmdc_base + MMDC_MDMISC;
	/* Get ddr type */
	val = readl_relaxed(reg);
	ddr_type = (val & BM_MMDC_MDMISC_DDR_TYPE) >>
		 BP_MMDC_MDMISC_DDR_TYPE;

	reg = mmdc_base + MMDC_MAPSR;

	/* Enable automatic power saving */
	val = readl_relaxed(reg);
	val &= ~(1 << BP_MMDC_MAPSR_PSD);
	writel_relaxed(val, reg);

	err = imx_mmdc_perf_init(pdev, mmdc_base, mmdc_ipg_clk);
	if (err) {
		iounmap(mmdc_base);
		clk_disable_unprepare(mmdc_ipg_clk);
	}

	return 0;
}

int imx_mmdc_get_ddr_type(void)
{
	return ddr_type;
}

static struct platform_driver imx_mmdc_driver = {
	.driver		= {
		.name	= "imx-mmdc",
		.of_match_table = imx_mmdc_dt_ids,
	},
	.probe		= imx_mmdc_probe,
	.remove		= imx_mmdc_remove,
};

static int __init imx_mmdc_init(void)
{
	return platform_driver_register(&imx_mmdc_driver);
}
postcore_initcall(imx_mmdc_init);