// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2011,2016 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 */
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
/* MMDC power-saving control/status register and bit positions */
#define MMDC_MAPSR		0x404
#define BP_MMDC_MAPSR_PSD	0
#define BP_MMDC_MAPSR_PSS	4

/* MMDC misc register: DDR type field (mask and shift) */
#define MMDC_MDMISC		0x18
#define BM_MMDC_MDMISC_DDR_TYPE	0x18
#define BP_MMDC_MDMISC_DDR_TYPE	0x3

/* Fixed-purpose profiling counter identifiers (perf event config values) */
#define TOTAL_CYCLES		0x0
#define BUSY_CYCLES		0x1
#define READ_ACCESSES		0x2
#define WRITE_ACCESSES		0x3
#define READ_BYTES		0x4
#define WRITE_BYTES		0x5

/* Enables, resets, freezes, overflow profiling (MADPCR0 control bits) */
#define DBG_DIS			0x0
#define DBG_EN			0x1
#define DBG_RST			0x2
#define PRF_FRZ			0x4
#define CYC_OVF			0x8
#define PROFILE_SEL		0x10

/* Profiling control and per-counter status registers */
#define MMDC_MADPCR0	0x410
#define MMDC_MADPCR1	0x414
#define MMDC_MADPSR0	0x418
#define MMDC_MADPSR1	0x41C
#define MMDC_MADPSR2	0x420
#define MMDC_MADPSR3	0x424
#define MMDC_MADPSR4	0x428
#define MMDC_MADPSR5	0x42C

#define MMDC_NUM_COUNTERS	6

#define MMDC_FLAG_PROFILE_SEL	0x1
#define MMDC_PRF_AXI_ID_CLEAR	0x0

#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
/* Per-compatible quirk flags (MMDC_FLAG_*) attached via of_device_id.data */
struct fsl_mmdc_devtype_data {
	unsigned int flags;
};
67 static const struct fsl_mmdc_devtype_data imx6q_data
= {
70 static const struct fsl_mmdc_devtype_data imx6qp_data
= {
71 .flags
= MMDC_FLAG_PROFILE_SEL
,
74 static const struct of_device_id imx_mmdc_dt_ids
[] = {
75 { .compatible
= "fsl,imx6q-mmdc", .data
= (void *)&imx6q_data
},
76 { .compatible
= "fsl,imx6qp-mmdc", .data
= (void *)&imx6qp_data
},
80 #ifdef CONFIG_PERF_EVENTS
82 static enum cpuhp_state cpuhp_mmdc_state
;
83 static DEFINE_IDA(mmdc_ida
);
85 PMU_EVENT_ATTR_STRING(total
-cycles
, mmdc_pmu_total_cycles
, "event=0x00")
86 PMU_EVENT_ATTR_STRING(busy
-cycles
, mmdc_pmu_busy_cycles
, "event=0x01")
87 PMU_EVENT_ATTR_STRING(read
-accesses
, mmdc_pmu_read_accesses
, "event=0x02")
88 PMU_EVENT_ATTR_STRING(write
-accesses
, mmdc_pmu_write_accesses
, "event=0x03")
89 PMU_EVENT_ATTR_STRING(read
-bytes
, mmdc_pmu_read_bytes
, "event=0x04")
90 PMU_EVENT_ATTR_STRING(read
-bytes
.unit
, mmdc_pmu_read_bytes_unit
, "MB");
91 PMU_EVENT_ATTR_STRING(read
-bytes
.scale
, mmdc_pmu_read_bytes_scale
, "0.000001");
92 PMU_EVENT_ATTR_STRING(write
-bytes
, mmdc_pmu_write_bytes
, "event=0x05")
93 PMU_EVENT_ATTR_STRING(write
-bytes
.unit
, mmdc_pmu_write_bytes_unit
, "MB");
94 PMU_EVENT_ATTR_STRING(write
-bytes
.scale
, mmdc_pmu_write_bytes_scale
, "0.000001");
98 void __iomem
*mmdc_base
;
100 struct hrtimer hrtimer
;
101 unsigned int active_events
;
103 struct perf_event
*mmdc_events
[MMDC_NUM_COUNTERS
];
104 struct hlist_node node
;
105 struct fsl_mmdc_devtype_data
*devtype_data
;
109 * Polling period is set to one second, overflow of total-cycles (the fastest
110 * increasing counter) takes ten seconds so one second is safe
112 static unsigned int mmdc_pmu_poll_period_us
= 1000000;
114 module_param_named(pmu_pmu_poll_period_us
, mmdc_pmu_poll_period_us
, uint
,
117 static ktime_t
mmdc_pmu_timer_period(void)
119 return ns_to_ktime((u64
)mmdc_pmu_poll_period_us
* 1000);
122 static ssize_t
mmdc_pmu_cpumask_show(struct device
*dev
,
123 struct device_attribute
*attr
, char *buf
)
125 struct mmdc_pmu
*pmu_mmdc
= dev_get_drvdata(dev
);
127 return cpumap_print_to_pagebuf(true, buf
, &pmu_mmdc
->cpu
);
130 static struct device_attribute mmdc_pmu_cpumask_attr
=
131 __ATTR(cpumask
, S_IRUGO
, mmdc_pmu_cpumask_show
, NULL
);
133 static struct attribute
*mmdc_pmu_cpumask_attrs
[] = {
134 &mmdc_pmu_cpumask_attr
.attr
,
138 static struct attribute_group mmdc_pmu_cpumask_attr_group
= {
139 .attrs
= mmdc_pmu_cpumask_attrs
,
142 static struct attribute
*mmdc_pmu_events_attrs
[] = {
143 &mmdc_pmu_total_cycles
.attr
.attr
,
144 &mmdc_pmu_busy_cycles
.attr
.attr
,
145 &mmdc_pmu_read_accesses
.attr
.attr
,
146 &mmdc_pmu_write_accesses
.attr
.attr
,
147 &mmdc_pmu_read_bytes
.attr
.attr
,
148 &mmdc_pmu_read_bytes_unit
.attr
.attr
,
149 &mmdc_pmu_read_bytes_scale
.attr
.attr
,
150 &mmdc_pmu_write_bytes
.attr
.attr
,
151 &mmdc_pmu_write_bytes_unit
.attr
.attr
,
152 &mmdc_pmu_write_bytes_scale
.attr
.attr
,
156 static struct attribute_group mmdc_pmu_events_attr_group
= {
158 .attrs
= mmdc_pmu_events_attrs
,
161 PMU_FORMAT_ATTR(event
, "config:0-63");
162 PMU_FORMAT_ATTR(axi_id
, "config1:0-63");
164 static struct attribute
*mmdc_pmu_format_attrs
[] = {
165 &format_attr_event
.attr
,
166 &format_attr_axi_id
.attr
,
170 static struct attribute_group mmdc_pmu_format_attr_group
= {
172 .attrs
= mmdc_pmu_format_attrs
,
175 static const struct attribute_group
*attr_groups
[] = {
176 &mmdc_pmu_events_attr_group
,
177 &mmdc_pmu_format_attr_group
,
178 &mmdc_pmu_cpumask_attr_group
,
182 static u32
mmdc_pmu_read_counter(struct mmdc_pmu
*pmu_mmdc
, int cfg
)
184 void __iomem
*mmdc_base
, *reg
;
186 mmdc_base
= pmu_mmdc
->mmdc_base
;
190 reg
= mmdc_base
+ MMDC_MADPSR0
;
193 reg
= mmdc_base
+ MMDC_MADPSR1
;
196 reg
= mmdc_base
+ MMDC_MADPSR2
;
199 reg
= mmdc_base
+ MMDC_MADPSR3
;
202 reg
= mmdc_base
+ MMDC_MADPSR4
;
205 reg
= mmdc_base
+ MMDC_MADPSR5
;
209 "invalid configuration %d for mmdc counter", cfg
);
214 static int mmdc_pmu_offline_cpu(unsigned int cpu
, struct hlist_node
*node
)
216 struct mmdc_pmu
*pmu_mmdc
= hlist_entry_safe(node
, struct mmdc_pmu
, node
);
219 if (!cpumask_test_and_clear_cpu(cpu
, &pmu_mmdc
->cpu
))
222 target
= cpumask_any_but(cpu_online_mask
, cpu
);
223 if (target
>= nr_cpu_ids
)
226 perf_pmu_migrate_context(&pmu_mmdc
->pmu
, cpu
, target
);
227 cpumask_set_cpu(target
, &pmu_mmdc
->cpu
);
232 static bool mmdc_pmu_group_event_is_valid(struct perf_event
*event
,
234 unsigned long *used_counters
)
236 int cfg
= event
->attr
.config
;
238 if (is_software_event(event
))
241 if (event
->pmu
!= pmu
)
244 return !test_and_set_bit(cfg
, used_counters
);
/*
 * Each event has a single fixed-purpose counter, so we can only have a
 * single active event for each at any point in time. Here we just check
 * for duplicates, and rely on mmdc_pmu_event_init to verify that the HW
 * event numbers are valid.
 */
253 static bool mmdc_pmu_group_is_valid(struct perf_event
*event
)
255 struct pmu
*pmu
= event
->pmu
;
256 struct perf_event
*leader
= event
->group_leader
;
257 struct perf_event
*sibling
;
258 unsigned long counter_mask
= 0;
260 set_bit(leader
->attr
.config
, &counter_mask
);
262 if (event
!= leader
) {
263 if (!mmdc_pmu_group_event_is_valid(event
, pmu
, &counter_mask
))
267 for_each_sibling_event(sibling
, leader
) {
268 if (!mmdc_pmu_group_event_is_valid(sibling
, pmu
, &counter_mask
))
275 static int mmdc_pmu_event_init(struct perf_event
*event
)
277 struct mmdc_pmu
*pmu_mmdc
= to_mmdc_pmu(event
->pmu
);
278 int cfg
= event
->attr
.config
;
280 if (event
->attr
.type
!= event
->pmu
->type
)
283 if (is_sampling_event(event
) || event
->attach_state
& PERF_ATTACH_TASK
)
286 if (event
->cpu
< 0) {
287 dev_warn(pmu_mmdc
->dev
, "Can't provide per-task data!\n");
291 if (event
->attr
.sample_period
)
294 if (cfg
< 0 || cfg
>= MMDC_NUM_COUNTERS
)
297 if (!mmdc_pmu_group_is_valid(event
))
300 event
->cpu
= cpumask_first(&pmu_mmdc
->cpu
);
304 static void mmdc_pmu_event_update(struct perf_event
*event
)
306 struct mmdc_pmu
*pmu_mmdc
= to_mmdc_pmu(event
->pmu
);
307 struct hw_perf_event
*hwc
= &event
->hw
;
308 u64 delta
, prev_raw_count
, new_raw_count
;
311 prev_raw_count
= local64_read(&hwc
->prev_count
);
312 new_raw_count
= mmdc_pmu_read_counter(pmu_mmdc
,
314 } while (local64_cmpxchg(&hwc
->prev_count
, prev_raw_count
,
315 new_raw_count
) != prev_raw_count
);
317 delta
= (new_raw_count
- prev_raw_count
) & 0xFFFFFFFF;
319 local64_add(delta
, &event
->count
);
322 static void mmdc_pmu_event_start(struct perf_event
*event
, int flags
)
324 struct mmdc_pmu
*pmu_mmdc
= to_mmdc_pmu(event
->pmu
);
325 struct hw_perf_event
*hwc
= &event
->hw
;
326 void __iomem
*mmdc_base
, *reg
;
329 mmdc_base
= pmu_mmdc
->mmdc_base
;
330 reg
= mmdc_base
+ MMDC_MADPCR0
;
333 * hrtimer is required because mmdc does not provide an interrupt so
334 * polling is necessary
336 hrtimer_start(&pmu_mmdc
->hrtimer
, mmdc_pmu_timer_period(),
337 HRTIMER_MODE_REL_PINNED
);
339 local64_set(&hwc
->prev_count
, 0);
341 writel(DBG_RST
, reg
);
344 * Write the AXI id parameter to MADPCR1.
346 val
= event
->attr
.config1
;
347 reg
= mmdc_base
+ MMDC_MADPCR1
;
350 reg
= mmdc_base
+ MMDC_MADPCR0
;
352 if (pmu_mmdc
->devtype_data
->flags
& MMDC_FLAG_PROFILE_SEL
)
358 static int mmdc_pmu_event_add(struct perf_event
*event
, int flags
)
360 struct mmdc_pmu
*pmu_mmdc
= to_mmdc_pmu(event
->pmu
);
361 struct hw_perf_event
*hwc
= &event
->hw
;
363 int cfg
= event
->attr
.config
;
365 if (flags
& PERF_EF_START
)
366 mmdc_pmu_event_start(event
, flags
);
368 if (pmu_mmdc
->mmdc_events
[cfg
] != NULL
)
371 pmu_mmdc
->mmdc_events
[cfg
] = event
;
372 pmu_mmdc
->active_events
++;
374 local64_set(&hwc
->prev_count
, mmdc_pmu_read_counter(pmu_mmdc
, cfg
));
379 static void mmdc_pmu_event_stop(struct perf_event
*event
, int flags
)
381 struct mmdc_pmu
*pmu_mmdc
= to_mmdc_pmu(event
->pmu
);
382 void __iomem
*mmdc_base
, *reg
;
384 mmdc_base
= pmu_mmdc
->mmdc_base
;
385 reg
= mmdc_base
+ MMDC_MADPCR0
;
387 writel(PRF_FRZ
, reg
);
389 reg
= mmdc_base
+ MMDC_MADPCR1
;
390 writel(MMDC_PRF_AXI_ID_CLEAR
, reg
);
392 mmdc_pmu_event_update(event
);
395 static void mmdc_pmu_event_del(struct perf_event
*event
, int flags
)
397 struct mmdc_pmu
*pmu_mmdc
= to_mmdc_pmu(event
->pmu
);
398 int cfg
= event
->attr
.config
;
400 pmu_mmdc
->mmdc_events
[cfg
] = NULL
;
401 pmu_mmdc
->active_events
--;
403 if (pmu_mmdc
->active_events
== 0)
404 hrtimer_cancel(&pmu_mmdc
->hrtimer
);
406 mmdc_pmu_event_stop(event
, PERF_EF_UPDATE
);
409 static void mmdc_pmu_overflow_handler(struct mmdc_pmu
*pmu_mmdc
)
413 for (i
= 0; i
< MMDC_NUM_COUNTERS
; i
++) {
414 struct perf_event
*event
= pmu_mmdc
->mmdc_events
[i
];
417 mmdc_pmu_event_update(event
);
421 static enum hrtimer_restart
mmdc_pmu_timer_handler(struct hrtimer
*hrtimer
)
423 struct mmdc_pmu
*pmu_mmdc
= container_of(hrtimer
, struct mmdc_pmu
,
426 mmdc_pmu_overflow_handler(pmu_mmdc
);
427 hrtimer_forward_now(hrtimer
, mmdc_pmu_timer_period());
429 return HRTIMER_RESTART
;
432 static int mmdc_pmu_init(struct mmdc_pmu
*pmu_mmdc
,
433 void __iomem
*mmdc_base
, struct device
*dev
)
437 *pmu_mmdc
= (struct mmdc_pmu
) {
438 .pmu
= (struct pmu
) {
439 .task_ctx_nr
= perf_invalid_context
,
440 .attr_groups
= attr_groups
,
441 .event_init
= mmdc_pmu_event_init
,
442 .add
= mmdc_pmu_event_add
,
443 .del
= mmdc_pmu_event_del
,
444 .start
= mmdc_pmu_event_start
,
445 .stop
= mmdc_pmu_event_stop
,
446 .read
= mmdc_pmu_event_update
,
447 .capabilities
= PERF_PMU_CAP_NO_EXCLUDE
,
449 .mmdc_base
= mmdc_base
,
454 mmdc_num
= ida_simple_get(&mmdc_ida
, 0, 0, GFP_KERNEL
);
459 static int imx_mmdc_remove(struct platform_device
*pdev
)
461 struct mmdc_pmu
*pmu_mmdc
= platform_get_drvdata(pdev
);
463 cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state
, &pmu_mmdc
->node
);
464 perf_pmu_unregister(&pmu_mmdc
->pmu
);
469 static int imx_mmdc_perf_init(struct platform_device
*pdev
, void __iomem
*mmdc_base
)
471 struct mmdc_pmu
*pmu_mmdc
;
475 const struct of_device_id
*of_id
=
476 of_match_device(imx_mmdc_dt_ids
, &pdev
->dev
);
478 pmu_mmdc
= kzalloc(sizeof(*pmu_mmdc
), GFP_KERNEL
);
480 pr_err("failed to allocate PMU device!\n");
484 /* The first instance registers the hotplug state */
485 if (!cpuhp_mmdc_state
) {
486 ret
= cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN
,
487 "perf/arm/mmdc:online", NULL
,
488 mmdc_pmu_offline_cpu
);
490 pr_err("cpuhp_setup_state_multi failed\n");
493 cpuhp_mmdc_state
= ret
;
496 mmdc_num
= mmdc_pmu_init(pmu_mmdc
, mmdc_base
, &pdev
->dev
);
500 name
= devm_kasprintf(&pdev
->dev
,
501 GFP_KERNEL
, "mmdc%d", mmdc_num
);
503 pmu_mmdc
->devtype_data
= (struct fsl_mmdc_devtype_data
*)of_id
->data
;
505 hrtimer_init(&pmu_mmdc
->hrtimer
, CLOCK_MONOTONIC
,
507 pmu_mmdc
->hrtimer
.function
= mmdc_pmu_timer_handler
;
509 cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc
->cpu
);
511 /* Register the pmu instance for cpu hotplug */
512 cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state
, &pmu_mmdc
->node
);
514 ret
= perf_pmu_register(&(pmu_mmdc
->pmu
), name
, -1);
516 goto pmu_register_err
;
518 platform_set_drvdata(pdev
, pmu_mmdc
);
522 pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret
);
523 cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state
, &pmu_mmdc
->node
);
524 hrtimer_cancel(&pmu_mmdc
->hrtimer
);
531 #define imx_mmdc_remove NULL
532 #define imx_mmdc_perf_init(pdev, mmdc_base) 0
535 static int imx_mmdc_probe(struct platform_device
*pdev
)
537 struct device_node
*np
= pdev
->dev
.of_node
;
538 void __iomem
*mmdc_base
, *reg
;
539 struct clk
*mmdc_ipg_clk
;
543 /* the ipg clock is optional */
544 mmdc_ipg_clk
= devm_clk_get(&pdev
->dev
, NULL
);
545 if (IS_ERR(mmdc_ipg_clk
))
548 err
= clk_prepare_enable(mmdc_ipg_clk
);
550 dev_err(&pdev
->dev
, "Unable to enable mmdc ipg clock.\n");
554 mmdc_base
= of_iomap(np
, 0);
557 reg
= mmdc_base
+ MMDC_MDMISC
;
559 val
= readl_relaxed(reg
);
560 ddr_type
= (val
& BM_MMDC_MDMISC_DDR_TYPE
) >>
561 BP_MMDC_MDMISC_DDR_TYPE
;
563 reg
= mmdc_base
+ MMDC_MAPSR
;
565 /* Enable automatic power saving */
566 val
= readl_relaxed(reg
);
567 val
&= ~(1 << BP_MMDC_MAPSR_PSD
);
568 writel_relaxed(val
, reg
);
570 return imx_mmdc_perf_init(pdev
, mmdc_base
);
573 int imx_mmdc_get_ddr_type(void)
578 static struct platform_driver imx_mmdc_driver
= {
581 .of_match_table
= imx_mmdc_dt_ids
,
583 .probe
= imx_mmdc_probe
,
584 .remove
= imx_mmdc_remove
,
587 static int __init
imx_mmdc_init(void)
589 return platform_driver_register(&imx_mmdc_driver
);
591 postcore_initcall(imx_mmdc_init
);