// SPDX-License-Identifier: GPL-2.0

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/perf_event.h>
/* Performance monitor configuration */
#define PMCFG1				0x00
#define MX93_PMCFG1_RD_TRANS_FILT_EN	BIT(31)
#define MX93_PMCFG1_WR_TRANS_FILT_EN	BIT(30)
#define MX93_PMCFG1_RD_BT_FILT_EN	BIT(29)
#define MX93_PMCFG1_ID_MASK		GENMASK(17, 0)

#define MX95_PMCFG1_WR_BEAT_FILT_EN	BIT(31)
#define MX95_PMCFG1_RD_BEAT_FILT_EN	BIT(30)

#define PMCFG2				0x04
#define MX93_PMCFG2_ID			GENMASK(17, 0)

/* PMCFG3 - PMCFG6 hold the per-counter AXI ID filter configuration on i.MX95 */
#define PMCFG3				0x08
#define PMCFG4				0x0C
#define PMCFG5				0x10
#define PMCFG6				0x14
#define MX95_PMCFG_ID_MASK		GENMASK(9, 0)
#define MX95_PMCFG_ID			GENMASK(25, 16)
/* Global control register affects all counters and takes priority over local control registers */
#define PMGC0		0x40
/* Global control register bits */
#define PMGC0_FAC	BIT(31)
#define PMGC0_PMIE	BIT(30)
#define PMGC0_FCECE	BIT(29)

/*
 * The 64-bit counter0 is dedicated exclusively to counting cycles.
 * The 32-bit counters monitor counter-specific events in addition to counting reference events.
 */
#define PMLCA(n)	(0x40 + 0x10 + (0x10 * n))
#define PMLCB(n)	(0x40 + 0x14 + (0x10 * n))
#define PMC(n)		(0x40 + 0x18 + (0x10 * n))
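/*
 * Each counter owns a 0x10-byte register block: for counter n, PMLCA(n) sits
 * at 0x50 + 0x10 * n, PMLCB(n) at 0x54 + 0x10 * n and the counter value
 * PMC(n) at 0x58 + 0x10 * n (counter 0: 0x50/0x54/0x58, counter 1:
 * 0x60/0x64/0x68, and so on).
 */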
/* Local control register bits */
#define PMLCA_FC	BIT(31)
#define PMLCA_CE	BIT(26)
#define PMLCA_EVENT	GENMASK(22, 16)

#define NUM_COUNTERS		11
#define CYCLES_COUNTER		0
#define CYCLES_EVENT_ID		0

#define CONFIG_EVENT_MASK	GENMASK(7, 0)
#define CONFIG_COUNTER_MASK	GENMASK(23, 16)

#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)

#define DDR_PERF_DEV_NAME	"imx9_ddr"
#define DDR_CPUHP_CB_NAME	DDR_PERF_DEV_NAME "_perf_pmu"
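/*
 * Each probed instance registers as DDR_PERF_DEV_NAME plus an IDA-allocated
 * id (e.g. "imx9_ddr0") and shows up under /sys/bus/event_source/devices/.
 */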
static DEFINE_IDA(ddr_ida);
struct imx_ddr_devtype_data {
	const char *identifier;		/* system PMU identifier for userspace */
};

struct ddr_pmu {
	struct pmu pmu;
	void __iomem *base;
	unsigned int cpu;
	struct hlist_node node;
	struct device *dev;
	struct perf_event *events[NUM_COUNTERS];
	int active_events;
	enum cpuhp_state cpuhp_state;
	const struct imx_ddr_devtype_data *devtype_data;
	int irq;
	int id;
};
static const struct imx_ddr_devtype_data imx91_devtype_data = {
	.identifier = "imx91",
};

static const struct imx_ddr_devtype_data imx93_devtype_data = {
	.identifier = "imx93",
};

static const struct imx_ddr_devtype_data imx95_devtype_data = {
	.identifier = "imx95",
};
static inline bool is_imx93(struct ddr_pmu *pmu)
{
	return pmu->devtype_data == &imx93_devtype_data;
}

static inline bool is_imx95(struct ddr_pmu *pmu)
{
	return pmu->devtype_data == &imx95_devtype_data;
}
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
	{ .compatible = "fsl,imx91-ddr-pmu", .data = &imx91_devtype_data },
	{ .compatible = "fsl,imx93-ddr-pmu", .data = &imx93_devtype_data },
	{ .compatible = "fsl,imx95-ddr-pmu", .data = &imx95_devtype_data },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
static ssize_t ddr_perf_identifier_show(struct device *dev,
					struct device_attribute *attr,
					char *page)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);
}

static struct device_attribute ddr_perf_identifier_attr =
	__ATTR(identifier, 0444, ddr_perf_identifier_show, NULL);

static struct attribute *ddr_perf_identifier_attrs[] = {
	&ddr_perf_identifier_attr.attr,
	NULL,
};

static struct attribute_group ddr_perf_identifier_attr_group = {
	.attrs = ddr_perf_identifier_attrs,
};
static ssize_t ddr_perf_cpumask_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};
struct imx9_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const void *devtype_data;
};

static ssize_t ddr_pmu_event_show(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct imx9_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct imx9_pmu_events_attr, attr);
	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
#define COUNTER_OFFSET_IN_EVENT	8
#define ID(counter, id) ((counter << COUNTER_OFFSET_IN_EVENT) | id)
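/*
 * For example, ID(2, 73) encodes event id 73 bound to counter 2 as
 * (2 << 8) | 73 = 0x249, which is also the value reported by the
 * corresponding sysfs events entry (event=0x249).
 */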
#define DDR_PMU_EVENT_ATTR_COMM(_name, _id, _data)			\
	(&((struct imx9_pmu_events_attr[]) {				\
		{ .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
		  .id = _id,						\
		  .devtype_data = _data, }				\
	})[0].attr.attr)

#define IMX9_DDR_PMU_EVENT_ATTR(_name, _id)		\
	DDR_PMU_EVENT_ATTR_COMM(_name, _id, NULL)

#define IMX93_DDR_PMU_EVENT_ATTR(_name, _id)		\
	DDR_PMU_EVENT_ATTR_COMM(_name, _id, &imx93_devtype_data)

#define IMX95_DDR_PMU_EVENT_ATTR(_name, _id)		\
	DDR_PMU_EVENT_ATTR_COMM(_name, _id, &imx95_devtype_data)
static struct attribute *ddr_perf_events_attrs[] = {
	/* counter0 cycles event */
	IMX9_DDR_PMU_EVENT_ATTR(cycles, 0),

	/* reference events for all normal counters; the DEBUG19[21] bit must be asserted */
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ddrc1_rmw_for_ecc, 12),
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_rreorder, 13),
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_wreorder, 14),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_0, 15),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_1, 16),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_2, 17),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_3, 18),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_4, 19),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_5, 22),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_6, 23),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_7, 24),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_8, 25),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_9, 26),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_10, 27),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_11, 28),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_12, 31),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_13, 59),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_15, 61),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_29, 63),

	/* counter1 specific events */
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_0, ID(1, 64)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_1, ID(1, 65)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_2, ID(1, 66)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_3, ID(1, 67)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_4, ID(1, 68)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_5, ID(1, 69)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_6, ID(1, 70)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_7, ID(1, 71)),

	/* counter2 specific events */
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_0, ID(2, 64)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_1, ID(2, 65)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_2, ID(2, 66)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_3, ID(2, 67)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_4, ID(2, 68)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_5, ID(2, 69)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_6, ID(2, 70)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_7, ID(2, 71)),
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_empty, ID(2, 72)),
	IMX93_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_trans_filt, ID(2, 73)),	/* imx93 specific */
	IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_beat_filt, ID(2, 73)),	/* imx95 specific */

	/* counter3 specific events */
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_0, ID(3, 64)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_1, ID(3, 65)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_2, ID(3, 66)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_3, ID(3, 67)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_4, ID(3, 68)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_5, ID(3, 69)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_6, ID(3, 70)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_7, ID(3, 71)),
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_full, ID(3, 72)),
	IMX93_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_trans_filt, ID(3, 73)),	/* imx93 specific */
	IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt2, ID(3, 73)),	/* imx95 specific */

	/* counter4 specific events */
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_0, ID(4, 64)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_1, ID(4, 65)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_2, ID(4, 66)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_3, ID(4, 67)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_4, ID(4, 68)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_5, ID(4, 69)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_6, ID(4, 70)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_7, ID(4, 71)),
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2_rmw, ID(4, 72)),
	IMX93_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt, ID(4, 73)),	/* imx93 specific */
	IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt1, ID(4, 73)),	/* imx95 specific */

	/* counter5 specific events */
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_0, ID(5, 64)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_1, ID(5, 65)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_2, ID(5, 66)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_3, ID(5, 67)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_4, ID(5, 68)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_5, ID(5, 69)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_6, ID(5, 70)),
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_7, ID(5, 71)),
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq1, ID(5, 72)),
	IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt0, ID(5, 73)),	/* imx95 specific */

	/* counter6 specific events */
	IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_end_0, ID(6, 64)),
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2, ID(6, 72)),

	/* counter7 specific events */
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_2_full, ID(7, 64)),
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq0, ID(7, 65)),

	/* counter8 specific events */
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_bias_switched, ID(8, 64)),
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_4_full, ID(8, 65)),

	/* counter9 specific events */
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq1, ID(9, 65)),
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_3_4_full, ID(9, 66)),

	/* counter10 specific events */
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_misc_mrk, ID(10, 65)),
	IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq0, ID(10, 66)),

	NULL,
};
static umode_t
ddr_perf_events_attrs_is_visible(struct kobject *kobj,
				 struct attribute *attr, int unused)
{
	struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
	struct imx9_pmu_events_attr *eattr;

	eattr = container_of(attr, typeof(*eattr), attr.attr);

	if (!eattr->devtype_data)
		return attr->mode;

	if (eattr->devtype_data != ddr_pmu->devtype_data)
		return 0;

	return attr->mode;
}

static const struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
	.is_visible = ddr_perf_events_attrs_is_visible,
};
PMU_FORMAT_ATTR(event, "config:0-7,16-23");
PMU_FORMAT_ATTR(counter, "config:8-15");
PMU_FORMAT_ATTR(axi_id, "config1:0-17");
PMU_FORMAT_ATTR(axi_mask, "config2:0-17");
static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_counter.attr,
	&format_attr_axi_id.attr,
	&format_attr_axi_mask.attr,
	NULL,
};

static const struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_identifier_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_events_attr_group,
	&ddr_perf_format_attr_group,
	NULL,
};
static void ddr_perf_clear_counter(struct ddr_pmu *pmu, int counter)
{
	if (counter == CYCLES_COUNTER) {
		writel(0, pmu->base + PMC(counter) + 0x4);
		writel(0, pmu->base + PMC(counter));
	} else {
		writel(0, pmu->base + PMC(counter));
	}
}
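/*
 * The 64-bit cycle counter is read as two 32-bit halves. The upper word is
 * read again after the lower word and the sequence is retried if it changed,
 * so a carry from the lower into the upper half between the two reads cannot
 * produce a torn value.
 */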
static u64 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
	u32 val_lower, val_upper;
	u64 val;

	if (counter != CYCLES_COUNTER) {
		val = readl_relaxed(pmu->base + PMC(counter));
		goto out;
	}

	/* special handling for reading 64bit cycle counter */
	do {
		val_upper = readl_relaxed(pmu->base + PMC(counter) + 0x4);
		val_lower = readl_relaxed(pmu->base + PMC(counter));
	} while (val_upper != readl_relaxed(pmu->base + PMC(counter) + 0x4));

	val = val_upper;
	val = (val << 32);
	val |= val_lower;
out:
	return val;
}
static void ddr_perf_counter_global_config(struct ddr_pmu *pmu, bool enable)
{
	u32 ctrl;

	ctrl = readl_relaxed(pmu->base + PMGC0);

	if (enable) {
		/*
		 * The performance monitor must be reset before event counting
		 * sequences. The performance monitor can be reset by first freezing
		 * one or more counters and then clearing the freeze condition to
		 * allow the counters to count according to the settings in the
		 * performance monitor registers. Counters can be frozen individually
		 * by setting PMLCAn[FC] bits, or simultaneously by setting PMGC0[FAC].
		 * Simply clearing these freeze bits will then allow the performance
		 * monitor to begin counting based on the register settings.
		 */
		ctrl |= PMGC0_FAC;
		writel(ctrl, pmu->base + PMGC0);

		/*
		 * Freeze all counters disabled, interrupt enabled, and freeze
		 * counters on condition enabled.
		 */
		ctrl &= ~PMGC0_FAC;
		ctrl |= PMGC0_PMIE | PMGC0_FCECE;
		writel(ctrl, pmu->base + PMGC0);
	} else {
		ctrl |= PMGC0_FAC;
		ctrl &= ~(PMGC0_PMIE | PMGC0_FCECE);
		writel(ctrl, pmu->base + PMGC0);
	}
}
static void ddr_perf_counter_local_config(struct ddr_pmu *pmu, int config,
					  int counter, bool enable)
{
	u32 ctrl_a;
	int event;

	ctrl_a = readl_relaxed(pmu->base + PMLCA(counter));
	event = FIELD_GET(CONFIG_EVENT_MASK, config);

	if (enable) {
		ctrl_a |= PMLCA_FC;
		writel(ctrl_a, pmu->base + PMLCA(counter));

		ddr_perf_clear_counter(pmu, counter);

		/* Freeze counter disabled, condition enabled, and program the event. */
		ctrl_a &= ~PMLCA_FC;
		ctrl_a |= PMLCA_CE;
		ctrl_a &= ~FIELD_PREP(PMLCA_EVENT, 0x7F);
		ctrl_a |= FIELD_PREP(PMLCA_EVENT, event);
		writel(ctrl_a, pmu->base + PMLCA(counter));
	} else {
		/* Freeze counter. */
		ctrl_a |= PMLCA_FC;
		writel(ctrl_a, pmu->base + PMLCA(counter));
	}
}
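/*
 * On i.MX93, event id 73 selects the AXI ID filtered events: read transaction
 * on counter 2, write transaction on counter 3 and read beat on counter 4.
 * mask[counter - 2] below picks the matching PMCFG1 filter enable bit.
 */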
static void imx93_ddr_perf_monitor_config(struct ddr_pmu *pmu, int event,
					  int counter, int axi_id, int axi_mask)
{
	u32 pmcfg1, pmcfg2;
	u32 mask[] = {  MX93_PMCFG1_RD_TRANS_FILT_EN,
			MX93_PMCFG1_WR_TRANS_FILT_EN,
			MX93_PMCFG1_RD_BT_FILT_EN };

	pmcfg1 = readl_relaxed(pmu->base + PMCFG1);

	if (counter >= 2 && counter <= 4)
		pmcfg1 = event == 73 ? pmcfg1 | mask[counter - 2] :
				pmcfg1 & ~mask[counter - 2];

	pmcfg1 &= ~FIELD_PREP(MX93_PMCFG1_ID_MASK, 0x3FFFF);
	pmcfg1 |= FIELD_PREP(MX93_PMCFG1_ID_MASK, axi_mask);
	writel_relaxed(pmcfg1, pmu->base + PMCFG1);

	pmcfg2 = readl_relaxed(pmu->base + PMCFG2);
	pmcfg2 &= ~FIELD_PREP(MX93_PMCFG2_ID, 0x3FFFF);
	pmcfg2 |= FIELD_PREP(MX93_PMCFG2_ID, axi_id);
	writel_relaxed(pmcfg2, pmu->base + PMCFG2);
}
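/*
 * On i.MX95, event id 73 selects the AXI ID filtered beat events: write beat
 * on counter 2 and read beat 2/1/0 on counters 3/4/5. Each of these counters
 * has its own filter register (PMCFG3..PMCFG6) holding the ID and mask.
 */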
static void imx95_ddr_perf_monitor_config(struct ddr_pmu *pmu, int event,
					  int counter, int axi_id, int axi_mask)
{
	u32 pmcfg1, pmcfg, offset = 0;

	pmcfg1 = readl_relaxed(pmu->base + PMCFG1);

	if (event == 73) {
		switch (counter) {
		case 2:
			pmcfg1 |= MX95_PMCFG1_WR_BEAT_FILT_EN;
			offset = PMCFG3;
			break;
		case 3:
			pmcfg1 |= MX95_PMCFG1_RD_BEAT_FILT_EN;
			offset = PMCFG4;
			break;
		case 4:
			pmcfg1 |= MX95_PMCFG1_RD_BEAT_FILT_EN;
			offset = PMCFG5;
			break;
		case 5:
			pmcfg1 |= MX95_PMCFG1_RD_BEAT_FILT_EN;
			offset = PMCFG6;
			break;
		}
	} else {
		switch (counter) {
		case 2:
			pmcfg1 &= ~MX95_PMCFG1_WR_BEAT_FILT_EN;
			break;
		case 3:
		case 4:
		case 5:
			pmcfg1 &= ~MX95_PMCFG1_RD_BEAT_FILT_EN;
			break;
		}
	}

	writel_relaxed(pmcfg1, pmu->base + PMCFG1);

	if (offset) {
		pmcfg = readl_relaxed(pmu->base + offset);
		pmcfg &= ~(FIELD_PREP(MX95_PMCFG_ID_MASK, 0x3FF) |
			   FIELD_PREP(MX95_PMCFG_ID, 0x3FF));
		pmcfg |= (FIELD_PREP(MX95_PMCFG_ID_MASK, axi_mask) |
			  FIELD_PREP(MX95_PMCFG_ID, axi_id));
		writel_relaxed(pmcfg, pmu->base + offset);
	}
}
static void ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u64 new_raw_count;

	new_raw_count = ddr_perf_read_counter(pmu, counter);
	local64_add(new_raw_count, &event->count);

	/* clear counter's value every time */
	ddr_perf_clear_counter(pmu, counter);
}
static int ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable (for example to create a CCN group
	 * periodically read when a hrtimer aka cpu-clock leader triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling))
			return -EINVAL;
	}

	event->cpu = pmu->cpu;
	hwc->idx = -1;

	return 0;
}
static void ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	local64_set(&hwc->prev_count, 0);

	ddr_perf_counter_local_config(pmu, event->attr.config, counter, true);
	hwc->state = 0;
}
static int ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event, int counter)
{
	int i;

	if (event == CYCLES_EVENT_ID) {
		// The cycles counter is dedicated to the cycle event.
		if (pmu->events[CYCLES_COUNTER] == NULL)
			return CYCLES_COUNTER;
	} else if (counter != 0) {
		// A counter-specific event must use its own counter.
		if (pmu->events[counter] == NULL)
			return counter;
	} else {
		// Auto-allocate a counter for a reference event.
		for (i = 1; i < NUM_COUNTERS; i++)
			if (pmu->events[i] == NULL)
				return i;
	}

	return -ENOENT;
}
static int ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int cfg = event->attr.config;
	int cfg1 = event->attr.config1;
	int cfg2 = event->attr.config2;
	int event_id, counter;

	event_id = FIELD_GET(CONFIG_EVENT_MASK, cfg);
	counter = FIELD_GET(CONFIG_COUNTER_MASK, cfg);

	counter = ddr_perf_alloc_counter(pmu, event_id, counter);
	if (counter < 0) {
		dev_dbg(pmu->dev, "There are not enough counters\n");
		return -EOPNOTSUPP;
	}

	pmu->events[counter] = event;
	pmu->active_events++;
	hwc->idx = counter;
	hwc->state |= PERF_HES_STOPPED;

	if (is_imx93(pmu))
		/* read trans, write trans, read beat */
		imx93_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);

	if (is_imx95(pmu))
		/* write beat, read beat2, read beat1, read beat */
		imx95_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);

	if (flags & PERF_EF_START)
		ddr_perf_event_start(event, flags);

	return 0;
}
static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_counter_local_config(pmu, event->attr.config, counter, false);
	ddr_perf_event_update(event);

	hwc->state |= PERF_HES_STOPPED;
}
static void ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_event_stop(event, PERF_EF_UPDATE);

	pmu->events[counter] = NULL;
	pmu->active_events--;
	hwc->idx = -1;
}
static void ddr_perf_pmu_enable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	ddr_perf_counter_global_config(ddr_pmu, true);
}

static void ddr_perf_pmu_disable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	ddr_perf_counter_global_config(ddr_pmu, false);
}
static void ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
			  struct device *dev)
{
	*pmu = (struct ddr_pmu) {
		.pmu = (struct pmu) {
			.module       = THIS_MODULE,
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
			.task_ctx_nr  = perf_invalid_context,
			.attr_groups  = attr_groups,
			.event_init   = ddr_perf_event_init,
			.add          = ddr_perf_event_add,
			.del          = ddr_perf_event_del,
			.start        = ddr_perf_event_start,
			.stop         = ddr_perf_event_stop,
			.read         = ddr_perf_event_update,
			.pmu_enable   = ddr_perf_pmu_enable,
			.pmu_disable  = ddr_perf_pmu_disable,
		},
		.base = base,
		.dev = dev,
	};
}
static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
	struct ddr_pmu *pmu = (struct ddr_pmu *)p;
	struct perf_event *event;
	int i;

	/*
	 * Counters can generate an interrupt on an overflow when the msb of a
	 * counter changes from 0 to 1. For the interrupt to be signalled, the
	 * condition below must be satisfied:
	 * PMGC0[PMIE] = 1, PMGC0[FCECE] = 1, PMLCAn[CE] = 1
	 * When an interrupt is signalled, PMGC0[FAC] is set by hardware and
	 * all of the registers are frozen.
	 * Software can clear the interrupt condition by resetting the performance
	 * monitor and clearing the most significant bit of the counter that
	 * generated the overflow.
	 */
	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!pmu->events[i])
			continue;

		event = pmu->events[i];

		ddr_perf_event_update(event);
	}

	ddr_perf_counter_global_config(pmu, true);

	return IRQ_HANDLED;
}
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu)));

	return 0;
}
static int ddr_perf_probe(struct platform_device *pdev)
{
	struct ddr_pmu *pmu;
	void __iomem *base;
	int ret, irq;
	char *name;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	ddr_perf_init(pmu, base, &pdev->dev);

	pmu->devtype_data = of_device_get_match_data(&pdev->dev);

	platform_set_drvdata(pdev, pmu);

	pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL);
	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", pmu->id);
	if (!name) {
		ret = -ENOMEM;
		goto format_string_err;
	}

	pmu->cpu = raw_smp_processor_id();
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DDR_CPUHP_CB_NAME,
				      NULL, ddr_perf_offline_cpu);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to add callbacks for multi state\n");
		goto cpuhp_state_err;
	}
	pmu->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		goto cpuhp_instance_err;
	}

	/* Request irq */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto ddr_perf_err;
	}

	ret = devm_request_irq(&pdev->dev, irq, ddr_perf_irq_handler,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       DDR_CPUHP_CB_NAME, pmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "Request irq failed: %d", ret);
		goto ddr_perf_err;
	}

	pmu->irq = irq;
	ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu));
	if (ret) {
		dev_err(pmu->dev, "Failed to set interrupt affinity\n");
		goto ddr_perf_err;
	}

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto ddr_perf_err;

	return 0;

ddr_perf_err:
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
	cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
format_string_err:
	ida_free(&ddr_ida, pmu->id);
	dev_warn(&pdev->dev, "i.MX9 DDR Perf PMU failed (%d), disabled\n", ret);
	return ret;
}
static void ddr_perf_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	cpuhp_remove_multi_state(pmu->cpuhp_state);

	perf_pmu_unregister(&pmu->pmu);

	ida_free(&ddr_ida, pmu->id);
}
static struct platform_driver imx_ddr_pmu_driver = {
	.driver = {
		.name		= "imx9-ddr-pmu",
		.of_match_table = imx_ddr_pmu_dt_ids,
		.suppress_bind_attrs = true,
	},
	.probe = ddr_perf_probe,
	.remove = ddr_perf_remove,
};
module_platform_driver(imx_ddr_pmu_driver);
MODULE_AUTHOR("Xu Yang <xu.yang_2@nxp.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DDRC PerfMon for i.MX9 SoCs");