// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
 * Author: Lin Huang <hl@rock-chips.com>
 */
#include <linux/clk.h>
#include <linux/devfreq-event.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/seqlock.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/perf_event.h>
#include <soc/rockchip/rockchip_grf.h>
#include <soc/rockchip/rk3399_grf.h>
#include <soc/rockchip/rk3568_grf.h>
#include <soc/rockchip/rk3588_grf.h>
#define DMC_MAX_CHANNELS	4

#define HIWORD_UPDATE(val, mask)	((val) | (mask) << 16)
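/*
 * DDRMON registers follow the usual Rockchip "hiword mask" convention: the
 * upper 16 bits of a write select which of the lower 16 bits take effect, so
 * individual fields can be updated without a read-modify-write. For example,
 * HIWORD_UPDATE(DDRMON_CTRL_SOFTWARE_EN, DDRMON_CTRL_SOFTWARE_EN) evaluates
 * to 0x00020002: write-enable bit 1 and set it.
 */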
/* DDRMON_CTRL */
#define DDRMON_CTRL			0x04
#define DDRMON_CTRL_DDR4		BIT(5)
#define DDRMON_CTRL_LPDDR4		BIT(4)
#define DDRMON_CTRL_HARDWARE_EN		BIT(3)
#define DDRMON_CTRL_LPDDR23		BIT(2)
#define DDRMON_CTRL_SOFTWARE_EN		BIT(1)
#define DDRMON_CTRL_TIMER_CNT_EN	BIT(0)
#define DDRMON_CTRL_DDR_TYPE_MASK	(DDRMON_CTRL_DDR4 | \
					 DDRMON_CTRL_LPDDR4 | \
					 DDRMON_CTRL_LPDDR23)
#define DDRMON_CH0_WR_NUM		0x20
#define DDRMON_CH0_RD_NUM		0x24
#define DDRMON_CH0_COUNT_NUM		0x28
#define DDRMON_CH0_DFI_ACCESS_NUM	0x2c
#define DDRMON_CH1_COUNT_NUM		0x3c
#define DDRMON_CH1_DFI_ACCESS_NUM	0x40
#define PERF_EVENT_CYCLES		0x0
#define PERF_EVENT_READ_BYTES		0x1
#define PERF_EVENT_WRITE_BYTES		0x2
#define PERF_EVENT_READ_BYTES0		0x3
#define PERF_EVENT_WRITE_BYTES0		0x4
#define PERF_EVENT_READ_BYTES1		0x5
#define PERF_EVENT_WRITE_BYTES1		0x6
#define PERF_EVENT_READ_BYTES2		0x7
#define PERF_EVENT_WRITE_BYTES2		0x8
#define PERF_EVENT_READ_BYTES3		0x9
#define PERF_EVENT_WRITE_BYTES3		0xa
#define PERF_EVENT_BYTES		0xb
#define PERF_ACCESS_TYPE_MAX		0xc
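/*
 * With CONFIG_PERF_EVENTS these counters are also exposed as a perf PMU
 * named "rockchip_ddr", using the event numbers above, e.g. (illustrative):
 *
 *	perf stat -a -e rockchip_ddr/cycles/,rockchip_ddr/read-bytes/ sleep 1
 */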
/**
 * struct dmc_count_channel - structure to hold counter values from the DDR controller
 * @access:       Number of read and write accesses
 * @clock_cycles: DDR clock cycles
 * @read_access:  number of read accesses
 * @write_access: number of write accesses
 */
struct dmc_count_channel {
	u64 access;
	u64 clock_cycles;
	u64 read_access;
	u64 write_access;
};

struct dmc_count {
	struct dmc_count_channel c[DMC_MAX_CHANNELS];
};
/*
 * The dfi controller can monitor DDR load. It has an upper and lower threshold
 * for the operating points. Whenever the usage leaves these bounds an event is
 * generated to indicate the DDR frequency should be changed.
 */
struct rockchip_dfi {
	struct devfreq_event_dev *edev;
	struct devfreq_event_desc desc;
	struct dmc_count last_event_count;

	struct dmc_count last_perf_count;
	struct dmc_count total_count;
	seqlock_t count_seqlock; /* protects last_perf_count and total_count */

	struct device *dev;
	void __iomem *regs;
	struct regmap *regmap_pmu;
	struct clk *clk;
	int usecount;
	struct mutex mutex;
	u32 ddr_type;
	unsigned int channel_mask;
	unsigned int max_channels;
	enum cpuhp_state cpuhp_state;
	struct hlist_node node;
	struct pmu pmu;
	struct hrtimer timer;
	unsigned int cpu;
	unsigned int active_events;
	int burst_len;
	int buswidth[DMC_MAX_CHANNELS];
	int ddrmon_stride;
	bool ddrmon_ctrl_single;
};
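/*
 * The DDRMON counters are shared between the devfreq-event code and the perf
 * PMU. rockchip_dfi_enable()/rockchip_dfi_disable() therefore keep a usecount
 * under dfi->mutex, so the hardware is only programmed for the first user and
 * shut off again with the last one.
 */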
static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
{
	void __iomem *dfi_regs = dfi->regs;
	int i, ret = 0;

	mutex_lock(&dfi->mutex);

	dfi->usecount++;

	if (dfi->usecount > 1)
		goto out;

	ret = clk_prepare_enable(dfi->clk);
	if (ret) {
		dev_err(&dfi->edev->dev, "failed to enable dfi clk: %d\n", ret);
		goto out;
	}

	for (i = 0; i < dfi->max_channels; i++) {
		u32 ctrl = 0;

		if (!(dfi->channel_mask & BIT(i)))
			continue;

		/* clear DDRMON_CTRL setting */
		writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_TIMER_CNT_EN |
			       DDRMON_CTRL_SOFTWARE_EN | DDRMON_CTRL_HARDWARE_EN),
			       dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

		/* set ddr type to dfi */
		switch (dfi->ddr_type) {
		case ROCKCHIP_DDRTYPE_LPDDR2:
		case ROCKCHIP_DDRTYPE_LPDDR3:
			ctrl = DDRMON_CTRL_LPDDR23;
			break;
		case ROCKCHIP_DDRTYPE_LPDDR4:
		case ROCKCHIP_DDRTYPE_LPDDR4X:
			ctrl = DDRMON_CTRL_LPDDR4;
			break;
		default:
			break;
		}

		writel_relaxed(HIWORD_UPDATE(ctrl, DDRMON_CTRL_DDR_TYPE_MASK),
			       dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

		/* enable count, use software mode */
		writel_relaxed(HIWORD_UPDATE(DDRMON_CTRL_SOFTWARE_EN, DDRMON_CTRL_SOFTWARE_EN),
			       dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

		/* a single DDRMON_CTRL instance controls all channels */
		if (dfi->ddrmon_ctrl_single)
			break;
	}

out:
	mutex_unlock(&dfi->mutex);

	return ret;
}
static void rockchip_dfi_disable(struct rockchip_dfi *dfi)
{
	void __iomem *dfi_regs = dfi->regs;
	int i;

	mutex_lock(&dfi->mutex);

	dfi->usecount--;

	WARN_ON_ONCE(dfi->usecount < 0);

	if (dfi->usecount > 0)
		goto out;

	for (i = 0; i < dfi->max_channels; i++) {
		if (!(dfi->channel_mask & BIT(i)))
			continue;

		writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_SOFTWARE_EN),
			       dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

		if (dfi->ddrmon_ctrl_single)
			break;
	}

	clk_disable_unprepare(dfi->clk);
out:
	mutex_unlock(&dfi->mutex);
}
static void rockchip_dfi_read_counters(struct rockchip_dfi *dfi, struct dmc_count *res)
{
	void __iomem *dfi_regs = dfi->regs;
	u32 i;

	for (i = 0; i < dfi->max_channels; i++) {
		if (!(dfi->channel_mask & BIT(i)))
			continue;
		res->c[i].read_access = readl_relaxed(dfi_regs +
				DDRMON_CH0_RD_NUM + i * dfi->ddrmon_stride);
		res->c[i].write_access = readl_relaxed(dfi_regs +
				DDRMON_CH0_WR_NUM + i * dfi->ddrmon_stride);
		res->c[i].access = readl_relaxed(dfi_regs +
				DDRMON_CH0_DFI_ACCESS_NUM + i * dfi->ddrmon_stride);
		res->c[i].clock_cycles = readl_relaxed(dfi_regs +
				DDRMON_CH0_COUNT_NUM + i * dfi->ddrmon_stride);
	}
}
static int rockchip_dfi_event_disable(struct devfreq_event_dev *edev)
{
	struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev);

	rockchip_dfi_disable(dfi);

	return 0;
}
static int rockchip_dfi_event_enable(struct devfreq_event_dev *edev)
{
	struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev);

	return rockchip_dfi_enable(dfi);
}
static int rockchip_dfi_set_event(struct devfreq_event_dev *edev)
{
	return 0;
}
static int rockchip_dfi_get_event(struct devfreq_event_dev *edev,
				  struct devfreq_event_data *edata)
{
	struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev);
	struct dmc_count count;
	struct dmc_count *last = &dfi->last_event_count;
	u32 access = 0, clock_cycles = 0;
	int i;

	rockchip_dfi_read_counters(dfi, &count);

	/* We can only report one channel, so find the busiest one */
	for (i = 0; i < dfi->max_channels; i++) {
		u32 a, c;

		if (!(dfi->channel_mask & BIT(i)))
			continue;

		a = count.c[i].access - last->c[i].access;
		c = count.c[i].clock_cycles - last->c[i].clock_cycles;

		if (a > access) {
			access = a;
			clock_cycles = c;
		}
	}

	/* One DFI access is assumed to span four DDR clock cycles. */
	edata->load_count = access * 4;
	edata->total_count = clock_cycles;

	dfi->last_event_count = count;

	return 0;
}
static const struct devfreq_event_ops rockchip_dfi_ops = {
	.disable = rockchip_dfi_event_disable,
	.enable = rockchip_dfi_event_enable,
	.get_event = rockchip_dfi_get_event,
	.set_event = rockchip_dfi_set_event,
};
#ifdef CONFIG_PERF_EVENTS
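/*
 * The hardware counters are only 32 bits wide. To report monotonic 64-bit
 * perf counts, an hrtimer (rockchip_dfi_timer) samples the hardware once a
 * second and folds the 32-bit deltas into 64-bit running totals; readers
 * combine those totals with a fresh snapshot under count_seqlock.
 */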
static void rockchip_ddr_perf_counters_add(struct rockchip_dfi *dfi,
					   const struct dmc_count *now,
					   struct dmc_count *res)
{
	const struct dmc_count *last = &dfi->last_perf_count;
	int i;

	for (i = 0; i < dfi->max_channels; i++) {
		res->c[i].read_access = dfi->total_count.c[i].read_access +
			(u32)(now->c[i].read_access - last->c[i].read_access);
		res->c[i].write_access = dfi->total_count.c[i].write_access +
			(u32)(now->c[i].write_access - last->c[i].write_access);
		res->c[i].access = dfi->total_count.c[i].access +
			(u32)(now->c[i].access - last->c[i].access);
		res->c[i].clock_cycles = dfi->total_count.c[i].clock_cycles +
			(u32)(now->c[i].clock_cycles - last->c[i].clock_cycles);
	}
}
static ssize_t ddr_perf_cpumask_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct rockchip_dfi *dfi = container_of(pmu, struct rockchip_dfi, pmu);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(dfi->cpu));
}
static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);
static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};
static const struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};
PMU_EVENT_ATTR_STRING(cycles, ddr_pmu_cycles, "event="__stringify(PERF_EVENT_CYCLES))

#define DFI_PMU_EVENT_ATTR(_name, _var, _str) \
	PMU_EVENT_ATTR_STRING(_name, _var, _str); \
	PMU_EVENT_ATTR_STRING(_name.unit, _var##_unit, "MB"); \
	PMU_EVENT_ATTR_STRING(_name.scale, _var##_scale, "9.536743164e-07")
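/*
 * The scale string is 1/2^20 (9.5367431640625e-07, truncated), so a count
 * kept in bytes is displayed by perf in binary megabytes, matching the "MB"
 * unit string above.
 */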
DFI_PMU_EVENT_ATTR(read-bytes0, ddr_pmu_read_bytes0, "event="__stringify(PERF_EVENT_READ_BYTES0));
DFI_PMU_EVENT_ATTR(write-bytes0, ddr_pmu_write_bytes0, "event="__stringify(PERF_EVENT_WRITE_BYTES0));

DFI_PMU_EVENT_ATTR(read-bytes1, ddr_pmu_read_bytes1, "event="__stringify(PERF_EVENT_READ_BYTES1));
DFI_PMU_EVENT_ATTR(write-bytes1, ddr_pmu_write_bytes1, "event="__stringify(PERF_EVENT_WRITE_BYTES1));

DFI_PMU_EVENT_ATTR(read-bytes2, ddr_pmu_read_bytes2, "event="__stringify(PERF_EVENT_READ_BYTES2));
DFI_PMU_EVENT_ATTR(write-bytes2, ddr_pmu_write_bytes2, "event="__stringify(PERF_EVENT_WRITE_BYTES2));

DFI_PMU_EVENT_ATTR(read-bytes3, ddr_pmu_read_bytes3, "event="__stringify(PERF_EVENT_READ_BYTES3));
DFI_PMU_EVENT_ATTR(write-bytes3, ddr_pmu_write_bytes3, "event="__stringify(PERF_EVENT_WRITE_BYTES3));

DFI_PMU_EVENT_ATTR(read-bytes, ddr_pmu_read_bytes, "event="__stringify(PERF_EVENT_READ_BYTES));
DFI_PMU_EVENT_ATTR(write-bytes, ddr_pmu_write_bytes, "event="__stringify(PERF_EVENT_WRITE_BYTES));

DFI_PMU_EVENT_ATTR(bytes, ddr_pmu_bytes, "event="__stringify(PERF_EVENT_BYTES));
#define DFI_ATTR_MB(_name)		\
	&_name.attr.attr,		\
	&_name##_unit.attr.attr,	\
	&_name##_scale.attr.attr
static struct attribute *ddr_perf_events_attrs[] = {
	&ddr_pmu_cycles.attr.attr,
	DFI_ATTR_MB(ddr_pmu_read_bytes),
	DFI_ATTR_MB(ddr_pmu_write_bytes),
	DFI_ATTR_MB(ddr_pmu_read_bytes0),
	DFI_ATTR_MB(ddr_pmu_write_bytes0),
	DFI_ATTR_MB(ddr_pmu_read_bytes1),
	DFI_ATTR_MB(ddr_pmu_write_bytes1),
	DFI_ATTR_MB(ddr_pmu_read_bytes2),
	DFI_ATTR_MB(ddr_pmu_write_bytes2),
	DFI_ATTR_MB(ddr_pmu_read_bytes3),
	DFI_ATTR_MB(ddr_pmu_write_bytes3),
	DFI_ATTR_MB(ddr_pmu_bytes),
	NULL,
};
static const struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};
PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};
static const struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};
static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_format_attr_group,
	NULL,
};
static int rockchip_ddr_perf_event_init(struct perf_event *event)
{
	struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0) {
		dev_warn(dfi->dev, "Can't provide per-task data!\n");
		return -EINVAL;
	}

	return 0;
}
static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event)
{
	struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);
	int blen = dfi->burst_len;
	struct dmc_count total, now;
	unsigned int seq;
	u64 count = 0;
	int i;

	rockchip_dfi_read_counters(dfi, &now);

	do {
		seq = read_seqbegin(&dfi->count_seqlock);
		rockchip_ddr_perf_counters_add(dfi, &now, &total);
	} while (read_seqretry(&dfi->count_seqlock, seq));

	switch (event->attr.config) {
	case PERF_EVENT_CYCLES:
		count = total.c[0].clock_cycles;
		break;
	case PERF_EVENT_READ_BYTES:
		for (i = 0; i < dfi->max_channels; i++)
			count += total.c[i].read_access * blen * dfi->buswidth[i];
		break;
	case PERF_EVENT_WRITE_BYTES:
		for (i = 0; i < dfi->max_channels; i++)
			count += total.c[i].write_access * blen * dfi->buswidth[i];
		break;
	case PERF_EVENT_READ_BYTES0:
		count = total.c[0].read_access * blen * dfi->buswidth[0];
		break;
	case PERF_EVENT_WRITE_BYTES0:
		count = total.c[0].write_access * blen * dfi->buswidth[0];
		break;
	case PERF_EVENT_READ_BYTES1:
		count = total.c[1].read_access * blen * dfi->buswidth[1];
		break;
	case PERF_EVENT_WRITE_BYTES1:
		count = total.c[1].write_access * blen * dfi->buswidth[1];
		break;
	case PERF_EVENT_READ_BYTES2:
		count = total.c[2].read_access * blen * dfi->buswidth[2];
		break;
	case PERF_EVENT_WRITE_BYTES2:
		count = total.c[2].write_access * blen * dfi->buswidth[2];
		break;
	case PERF_EVENT_READ_BYTES3:
		count = total.c[3].read_access * blen * dfi->buswidth[3];
		break;
	case PERF_EVENT_WRITE_BYTES3:
		count = total.c[3].write_access * blen * dfi->buswidth[3];
		break;
	case PERF_EVENT_BYTES:
		for (i = 0; i < dfi->max_channels; i++)
			count += total.c[i].access * blen * dfi->buswidth[i];
		break;
	}

	return count;
}
static void rockchip_ddr_perf_event_update(struct perf_event *event)
{
	u64 now;
	s64 prev;

	if (event->attr.config >= PERF_ACCESS_TYPE_MAX)
		return;

	now = rockchip_ddr_perf_event_get_count(event);
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}
static void rockchip_ddr_perf_event_start(struct perf_event *event, int flags)
{
	u64 now = rockchip_ddr_perf_event_get_count(event);

	local64_set(&event->hw.prev_count, now);
}
static int rockchip_ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

	dfi->active_events++;

	if (dfi->active_events == 1) {
		dfi->total_count = (struct dmc_count){};
		rockchip_dfi_read_counters(dfi, &dfi->last_perf_count);
		hrtimer_start(&dfi->timer, ns_to_ktime(NSEC_PER_SEC), HRTIMER_MODE_REL);
	}

	if (flags & PERF_EF_START)
		rockchip_ddr_perf_event_start(event, flags);

	return 0;
}
static void rockchip_ddr_perf_event_stop(struct perf_event *event, int flags)
{
	rockchip_ddr_perf_event_update(event);
}
static void rockchip_ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

	rockchip_ddr_perf_event_stop(event, PERF_EF_UPDATE);

	dfi->active_events--;

	if (dfi->active_events == 0)
		hrtimer_cancel(&dfi->timer);
}
static enum hrtimer_restart rockchip_dfi_timer(struct hrtimer *timer)
{
	struct rockchip_dfi *dfi = container_of(timer, struct rockchip_dfi, timer);
	struct dmc_count now, total;

	rockchip_dfi_read_counters(dfi, &now);

	write_seqlock(&dfi->count_seqlock);

	rockchip_ddr_perf_counters_add(dfi, &now, &total);
	dfi->total_count = total;
	dfi->last_perf_count = now;

	write_sequnlock(&dfi->count_seqlock);

	hrtimer_forward_now(&dfi->timer, ns_to_ktime(NSEC_PER_SEC));

	return HRTIMER_RESTART;
}
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct rockchip_dfi *dfi = hlist_entry_safe(node, struct rockchip_dfi, node);
	unsigned int target;

	if (cpu != dfi->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&dfi->pmu, cpu, target);
	dfi->cpu = target;

	return 0;
}
static void rockchip_ddr_cpuhp_remove_state(void *data)
{
	struct rockchip_dfi *dfi = data;

	cpuhp_remove_multi_state(dfi->cpuhp_state);

	rockchip_dfi_disable(dfi);
}
static void rockchip_ddr_cpuhp_remove_instance(void *data)
{
	struct rockchip_dfi *dfi = data;

	cpuhp_state_remove_instance_nocalls(dfi->cpuhp_state, &dfi->node);
}
static void rockchip_ddr_perf_remove(void *data)
{
	struct rockchip_dfi *dfi = data;

	perf_pmu_unregister(&dfi->pmu);
}
static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
{
	struct pmu *pmu = &dfi->pmu;
	int ret;

	seqlock_init(&dfi->count_seqlock);

	pmu->module = THIS_MODULE;
	pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
	pmu->task_ctx_nr = perf_invalid_context;
	pmu->attr_groups = attr_groups;
	pmu->event_init = rockchip_ddr_perf_event_init;
	pmu->add = rockchip_ddr_perf_event_add;
	pmu->del = rockchip_ddr_perf_event_del;
	pmu->start = rockchip_ddr_perf_event_start;
	pmu->stop = rockchip_ddr_perf_event_stop;
	pmu->read = rockchip_ddr_perf_event_update;

	dfi->cpu = raw_smp_processor_id();

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "rockchip_ddr_perf_pmu",
				      NULL,
				      ddr_perf_offline_cpu);
	if (ret < 0) {
		dev_err(dfi->dev, "cpuhp_setup_state_multi failed: %d\n", ret);
		return ret;
	}

	dfi->cpuhp_state = ret;

	rockchip_dfi_enable(dfi);

	ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_state, dfi);
	if (ret)
		return ret;

	ret = cpuhp_state_add_instance_nocalls(dfi->cpuhp_state, &dfi->node);
	if (ret) {
		dev_err(dfi->dev, "Error %d registering hotplug\n", ret);
		return ret;
	}

	ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_instance, dfi);
	if (ret)
		return ret;

	hrtimer_init(&dfi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	dfi->timer.function = rockchip_dfi_timer;

	switch (dfi->ddr_type) {
	case ROCKCHIP_DDRTYPE_LPDDR2:
	case ROCKCHIP_DDRTYPE_LPDDR3:
		dfi->burst_len = 8;
		break;
	case ROCKCHIP_DDRTYPE_LPDDR4:
	case ROCKCHIP_DDRTYPE_LPDDR4X:
		dfi->burst_len = 16;
		break;
	}

	ret = perf_pmu_register(pmu, "rockchip_ddr", -1);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dfi->dev, rockchip_ddr_perf_remove, dfi);
}
#else
static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
{
	return 0;
}
#endif
static int rk3399_dfi_init(struct rockchip_dfi *dfi)
{
	struct regmap *regmap_pmu = dfi->regmap_pmu;
	u32 val;

	dfi->clk = devm_clk_get(dfi->dev, "pclk_ddr_mon");
	if (IS_ERR(dfi->clk))
		return dev_err_probe(dfi->dev, PTR_ERR(dfi->clk),
				     "Cannot get the clk pclk_ddr_mon\n");

	/* get ddr type */
	regmap_read(regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
	dfi->ddr_type = FIELD_GET(RK3399_PMUGRF_OS_REG2_DDRTYPE, val);

	dfi->channel_mask = GENMASK(1, 0);
	dfi->max_channels = 2;

	dfi->buswidth[0] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH0, val) == 0 ? 4 : 2;
	dfi->buswidth[1] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH1, val) == 0 ? 4 : 2;

	dfi->ddrmon_stride = 0x14;
	dfi->ddrmon_ctrl_single = true;

	return 0;
}
static int rk3568_dfi_init(struct rockchip_dfi *dfi)
{
	struct regmap *regmap_pmu = dfi->regmap_pmu;
	u32 reg2, reg3;

	regmap_read(regmap_pmu, RK3568_PMUGRF_OS_REG2, &reg2);
	regmap_read(regmap_pmu, RK3568_PMUGRF_OS_REG3, &reg3);

	/* lower 3 bits of the DDR type */
	dfi->ddr_type = FIELD_GET(RK3568_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);

	/*
	 * For version three and higher the upper two bits of the DDR type are
	 * in RK3568_PMUGRF_OS_REG3
	 */
	if (FIELD_GET(RK3568_PMUGRF_OS_REG3_SYSREG_VERSION, reg3) >= 0x3)
		dfi->ddr_type |= FIELD_GET(RK3568_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;

	dfi->channel_mask = BIT(0);
	dfi->max_channels = 1;

	dfi->buswidth[0] = FIELD_GET(RK3568_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;

	dfi->ddrmon_stride = 0x0; /* not relevant, we only have a single channel on this SoC */
	dfi->ddrmon_ctrl_single = true;

	return 0;
}
static int rk3588_dfi_init(struct rockchip_dfi *dfi)
{
	struct regmap *regmap_pmu = dfi->regmap_pmu;
	u32 reg2, reg3, reg4;

	regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG2, &reg2);
	regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG3, &reg3);
	regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG4, &reg4);

	/* lower 3 bits of the DDR type */
	dfi->ddr_type = FIELD_GET(RK3588_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);

	/*
	 * For version three and higher the upper two bits of the DDR type are
	 * in RK3588_PMUGRF_OS_REG3
	 */
	if (FIELD_GET(RK3588_PMUGRF_OS_REG3_SYSREG_VERSION, reg3) >= 0x3)
		dfi->ddr_type |= FIELD_GET(RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;

	dfi->buswidth[0] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;
	dfi->buswidth[1] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg2) == 0 ? 4 : 2;
	dfi->buswidth[2] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH0, reg4) == 0 ? 4 : 2;
	dfi->buswidth[3] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg4) == 0 ? 4 : 2;
	dfi->channel_mask = FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg2) |
			    FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg4) << 2;
	dfi->max_channels = 4;

	dfi->ddrmon_stride = 0x4000;

	return 0;
}
static const struct of_device_id rockchip_dfi_id_match[] = {
	{ .compatible = "rockchip,rk3399-dfi", .data = rk3399_dfi_init },
	{ .compatible = "rockchip,rk3568-dfi", .data = rk3568_dfi_init },
	{ .compatible = "rockchip,rk3588-dfi", .data = rk3588_dfi_init },
	{ },
};
MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match);
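/*
 * A minimal sketch of a matching device tree node (values as on rk3399,
 * shown for illustration only):
 *
 *	dfi: dfi@ff630000 {
 *		compatible = "rockchip,rk3399-dfi";
 *		reg = <0x00 0xff630000 0x00 0x4000>;
 *		rockchip,pmu = <&pmugrf>;
 *		clocks = <&cru PCLK_DDR_MON>;
 *		clock-names = "pclk_ddr_mon";
 *	};
 */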
static int rockchip_dfi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_dfi *dfi;
	struct devfreq_event_desc *desc;
	struct device_node *np = pdev->dev.of_node, *node;
	int (*soc_init)(struct rockchip_dfi *dfi);
	int ret;

	soc_init = of_device_get_match_data(&pdev->dev);
	if (!soc_init)
		return -EINVAL;

	dfi = devm_kzalloc(dev, sizeof(*dfi), GFP_KERNEL);
	if (!dfi)
		return -ENOMEM;

	dfi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dfi->regs))
		return PTR_ERR(dfi->regs);

	node = of_parse_phandle(np, "rockchip,pmu", 0);
	if (!node)
		return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n");

	dfi->regmap_pmu = syscon_node_to_regmap(node);
	of_node_put(node);
	if (IS_ERR(dfi->regmap_pmu))
		return PTR_ERR(dfi->regmap_pmu);

	dfi->dev = dev;
	mutex_init(&dfi->mutex);

	desc = &dfi->desc;
	desc->ops = &rockchip_dfi_ops;
	desc->driver_data = dfi;
	desc->name = np->name;

	ret = soc_init(dfi);
	if (ret)
		return ret;

	dfi->edev = devm_devfreq_event_add_edev(&pdev->dev, desc);
	if (IS_ERR(dfi->edev)) {
		dev_err(&pdev->dev,
			"failed to add devfreq-event device\n");
		return PTR_ERR(dfi->edev);
	}

	ret = rockchip_ddr_perf_init(dfi);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, dfi);

	return 0;
}
static struct platform_driver rockchip_dfi_driver = {
	.probe = rockchip_dfi_probe,
	.driver = {
		.name = "rockchip-dfi",
		.of_match_table = rockchip_dfi_id_match,
		.suppress_bind_attrs = true,
	},
};
module_platform_driver(rockchip_dfi_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip DFI driver");