// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas R-Car V3U System Controller
 *
 * Copyright (C) 2020 Renesas Electronics Corp.
 */
#include <linux/bits.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <dt-bindings/power/r8a779a0-sysc.h>
/*
 * Power Domain flags
 */
#define PD_CPU BIT(0) /* Area contains main CPU core */
#define PD_SCU BIT(1) /* Area contains SCU and L2 cache */
#define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */

#define PD_CPU_NOCR (PD_CPU | PD_NO_CR) /* CPU area lacks CR */
#define PD_ALWAYS_ON PD_NO_CR /* Always-on area */
/*
 * Description of a Power Area
 */
struct r8a779a0_sysc_area {
	const char *name;
	u8 pdr;			/* PDRn */
	int parent;		/* -1 if none */
	unsigned int flags;	/* See PD_* */
};
/*
 * SoC-specific Power Area Description
 */
struct r8a779a0_sysc_info {
	const struct r8a779a0_sysc_area *areas;
	unsigned int num_areas;
};
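/*
 * Power area hierarchy of the R-Car V3U (R8A779A0). Each entry gives the
 * area name, its PDR index, the index of its parent area (-1 for the root
 * "always-on" area) and optional PD_* flags. Parent areas must be listed
 * before their children so that subdomains can be linked during init.
 */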
static struct r8a779a0_sysc_area r8a779a0_areas[] __initdata = {
	{ "always-on", R8A779A0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
	{ "a3e0", R8A779A0_PD_A3E0, R8A779A0_PD_ALWAYS_ON, PD_SCU },
	{ "a3e1", R8A779A0_PD_A3E1, R8A779A0_PD_ALWAYS_ON, PD_SCU },
	{ "a2e0d0", R8A779A0_PD_A2E0D0, R8A779A0_PD_A3E0, PD_SCU },
	{ "a2e0d1", R8A779A0_PD_A2E0D1, R8A779A0_PD_A3E0, PD_SCU },
	{ "a2e1d0", R8A779A0_PD_A2E1D0, R8A779A0_PD_A3E1, PD_SCU },
	{ "a2e1d1", R8A779A0_PD_A2E1D1, R8A779A0_PD_A3E1, PD_SCU },
	{ "a1e0d0c0", R8A779A0_PD_A1E0D0C0, R8A779A0_PD_A2E0D0, PD_CPU_NOCR },
	{ "a1e0d0c1", R8A779A0_PD_A1E0D0C1, R8A779A0_PD_A2E0D0, PD_CPU_NOCR },
	{ "a1e0d1c0", R8A779A0_PD_A1E0D1C0, R8A779A0_PD_A2E0D1, PD_CPU_NOCR },
	{ "a1e0d1c1", R8A779A0_PD_A1E0D1C1, R8A779A0_PD_A2E0D1, PD_CPU_NOCR },
	{ "a1e1d0c0", R8A779A0_PD_A1E1D0C0, R8A779A0_PD_A2E1D0, PD_CPU_NOCR },
	{ "a1e1d0c1", R8A779A0_PD_A1E1D0C1, R8A779A0_PD_A2E1D0, PD_CPU_NOCR },
	{ "a1e1d1c0", R8A779A0_PD_A1E1D1C0, R8A779A0_PD_A2E1D1, PD_CPU_NOCR },
	{ "a1e1d1c1", R8A779A0_PD_A1E1D1C1, R8A779A0_PD_A2E1D1, PD_CPU_NOCR },
	{ "3dg-a", R8A779A0_PD_3DG_A, R8A779A0_PD_ALWAYS_ON },
	{ "3dg-b", R8A779A0_PD_3DG_B, R8A779A0_PD_3DG_A },
	{ "a3vip0", R8A779A0_PD_A3VIP0, R8A779A0_PD_ALWAYS_ON },
	{ "a3vip1", R8A779A0_PD_A3VIP1, R8A779A0_PD_ALWAYS_ON },
	{ "a3vip3", R8A779A0_PD_A3VIP3, R8A779A0_PD_ALWAYS_ON },
	{ "a3vip2", R8A779A0_PD_A3VIP2, R8A779A0_PD_ALWAYS_ON },
	{ "a3isp01", R8A779A0_PD_A3ISP01, R8A779A0_PD_ALWAYS_ON },
	{ "a3isp23", R8A779A0_PD_A3ISP23, R8A779A0_PD_ALWAYS_ON },
	{ "a3ir", R8A779A0_PD_A3IR, R8A779A0_PD_ALWAYS_ON },
	{ "a2cn0", R8A779A0_PD_A2CN0, R8A779A0_PD_A3IR },
	{ "a2imp01", R8A779A0_PD_A2IMP01, R8A779A0_PD_A3IR },
	{ "a2dp0", R8A779A0_PD_A2DP0, R8A779A0_PD_A3IR },
	{ "a2cv0", R8A779A0_PD_A2CV0, R8A779A0_PD_A3IR },
	{ "a2cv1", R8A779A0_PD_A2CV1, R8A779A0_PD_A3IR },
	{ "a2cv4", R8A779A0_PD_A2CV4, R8A779A0_PD_A3IR },
	{ "a2cv6", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR },
	{ "a2cn2", R8A779A0_PD_A2CN2, R8A779A0_PD_A3IR },
	{ "a2imp23", R8A779A0_PD_A2IMP23, R8A779A0_PD_A3IR },
86 { "a2dp1", R8A779A0_PD_A2DP0
, R8A779A0_PD_A3IR
},
87 { "a2cv2", R8A779A0_PD_A2CV0
, R8A779A0_PD_A3IR
},
88 { "a2cv3", R8A779A0_PD_A2CV1
, R8A779A0_PD_A3IR
},
89 { "a2cv5", R8A779A0_PD_A2CV4
, R8A779A0_PD_A3IR
},
90 { "a2cv7", R8A779A0_PD_A2CV6
, R8A779A0_PD_A3IR
},
91 { "a2cn1", R8A779A0_PD_A2CN1
, R8A779A0_PD_A3IR
},
92 { "a1cnn0", R8A779A0_PD_A1CNN0
, R8A779A0_PD_A2CN0
},
93 { "a1cnn2", R8A779A0_PD_A1CNN2
, R8A779A0_PD_A2CN2
},
94 { "a1dsp0", R8A779A0_PD_A1DSP0
, R8A779A0_PD_A2CN2
},
95 { "a1cnn1", R8A779A0_PD_A1CNN1
, R8A779A0_PD_A2CN1
},
96 { "a1dsp1", R8A779A0_PD_A1DSP1
, R8A779A0_PD_A2CN1
},
static const struct r8a779a0_sysc_info r8a779a0_sysc_info __initconst = {
	.areas = r8a779a0_areas,
	.num_areas = ARRAY_SIZE(r8a779a0_areas),
};
/* SYSC Common */
#define SYSCSR 0x000 /* SYSC Status Register */
#define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register */
#define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */
#define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */
#define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */
#define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask Register */
/* Power Domain Registers */
#define PDRSR(n) (0x1000 + ((n) * 0x40))
#define PDRONCR(n) (0x1004 + ((n) * 0x40))
#define PDROFFCR(n) (0x1008 + ((n) * 0x40))
#define PDRESR(n) (0x100C + ((n) * 0x40))
#define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */

#define PDRESR_ERR BIT(0)

#define PDRSR_OFF BIT(0) /* Power-OFF state */
#define PDRSR_ON BIT(4) /* Power-ON state */
#define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */
#define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */
#define SYSCSR_BUSY GENMASK(1, 0) /* All bits set when SYSC is not busy */
#define SYSCSR_TIMEOUT 10000
#define SYSCSR_DELAY_US 10

#define PDRESR_RETRIES 1000
#define PDRESR_DELAY_US 10

#define SYSCISR_TIMEOUT 10000
#define SYSCISR_DELAY_US 10

#define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32)

static void __iomem *r8a779a0_sysc_base;
static DEFINE_SPINLOCK(r8a779a0_sysc_lock); /* SMP CPUs + I/O devices */
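/*
 * Submit a single power-on or power-off request for power area @pdr by
 * writing PWRON_PWROFF to its PDRONCR/PDROFFCR register, after waiting for
 * the SYSC to be ready to accept a new request.
 */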
static int r8a779a0_sysc_pwr_on_off(u8 pdr, bool on)
{
	unsigned int reg_offs;
	u32 val;
	int ret;

	if (on)
		reg_offs = PDRONCR(pdr);
	else
		reg_offs = PDROFFCR(pdr);

	/* Wait until SYSC is ready to accept a power request */
	ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCSR, val,
					(val & SYSCSR_BUSY) == SYSCSR_BUSY,
					SYSCSR_DELAY_US, SYSCSR_TIMEOUT);
	if (ret < 0)
		return -EAGAIN;

	/* Submit power shutoff or power resume request */
	iowrite32(PWRON_PWROFF, r8a779a0_sysc_base + reg_offs);

	return 0;
}
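/*
 * Clear the pending interrupt flags in SYSCISCR for register index @reg_idx
 * and poll until the hardware confirms the bits in @isr_mask are cleared.
 */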
static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask)
{
	u32 val;
	int ret;

	iowrite32(isr_mask, r8a779a0_sysc_base + SYSCISCR(reg_idx));

	ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx),
					val, !(val & isr_mask),
					SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
	if (ret < 0) {
		pr_err("%s: Cannot clear IRQ flags in SYSCISCR\n", __func__);
		return -EIO;
	}

	return 0;
}
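/*
 * Power a single area on or off: enable (but mask) its status interrupt,
 * resubmit the request until the SYSC accepts it without flagging an error
 * in PDRESR, then wait for the completion flag in SYSCISCR and clear it.
 * The whole sequence runs under r8a779a0_sysc_lock with interrupts disabled.
 */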
static int r8a779a0_sysc_power(u8 pdr, bool on)
{
	unsigned int isr_mask;
	unsigned int reg_idx, bit_idx;
	unsigned int status;
	unsigned long flags;
	int ret = 0;
	u32 val;
	int k;

	spin_lock_irqsave(&r8a779a0_sysc_lock, flags);

	reg_idx = pdr / NUM_DOMAINS_EACH_REG;
	bit_idx = pdr % NUM_DOMAINS_EACH_REG;

	isr_mask = BIT(bit_idx);

	/*
	 * The interrupt source needs to be enabled, but masked, to prevent the
	 * CPU from receiving it.
	 */
	iowrite32(ioread32(r8a779a0_sysc_base + SYSCIER(reg_idx)) | isr_mask,
		  r8a779a0_sysc_base + SYSCIER(reg_idx));
	iowrite32(ioread32(r8a779a0_sysc_base + SYSCIMR(reg_idx)) | isr_mask,
		  r8a779a0_sysc_base + SYSCIMR(reg_idx));

	ret = clear_irq_flags(reg_idx, isr_mask);
	if (ret)
		goto out;

	/* Submit power shutoff or resume request until it is accepted */
	for (k = 0; k < PDRESR_RETRIES; k++) {
		ret = r8a779a0_sysc_pwr_on_off(pdr, on);
		if (ret)
			goto out;

		status = ioread32(r8a779a0_sysc_base + PDRESR(pdr));
		if (!(status & PDRESR_ERR))
			break;

		udelay(PDRESR_DELAY_US);
	}

	if (k == PDRESR_RETRIES) {
		ret = -EIO;
		goto out;
	}

	/* Wait until the power shutoff or resume request has completed */
	ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx),
					val, (val & isr_mask),
					SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
	if (ret < 0) {
		ret = -EIO;
		goto out;
	}

	/* Clear interrupt flags */
	ret = clear_irq_flags(reg_idx, isr_mask);

out:
	spin_unlock_irqrestore(&r8a779a0_sysc_lock, flags);

	pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
		 pdr, ioread32(r8a779a0_sysc_base + SYSCISCR(reg_idx)), ret);

	return ret;
}
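/* Check the PDRSR status register to see whether power area @pdr is off. */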
static bool r8a779a0_sysc_power_is_off(u8 pdr)
{
	unsigned int st;

	st = ioread32(r8a779a0_sysc_base + PDRSR(pdr));

	if (st & PDRSR_OFF)
		return true;

	return false;
}
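/*
 * Per-area PM domain: wraps a generic_pm_domain and stores the area name in
 * the trailing flexible array member.
 */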
struct r8a779a0_sysc_pd {
	struct generic_pm_domain genpd;
	u8 pdr;
	unsigned int flags;
	char name[];
};
static inline struct r8a779a0_sysc_pd *to_r8a779a0_pd(struct generic_pm_domain *d)
{
	return container_of(d, struct r8a779a0_sysc_pd, genpd);
}
static int r8a779a0_sysc_pd_power_off(struct generic_pm_domain *genpd)
{
	struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd);

	pr_debug("%s: %s\n", __func__, genpd->name);
	return r8a779a0_sysc_power(pd->pdr, false);
}
static int r8a779a0_sysc_pd_power_on(struct generic_pm_domain *genpd)
{
	struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd);

	pr_debug("%s: %s\n", __func__, genpd->name);
	return r8a779a0_sysc_power(pd->pdr, true);
}
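/*
 * Configure the generic PM domain for one power area: keep CPU, SCU and
 * control-less areas always on, hook up the clock domain for I/O areas,
 * install the power_on/power_off callbacks, power the area up if it is off
 * and controllable, and register the domain with the genpd framework.
 */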
static int __init r8a779a0_sysc_pd_setup(struct r8a779a0_sysc_pd *pd)
{
	struct generic_pm_domain *genpd = &pd->genpd;
	const char *name = pd->genpd.name;
	int error;

	if (pd->flags & PD_CPU) {
		/*
		 * This domain contains a CPU core and therefore it should
		 * only be turned off if the CPU is not in use.
		 */
		pr_debug("PM domain %s contains %s\n", name, "CPU");
		genpd->flags |= GENPD_FLAG_ALWAYS_ON;
	} else if (pd->flags & PD_SCU) {
		/*
		 * This domain contains an SCU and cache-controller, and
		 * therefore it should only be turned off if the CPU cores are
		 * not in use.
		 */
		pr_debug("PM domain %s contains %s\n", name, "SCU");
		genpd->flags |= GENPD_FLAG_ALWAYS_ON;
	} else if (pd->flags & PD_NO_CR) {
		/*
		 * This domain cannot be turned off.
		 */
		genpd->flags |= GENPD_FLAG_ALWAYS_ON;
	}

	if (!(pd->flags & (PD_CPU | PD_SCU))) {
		/* Enable Clock Domain for I/O devices */
		genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
		genpd->attach_dev = cpg_mssr_attach_dev;
		genpd->detach_dev = cpg_mssr_detach_dev;
	}

	genpd->power_off = r8a779a0_sysc_pd_power_off;
	genpd->power_on = r8a779a0_sysc_pd_power_on;

	if (pd->flags & (PD_CPU | PD_NO_CR)) {
		/* Skip CPUs (handled by SMP code) and areas without control */
		pr_debug("%s: Not touching %s\n", __func__, genpd->name);
		goto finalize;
	}

	if (!r8a779a0_sysc_power_is_off(pd->pdr)) {
		pr_debug("%s: %s is already powered\n", __func__, genpd->name);
		goto finalize;
	}

	r8a779a0_sysc_power(pd->pdr, true);

finalize:
	error = pm_genpd_init(genpd, &simple_qos_governor, false);
	if (error)
		pr_err("Failed to init PM domain %s: %d\n", name, error);

	return error;
}
static const struct of_device_id r8a779a0_sysc_matches[] __initconst = {
	{ .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info },
	{ /* sentinel */ }
};
struct r8a779a0_pm_domains {
	struct genpd_onecell_data onecell_data;
	struct generic_pm_domain *domains[R8A779A0_PD_ALWAYS_ON + 1];
};
static struct genpd_onecell_data *r8a779a0_sysc_onecell_data;
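/*
 * Early init: map the SYSC registers described in DT, create a generic PM
 * domain for every known power area, link each domain to its parent and
 * register the whole set as a genpd provider for "renesas,r8a779a0-sysc".
 */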
static int __init r8a779a0_sysc_pd_init(void)
{
	const struct r8a779a0_sysc_info *info;
	const struct of_device_id *match;
	struct r8a779a0_pm_domains *domains;
	struct device_node *np;
	void __iomem *base;
	unsigned int i;
	int error;

	np = of_find_matching_node_and_match(NULL, r8a779a0_sysc_matches, &match);
	if (!np)
		return -ENODEV;

	info = match->data;

	base = of_iomap(np, 0);
	if (!base) {
		pr_warn("%pOF: Cannot map regs\n", np);
		error = -ENOMEM;
		goto out_put;
	}

	r8a779a0_sysc_base = base;

	domains = kzalloc(sizeof(*domains), GFP_KERNEL);
	if (!domains) {
		error = -ENOMEM;
		goto out_put;
	}

	domains->onecell_data.domains = domains->domains;
	domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
	r8a779a0_sysc_onecell_data = &domains->onecell_data;

	for (i = 0; i < info->num_areas; i++) {
		const struct r8a779a0_sysc_area *area = &info->areas[i];
		struct r8a779a0_sysc_pd *pd;

		if (!area->name) {
			/* Skip NULLified area */
			continue;
		}

		pd = kzalloc(sizeof(*pd) + strlen(area->name) + 1, GFP_KERNEL);
		if (!pd) {
			error = -ENOMEM;
			goto out_put;
		}

		strcpy(pd->name, area->name);
		pd->genpd.name = pd->name;
		pd->pdr = area->pdr;
		pd->flags = area->flags;

		error = r8a779a0_sysc_pd_setup(pd);
		if (error)
			goto out_put;

		domains->domains[area->pdr] = &pd->genpd;

		if (area->parent < 0)
			continue;

		error = pm_genpd_add_subdomain(domains->domains[area->parent],
					       &pd->genpd);
		if (error) {
			pr_warn("Failed to add PM subdomain %s to parent %u\n",
				area->name, area->parent);
			goto out_put;
		}
	}

	error = of_genpd_add_provider_onecell(np, &domains->onecell_data);

out_put:
	of_node_put(np);
	return error;
}
early_initcall(r8a779a0_sysc_pd_init);