// SPDX-License-Identifier: GPL-2.0
/*
 * Marvell Dove PMU support
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/soc/dove/pmu.h>
#include <linux/spinlock.h>

#define NR_PMU_IRQS		7

#define PMC_SW_RST		0x30
#define PMC_IRQ_CAUSE		0x50
#define PMC_IRQ_MASK		0x54

#define PMU_PWR			0x10
#define PMU_ISO			0x58

struct pmu_data {
	spinlock_t lock;
	struct device_node *of_node;
	void __iomem *pmc_base;
	void __iomem *pmu_base;
	struct irq_chip_generic *irq_gc;
	struct irq_domain *irq_domain;
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev reset;
#endif
};
/*
 * The PMU contains a register to reset various subsystems within the
 * SoC.  Export this as a reset controller.
 */
#ifdef CONFIG_RESET_CONTROLLER
#define rcdev_to_pmu(rcdev) container_of(rcdev, struct pmu_data, reset)
static int pmu_reset_reset(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&pmu->lock, flags);
	val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
static int pmu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val = ~BIT(id);

	spin_lock_irqsave(&pmu->lock, flags);
	val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
static int pmu_reset_deassert(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val = BIT(id);

	spin_lock_irqsave(&pmu->lock, flags);
	val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
static const struct reset_control_ops pmu_reset_ops = {
	.reset = pmu_reset_reset,
	.assert = pmu_reset_assert,
	.deassert = pmu_reset_deassert,
};

static struct reset_controller_dev pmu_reset __initdata = {
	.ops = &pmu_reset_ops,
	.owner = THIS_MODULE,
	.nr_resets = 32,
};
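/*
 * Consumer usage sketch (hypothetical peripheral driver, not part of this
 * file): with "#reset-cells = <1>" on the PMU node and a consumer property
 * such as "resets = <&pmu 16>", a driver pulses its reset line through the
 * standard reset API:
 *
 *	struct reset_control *rst;
 *
 *	rst = devm_reset_control_get_exclusive(dev, NULL);
 *	if (IS_ERR(rst))
 *		return PTR_ERR(rst);
 *	reset_control_reset(rst);	// ends up in pmu_reset_reset()
 */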
static void __init pmu_reset_init(struct pmu_data *pmu)
{
	int ret;

	pmu->reset = pmu_reset;
	pmu->reset.of_node = pmu->of_node;

	ret = reset_controller_register(&pmu->reset);
	if (ret)
		pr_err("pmu: %s failed: %d\n", "reset_controller_register", ret);
}
#else
static void __init pmu_reset_init(struct pmu_data *pmu)
{
}
#endif

struct pmu_domain {
	struct pmu_data *pmu;
	u32 pwr_mask;
	u32 rst_mask;
	u32 iso_mask;
	struct generic_pm_domain base;
};

#define to_pmu_domain(dom) container_of(dom, struct pmu_domain, base)
/*
 * This deals with the "old" Marvell sequence of bringing a power domain
 * down/up, which is: apply power, release reset, disable isolators.
 *
 * Later devices apparently use a different sequence: power up, disable
 * isolators, assert repair signal, enable SRMA clock, enable AXI clock,
 * enable module clock, deassert reset.
 *
 * Note: reading the assembly, it seems that the IO accessors have an
 * unfortunate side-effect - they cause memory already read into registers
 * for the if () to be re-read for the bit-set or bit-clear operation.
 * The code is written to avoid this.
 */
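/*
 * Concretely (an illustrative sketch, not code from this driver): writing
 * the load and the mask as a single expression, e.g.
 *
 *	val = readl_relaxed(pmu_base + PMU_ISO) & ~pmu_dom->iso_mask;
 *
 * invited the compiler to re-load the mask around the MMIO accessor, so
 * the helpers below compute the mask first and then fold in the register
 * value:
 *
 *	val = ~pmu_dom->iso_mask;
 *	val &= readl_relaxed(pmu_base + PMU_ISO);
 */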
static int pmu_domain_power_off(struct generic_pm_domain *domain)
{
	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
	struct pmu_data *pmu = pmu_dom->pmu;
	unsigned long flags;
	unsigned int val;
	void __iomem *pmu_base = pmu->pmu_base;
	void __iomem *pmc_base = pmu->pmc_base;

	spin_lock_irqsave(&pmu->lock, flags);

	/* Enable isolators */
	if (pmu_dom->iso_mask) {
		val = ~pmu_dom->iso_mask;
		val &= readl_relaxed(pmu_base + PMU_ISO);
		writel_relaxed(val, pmu_base + PMU_ISO);
	}

	/* Reset unit */
	if (pmu_dom->rst_mask) {
		val = ~pmu_dom->rst_mask;
		val &= readl_relaxed(pmc_base + PMC_SW_RST);
		writel_relaxed(val, pmc_base + PMC_SW_RST);
	}

	/* Power down */
	val = readl_relaxed(pmu_base + PMU_PWR) | pmu_dom->pwr_mask;
	writel_relaxed(val, pmu_base + PMU_PWR);

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
static int pmu_domain_power_on(struct generic_pm_domain *domain)
{
	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
	struct pmu_data *pmu = pmu_dom->pmu;
	unsigned long flags;
	unsigned int val;
	void __iomem *pmu_base = pmu->pmu_base;
	void __iomem *pmc_base = pmu->pmc_base;

	spin_lock_irqsave(&pmu->lock, flags);

	/* Power on */
	val = ~pmu_dom->pwr_mask & readl_relaxed(pmu_base + PMU_PWR);
	writel_relaxed(val, pmu_base + PMU_PWR);

	/* Release reset */
	if (pmu_dom->rst_mask) {
		val = pmu_dom->rst_mask;
		val |= readl_relaxed(pmc_base + PMC_SW_RST);
		writel_relaxed(val, pmc_base + PMC_SW_RST);
	}

	/* Disable isolators */
	if (pmu_dom->iso_mask) {
		val = pmu_dom->iso_mask;
		val |= readl_relaxed(pmu_base + PMU_ISO);
		writel_relaxed(val, pmu_base + PMU_ISO);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
static void __pmu_domain_register(struct pmu_domain *domain,
	struct device_node *np)
{
	unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR);

	domain->base.power_off = pmu_domain_power_off;
	domain->base.power_on = pmu_domain_power_on;

	pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask));

	if (np)
		of_genpd_add_provider_simple(np, &domain->base);
}
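/*
 * Note: the third argument to pm_genpd_init() above derives the domain's
 * initial state from the live PMU_PWR register rather than assuming it,
 * so a domain left powered (or unpowered) by the bootloader is registered
 * in its actual state.
 */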
/* PMU IRQ controller */
static void pmu_irq_handler(struct irq_desc *desc)
{
	struct pmu_data *pmu = irq_desc_get_handler_data(desc);
	struct irq_chip_generic *gc = pmu->irq_gc;
	struct irq_domain *domain = pmu->irq_domain;
	void __iomem *base = gc->reg_base;
	u32 stat = readl_relaxed(base + PMC_IRQ_CAUSE) & gc->mask_cache;
	u32 done = ~0;

	if (stat == 0) {
		handle_bad_irq(desc);
		return;
	}

	while (stat) {
		u32 hwirq = fls(stat) - 1;

		stat &= ~(1 << hwirq);
		done &= ~(1 << hwirq);

		generic_handle_irq(irq_find_mapping(domain, hwirq));
	}

	/*
	 * The PMU mask register is not RW0C: it is RW.  This means that
	 * the bits take whatever value is written to them; if you write
	 * a '1', you will set the interrupt.
	 *
	 * Unfortunately this means there is NO race free way to clear
	 * these interrupts.
	 *
	 * So, let's structure the code so that the window is as small as
	 * possible.
	 */
	irq_gc_lock(gc);
	done &= readl_relaxed(base + PMC_IRQ_CAUSE);
	writel_relaxed(done, base + PMC_IRQ_CAUSE);
	irq_gc_unlock(gc);
}
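/*
 * To spell out the race above: a cause bit that becomes pending between
 * the readl_relaxed() and the writel_relaxed() in the locked region is
 * overwritten with its previous value and is lost.  Re-reading the cause
 * register immediately before the write-back only narrows that window;
 * it cannot close it, because the register is plain RW.
 */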
static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
{
	const char *name = "pmu_irq";
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	int ret;

	/* mask and clear all interrupts */
	writel(0, pmu->pmc_base + PMC_IRQ_MASK);
	writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);

	domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", name);
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, name,
					     handle_level_irq,
					     IRQ_NOREQUEST | IRQ_NOPROBE, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc: %d\n", name, ret);
		irq_domain_remove(domain);
		return ret;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = pmu->pmc_base;
	gc->chip_types[0].regs.mask = PMC_IRQ_MASK;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	pmu->irq_domain = domain;
	pmu->irq_gc = gc;

	irq_set_handler_data(irq, pmu);
	irq_set_chained_handler(irq, pmu_irq_handler);

	return 0;
}
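/*
 * Mask register polarity: a '1' in PMC_IRQ_MASK enables an interrupt, so
 * irq_mask clears the bit and irq_unmask sets it, and the chained handler
 * above filters the cause register through gc->mask_cache to ignore
 * sources that are not enabled.
 */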
int __init dove_init_pmu_legacy(const struct dove_pmu_initdata *initdata)
{
	const struct dove_pmu_domain_initdata *domain_initdata;
	struct pmu_data *pmu;
	int ret;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	spin_lock_init(&pmu->lock);
	pmu->pmc_base = initdata->pmc_base;
	pmu->pmu_base = initdata->pmu_base;

	pmu_reset_init(pmu);
	for (domain_initdata = initdata->domains; domain_initdata->name;
	     domain_initdata++) {
		struct pmu_domain *domain;

		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
		if (domain) {
			domain->pmu = pmu;
			domain->pwr_mask = domain_initdata->pwr_mask;
			domain->rst_mask = domain_initdata->rst_mask;
			domain->iso_mask = domain_initdata->iso_mask;
			domain->base.name = domain_initdata->name;

			__pmu_domain_register(domain, NULL);
		}
	}

	ret = dove_init_pmu_irq(pmu, initdata->irq);
	if (ret)
		pr_err("dove_init_pmu_irq() failed: %d\n", ret);

	if (pmu->irq_domain)
		irq_domain_associate_many(pmu->irq_domain,
					  initdata->irq_domain_start,
					  0, NR_PMU_IRQS);

	return 0;
}
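/*
 * Non-DT boards use the legacy entry point above: with no device tree to
 * map interrupts on demand, irq_domain_associate_many() binds hwirqs
 * 0..NR_PMU_IRQS-1 to the fixed range of Linux IRQ numbers starting at
 * initdata->irq_domain_start.
 */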
/*
 * pmu: power-manager@d0000 {
 *	compatible = "marvell,dove-pmu";
 *	reg = <0xd0000 0x8000> <0xd8000 0x8000>;
 *	interrupts = <33>;
 *	interrupt-controller;
 *	#reset-cells = 1;
 *	domains {
 *		vpu_domain: vpu-domain {
 *			#power-domain-cells = <0>;
 *			marvell,pmu_pwr_mask = <0x00000008>;
 *			marvell,pmu_iso_mask = <0x00000001>;
 *			resets = <&pmu 16>;
 *		};
 *		gpu_domain: gpu-domain {
 *			#power-domain-cells = <0>;
 *			marvell,pmu_pwr_mask = <0x00000004>;
 *			marvell,pmu_iso_mask = <0x00000002>;
 *			resets = <&pmu 18>;
 *		};
 *	};
 * };
 */
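/*
 * A device node then consumes a domain in the usual genpd fashion
 * (hypothetical consumer, not defined in this file):
 *
 *	vpu@b0000 {
 *		...
 *		power-domains = <&vpu_domain>;
 *	};
 */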
int __init dove_init_pmu(void)
{
	struct device_node *np_pmu, *domains_node, *np;
	struct pmu_data *pmu;
	int ret, parent_irq;

	/* Lookup the PMU node */
	np_pmu = of_find_compatible_node(NULL, NULL, "marvell,dove-pmu");
	if (!np_pmu)
		return -ENODEV;

	domains_node = of_get_child_by_name(np_pmu, "domains");
	if (!domains_node) {
		pr_err("%pOFn: failed to find domains sub-node\n", np_pmu);
		return 0;
	}

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	spin_lock_init(&pmu->lock);
	pmu->of_node = np_pmu;
	pmu->pmc_base = of_iomap(pmu->of_node, 0);
	pmu->pmu_base = of_iomap(pmu->of_node, 1);
	if (!pmu->pmc_base || !pmu->pmu_base) {
		pr_err("%pOFn: failed to map PMU\n", np_pmu);
		iounmap(pmu->pmu_base);
		iounmap(pmu->pmc_base);
		kfree(pmu);
		return -ENOMEM;
	}

	pmu_reset_init(pmu);

	for_each_available_child_of_node(domains_node, np) {
		struct of_phandle_args args;
		struct pmu_domain *domain;

		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
		if (!domain) {
			of_node_put(np);
			break;
		}

		domain->pmu = pmu;
		domain->base.name = kasprintf(GFP_KERNEL, "%pOFn", np);
		if (!domain->base.name) {
			kfree(domain);
			of_node_put(np);
			break;
		}

		of_property_read_u32(np, "marvell,pmu_pwr_mask",
				     &domain->pwr_mask);
		of_property_read_u32(np, "marvell,pmu_iso_mask",
				     &domain->iso_mask);

		/*
		 * We parse the reset controller property directly here
		 * to ensure that we can operate when the reset controller
		 * support is not configured into the kernel.
		 */
		ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
						 0, &args);
		if (ret == 0) {
			if (args.np == pmu->of_node)
				domain->rst_mask = BIT(args.args[0]);
			of_node_put(args.np);
		}

		__pmu_domain_register(domain, np);
	}

	/* Loss of the interrupt controller is not a fatal error. */
	parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
	if (!parent_irq) {
		pr_err("%pOFn: no interrupt specified\n", np_pmu);
	} else {
		ret = dove_init_pmu_irq(pmu, parent_irq);
		if (ret)
			pr_err("dove_init_pmu_irq() failed: %d\n", ret);
	}

	return 0;
}