/*
 * Marvell Dove PMU support
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/soc/dove/pmu.h>
#include <linux/spinlock.h>
#define NR_PMU_IRQS		7

#define PMC_SW_RST		0x30
#define PMC_IRQ_CAUSE		0x50
#define PMC_IRQ_MASK		0x54

#define PMU_PWR			0x10
#define PMU_ISO			0x58

struct pmu_data {
	spinlock_t lock;
	struct device_node *of_node;
	void __iomem *pmc_base;
	void __iomem *pmu_base;
	struct irq_chip_generic *irq_gc;
	struct irq_domain *irq_domain;
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev reset;
#endif
};
/*
 * The PMU contains a register to reset various subsystems within the
 * SoC.  Export this as a reset controller.
 */
#ifdef CONFIG_RESET_CONTROLLER
#define rcdev_to_pmu(rcdev) container_of(rcdev, struct pmu_data, reset)
static int pmu_reset_reset(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&pmu->lock, flags);
	val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
static int pmu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val = ~BIT(id);

	spin_lock_irqsave(&pmu->lock, flags);
	val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
static int pmu_reset_deassert(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val = BIT(id);

	spin_lock_irqsave(&pmu->lock, flags);
	val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
static struct reset_control_ops pmu_reset_ops = {
	.reset = pmu_reset_reset,
	.assert = pmu_reset_assert,
	.deassert = pmu_reset_deassert,
};
static struct reset_controller_dev pmu_reset __initdata = {
	.ops = &pmu_reset_ops,
	.owner = THIS_MODULE,
	.nr_resets = 32,
};
static void __init pmu_reset_init(struct pmu_data *pmu)
{
	int ret;

	pmu->reset = pmu_reset;
	pmu->reset.of_node = pmu->of_node;

	ret = reset_controller_register(&pmu->reset);
	if (ret)
		pr_err("pmu: %s failed: %d\n", "reset_controller_register", ret);
}
#else
static void __init pmu_reset_init(struct pmu_data *pmu)
{
}
#endif
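
/*
 * Illustrative consumer-side sketch (not part of this driver): with the
 * reset controller registered above, a peripheral driver pulses its
 * reset line through the generic reset API instead of poking PMC_SW_RST
 * directly.  The platform device and DT wiring here are hypothetical:
 *
 *	struct reset_control *rst;
 *
 *	rst = devm_reset_control_get(&pdev->dev, NULL);
 *	if (IS_ERR(rst))
 *		return PTR_ERR(rst);
 *	reset_control_reset(rst);	// ends up in pmu_reset_reset()
 *
 * A "resets = <&pmu N>" property in the consumer node selects bit N of
 * PMC_SW_RST, matching the of_node set in pmu_reset_init().
 */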
struct pmu_domain {
	struct pmu_data *pmu;
	u32 pwr_mask;
	u32 rst_mask;
	u32 iso_mask;
	struct generic_pm_domain base;
};
#define to_pmu_domain(dom) container_of(dom, struct pmu_domain, base)
/*
 * This deals with the "old" Marvell sequence of bringing a power domain
 * down/up, which is: apply power, release reset, disable isolators.
 *
 * Later devices apparently use a different sequence: power up, disable
 * isolators, assert repair signal, enable SRMA clock, enable AXI clock,
 * enable module clock, deassert reset.
 *
 * Note: reading the assembly, it seems that the IO accessors have an
 * unfortunate side-effect - they cause memory already read into registers
 * for the if () to be re-read for the bit-set or bit-clear operation.
 * The code is written to avoid this.
 */
static int pmu_domain_power_off(struct generic_pm_domain *domain)
{
	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
	struct pmu_data *pmu = pmu_dom->pmu;
	unsigned long flags;
	unsigned int val;
	void __iomem *pmu_base = pmu->pmu_base;
	void __iomem *pmc_base = pmu->pmc_base;

	spin_lock_irqsave(&pmu->lock, flags);

	/* Enable isolators */
	if (pmu_dom->iso_mask) {
		val = ~pmu_dom->iso_mask;
		val &= readl_relaxed(pmu_base + PMU_ISO);
		writel_relaxed(val, pmu_base + PMU_ISO);
	}

	/* Reset unit */
	if (pmu_dom->rst_mask) {
		val = ~pmu_dom->rst_mask;
		val &= readl_relaxed(pmc_base + PMC_SW_RST);
		writel_relaxed(val, pmc_base + PMC_SW_RST);
	}

	/* Power down */
	val = readl_relaxed(pmu_base + PMU_PWR) | pmu_dom->pwr_mask;
	writel_relaxed(val, pmu_base + PMU_PWR);

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
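
/*
 * As noted above, the bit-clear sequences are deliberately written as
 * "val = ~mask; val &= readl_relaxed(reg);" rather than the more usual
 * "val = readl_relaxed(reg) & ~mask;".  The mask has already been
 * loaded into a register for the preceding if (), so ordering the
 * expression this way stops the compiler re-reading the mask from
 * memory after the MMIO access.
 */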
static int pmu_domain_power_on(struct generic_pm_domain *domain)
{
	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
	struct pmu_data *pmu = pmu_dom->pmu;
	unsigned long flags;
	unsigned int val;
	void __iomem *pmu_base = pmu->pmu_base;
	void __iomem *pmc_base = pmu->pmc_base;

	spin_lock_irqsave(&pmu->lock, flags);

	/* Power on */
	val = ~pmu_dom->pwr_mask & readl_relaxed(pmu_base + PMU_PWR);
	writel_relaxed(val, pmu_base + PMU_PWR);

	/* Release reset */
	if (pmu_dom->rst_mask) {
		val = pmu_dom->rst_mask;
		val |= readl_relaxed(pmc_base + PMC_SW_RST);
		writel_relaxed(val, pmc_base + PMC_SW_RST);
	}

	/* Disable isolators */
	if (pmu_dom->iso_mask) {
		val = pmu_dom->iso_mask;
		val |= readl_relaxed(pmu_base + PMU_ISO);
		writel_relaxed(val, pmu_base + PMU_ISO);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
static void __pmu_domain_register(struct pmu_domain *domain,
	struct device_node *np)
{
	unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR);

	domain->base.power_off = pmu_domain_power_off;
	domain->base.power_on = pmu_domain_power_on;

	pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask));

	if (np)
		of_genpd_add_provider_simple(np, &domain->base);
}
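
/*
 * Illustrative consumer binding (hypothetical node): a device sitting
 * in one of these domains references the provider registered by
 * of_genpd_add_provider_simple() with an empty specifier, e.g.:
 *
 *	vpu@400000 {
 *		...
 *		power-domains = <&vpu_domain>;
 *	};
 *
 * genpd then invokes pmu_domain_power_on()/pmu_domain_power_off() as
 * the device is runtime resumed and suspended.
 */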
/* PMU IRQ controller */
static void pmu_irq_handler(struct irq_desc *desc)
{
	struct pmu_data *pmu = irq_desc_get_handler_data(desc);
	struct irq_chip_generic *gc = pmu->irq_gc;
	struct irq_domain *domain = pmu->irq_domain;
	void __iomem *base = gc->reg_base;
	u32 stat = readl_relaxed(base + PMC_IRQ_CAUSE) & gc->mask_cache;
	u32 done = ~0;

	if (stat == 0) {
		handle_bad_irq(desc);
		return;
	}

	while (stat) {
		u32 hwirq = fls(stat) - 1;

		stat &= ~(1 << hwirq);
		done &= ~(1 << hwirq);

		generic_handle_irq(irq_find_mapping(domain, hwirq));
	}

	/*
	 * The PMU mask register is not RW0C: it is RW.  This means that
	 * the bits take whatever value is written to them; if you write
	 * a '1', you will set the interrupt.
	 *
	 * Unfortunately this means there is NO race free way to clear
	 * these interrupts.
	 *
	 * So, let's structure the code so that the window is as small
	 * as possible.
	 */
	irq_gc_lock(gc);
	done &= readl_relaxed(base + PMC_IRQ_CAUSE);
	writel_relaxed(done, base + PMC_IRQ_CAUSE);
	irq_gc_unlock(gc);
}
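
/*
 * Worked example of the acknowledge write above (values hypothetical):
 * after servicing hwirqs 1 and 2, done = 0xfffffff9.  If hwirq 0 has
 * become pending meanwhile, re-reading PMC_IRQ_CAUSE gives 0x07 and
 * done &= 0x07 yields 0x01, so the write clears bits 1 and 2 while
 * leaving the still-pending bit 0 set.  Only an interrupt arriving
 * between this read and write can be lost - that is the race described
 * above.
 */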
static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
{
	const char *name = "pmu_irq";
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	int ret;

	/* mask and clear all interrupts */
	writel(0, pmu->pmc_base + PMC_IRQ_MASK);
	writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);

	domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", name);
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, name,
					     handle_level_irq,
					     IRQ_NOREQUEST | IRQ_NOPROBE, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc: %d\n", name, ret);
		irq_domain_remove(domain);
		return ret;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = pmu->pmc_base;
	gc->chip_types[0].regs.mask = PMC_IRQ_MASK;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	pmu->irq_domain = domain;
	pmu->irq_gc = gc;

	irq_set_handler_data(irq, pmu);
	irq_set_chained_handler(irq, pmu_irq_handler);

	return 0;
}
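
/*
 * Design note: PMC_IRQ_MASK holds active-high enable bits, hence the
 * inverted helpers above - masking an interrupt clears its bit
 * (irq_gc_mask_clr_bit) and unmasking sets it (irq_gc_mask_set_bit).
 * Both helpers keep gc->mask_cache in sync, which is what lets
 * pmu_irq_handler() filter PMC_IRQ_CAUSE against enabled sources
 * without an extra register read.
 */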
int __init dove_init_pmu_legacy(const struct dove_pmu_initdata *initdata)
{
	const struct dove_pmu_domain_initdata *domain_initdata;
	struct pmu_data *pmu;
	int ret;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	spin_lock_init(&pmu->lock);
	pmu->pmc_base = initdata->pmc_base;
	pmu->pmu_base = initdata->pmu_base;

	pmu_reset_init(pmu);
	for (domain_initdata = initdata->domains; domain_initdata->name;
	     domain_initdata++) {
		struct pmu_domain *domain;

		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
		if (domain) {
			domain->pmu = pmu;
			domain->pwr_mask = domain_initdata->pwr_mask;
			domain->rst_mask = domain_initdata->rst_mask;
			domain->iso_mask = domain_initdata->iso_mask;
			domain->base.name = domain_initdata->name;

			__pmu_domain_register(domain, NULL);
		}
	}

	ret = dove_init_pmu_irq(pmu, initdata->irq);
	if (ret)
		pr_err("dove_init_pmu_irq() failed: %d\n", ret);

	if (pmu->irq_domain)
		irq_domain_associate_many(pmu->irq_domain,
					  initdata->irq_domain_start,
					  0, NR_PMU_IRQS);

	return 0;
}
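
/*
 * Illustrative legacy (non-DT) use from board code; the base addresses,
 * IRQ numbers and masks below are hypothetical:
 *
 *	static const struct dove_pmu_domain_initdata pmu_domains[] = {
 *		{
 *			.name = "vpu-domain",
 *			.pwr_mask = BIT(3),
 *			.iso_mask = BIT(0),
 *		},
 *		{ }
 *	};
 *
 *	static const struct dove_pmu_initdata pmu_data = {
 *		.pmc_base = DOVE_PMC_VIRT_BASE,
 *		.pmu_base = DOVE_PMU_VIRT_BASE,
 *		.irq = IRQ_DOVE_PMU,
 *		.irq_domain_start = IRQ_DOVE_PMU_START,
 *		.domains = pmu_domains,
 *	};
 *
 *	dove_init_pmu_legacy(&pmu_data);
 *
 * The NULL-named sentinel terminates the domain table, matching the
 * loop condition in dove_init_pmu_legacy().
 */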
/*
 * pmu: power-manager@d0000 {
 *	compatible = "marvell,dove-pmu";
 *	reg = <0xd0000 0x8000> <0xd8000 0x8000>;
 *	interrupts = <33>;
 *	interrupt-controller;
 *	#reset-cells = <1>;
 *	domains {
 *		vpu_domain: vpu-domain {
 *			#power-domain-cells = <0>;
 *			marvell,pmu_pwr_mask = <0x00000008>;
 *			marvell,pmu_iso_mask = <0x00000001>;
 *			resets = <&pmu 16>;
 *		};
 *		gpu_domain: gpu-domain {
 *			#power-domain-cells = <0>;
 *			marvell,pmu_pwr_mask = <0x00000004>;
 *			marvell,pmu_iso_mask = <0x00000002>;
 *			resets = <&pmu 18>;
 *		};
 *	};
 * };
 */
int __init dove_init_pmu(void)
{
	struct device_node *np_pmu, *domains_node, *np;
	struct pmu_data *pmu;
	int ret, parent_irq;

	/* Lookup the PMU node */
	np_pmu = of_find_compatible_node(NULL, NULL, "marvell,dove-pmu");
	if (!np_pmu)
		return 0;

	domains_node = of_get_child_by_name(np_pmu, "domains");
	if (!domains_node) {
		pr_err("%s: failed to find domains sub-node\n", np_pmu->name);
		return 0;
	}

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	spin_lock_init(&pmu->lock);
	pmu->of_node = np_pmu;
	pmu->pmc_base = of_iomap(pmu->of_node, 0);
	pmu->pmu_base = of_iomap(pmu->of_node, 1);
	if (!pmu->pmc_base || !pmu->pmu_base) {
		pr_err("%s: failed to map PMU\n", np_pmu->name);
		iounmap(pmu->pmu_base);
		iounmap(pmu->pmc_base);
		kfree(pmu);
		return -ENOMEM;
	}

	pmu_reset_init(pmu);

	for_each_available_child_of_node(domains_node, np) {
		struct of_phandle_args args;
		struct pmu_domain *domain;

		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
		if (!domain)
			break;

		domain->pmu = pmu;
		domain->base.name = kstrdup(np->name, GFP_KERNEL);
		if (!domain->base.name) {
			kfree(domain);
			break;
		}

		of_property_read_u32(np, "marvell,pmu_pwr_mask",
				     &domain->pwr_mask);
		of_property_read_u32(np, "marvell,pmu_iso_mask",
				     &domain->iso_mask);

		/*
		 * We parse the reset controller property directly here
		 * to ensure that we can operate when the reset controller
		 * support is not configured into the kernel.
		 */
		ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
						 0, &args);
		if (ret == 0) {
			if (args.np == pmu->of_node)
				domain->rst_mask = BIT(args.args[0]);
			of_node_put(args.np);
		}

		__pmu_domain_register(domain, np);
	}

	/* Loss of the interrupt controller is not a fatal error. */
	parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
	if (!parent_irq) {
		pr_err("%s: no interrupt specified\n", np_pmu->name);
	} else {
		ret = dove_init_pmu_irq(pmu, parent_irq);
		if (ret)
			pr_err("dove_init_pmu_irq() failed: %d\n", ret);
	}

	return 0;
}