Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris...
[linux/fpc-iii.git] / arch / arm / mach-omap2 / prm_common.c
blobb4c4ab9c8044476d0777ed04cb8c9595d2f29f83
/*
 * OMAP2+ common Power & Reset Management (PRM) IP block functions
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * For historical purposes, the API used to configure the PRM
 * interrupt handler refers to it as the "PRCM interrupt."  The
 * underlying registers are located in the PRM on OMAP3/4.
 *
 * XXX This code should eventually be moved to a PRM driver.
 */
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/io.h>
23 #include <linux/irq.h>
24 #include <linux/interrupt.h>
25 #include <linux/slab.h>
26 #include <linux/of.h>
27 #include <linux/of_address.h>
28 #include <linux/clk-provider.h>
29 #include <linux/clk/ti.h>
31 #include "soc.h"
32 #include "prm2xxx_3xxx.h"
33 #include "prm2xxx.h"
34 #include "prm3xxx.h"
35 #include "prm44xx.h"
36 #include "common.h"
37 #include "clock.h"
/*
 * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
 * XXX this is technically not needed, since
 * omap_prcm_register_chain_handler() could allocate this based on the
 * actual amount of memory needed for the SoC
 */
#define OMAP_PRCM_MAX_NR_PENDING_REG		2
48 * prcm_irq_chips: an array of all of the "generic IRQ chips" in use
49 * by the PRCM interrupt handler code. There will be one 'chip' per
50 * PRM_{IRQSTATUS,IRQENABLE}_MPU register pair. (So OMAP3 will have
51 * one "chip" and OMAP4 will have two.)
53 static struct irq_chip_generic **prcm_irq_chips;
56 * prcm_irq_setup: the PRCM IRQ parameters for the hardware the code
57 * is currently running on. Defined and passed by initialization code
58 * that calls omap_prcm_register_chain_handler().
60 static struct omap_prcm_irq_setup *prcm_irq_setup;
62 /* prm_base: base virtual address of the PRM IP block */
63 void __iomem *prm_base;
66 * prm_ll_data: function pointers to SoC-specific implementations of
67 * common PRM functions
69 static struct prm_ll_data null_prm_ll_data;
70 static struct prm_ll_data *prm_ll_data = &null_prm_ll_data;
72 /* Private functions */
75 * Move priority events from events to priority_events array
77 static void omap_prcm_events_filter_priority(unsigned long *events,
78 unsigned long *priority_events)
80 int i;
82 for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
83 priority_events[i] =
84 events[i] & prcm_irq_setup->priority_mask[i];
85 events[i] ^= priority_events[i];
90 * PRCM Interrupt Handler
92 * This is a common handler for the OMAP PRCM interrupts. Pending
93 * interrupts are detected by a call to prcm_pending_events and
94 * dispatched accordingly. Clearing of the wakeup events should be
95 * done by the SoC specific individual handlers.
97 static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
99 unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
100 unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
101 struct irq_chip *chip = irq_desc_get_chip(desc);
102 unsigned int virtirq;
103 int nr_irq = prcm_irq_setup->nr_regs * 32;
106 * If we are suspended, mask all interrupts from PRCM level,
107 * this does not ack them, and they will be pending until we
108 * re-enable the interrupts, at which point the
109 * omap_prcm_irq_handler will be executed again. The
110 * _save_and_clear_irqen() function must ensure that the PRM
111 * write to disable all IRQs has reached the PRM before
112 * returning, or spurious PRCM interrupts may occur during
113 * suspend.
115 if (prcm_irq_setup->suspended) {
116 prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask);
117 prcm_irq_setup->suspend_save_flag = true;
121 * Loop until all pending irqs are handled, since
122 * generic_handle_irq() can cause new irqs to come
124 while (!prcm_irq_setup->suspended) {
125 prcm_irq_setup->read_pending_irqs(pending);
127 /* No bit set, then all IRQs are handled */
128 if (find_first_bit(pending, nr_irq) >= nr_irq)
129 break;
131 omap_prcm_events_filter_priority(pending, priority_pending);
134 * Loop on all currently pending irqs so that new irqs
135 * cannot starve previously pending irqs
138 /* Serve priority events first */
139 for_each_set_bit(virtirq, priority_pending, nr_irq)
140 generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
142 /* Serve normal events next */
143 for_each_set_bit(virtirq, pending, nr_irq)
144 generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
146 if (chip->irq_ack)
147 chip->irq_ack(&desc->irq_data);
148 if (chip->irq_eoi)
149 chip->irq_eoi(&desc->irq_data);
150 chip->irq_unmask(&desc->irq_data);
152 prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */
155 /* Public functions */
158 * omap_prcm_event_to_irq - given a PRCM event name, returns the
159 * corresponding IRQ on which the handler should be registered
160 * @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq
162 * Returns the Linux internal IRQ ID corresponding to @name upon success,
163 * or -ENOENT upon failure.
165 int omap_prcm_event_to_irq(const char *name)
167 int i;
169 if (!prcm_irq_setup || !name)
170 return -ENOENT;
172 for (i = 0; i < prcm_irq_setup->nr_irqs; i++)
173 if (!strcmp(prcm_irq_setup->irqs[i].name, name))
174 return prcm_irq_setup->base_irq +
175 prcm_irq_setup->irqs[i].offset;
177 return -ENOENT;
181 * omap_prcm_irq_cleanup - reverses memory allocated and other steps
182 * done by omap_prcm_register_chain_handler()
184 * No return value.
186 void omap_prcm_irq_cleanup(void)
188 int i;
190 if (!prcm_irq_setup) {
191 pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n");
192 return;
195 if (prcm_irq_chips) {
196 for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
197 if (prcm_irq_chips[i])
198 irq_remove_generic_chip(prcm_irq_chips[i],
199 0xffffffff, 0, 0);
200 prcm_irq_chips[i] = NULL;
202 kfree(prcm_irq_chips);
203 prcm_irq_chips = NULL;
206 kfree(prcm_irq_setup->saved_mask);
207 prcm_irq_setup->saved_mask = NULL;
209 kfree(prcm_irq_setup->priority_mask);
210 prcm_irq_setup->priority_mask = NULL;
212 irq_set_chained_handler(prcm_irq_setup->irq, NULL);
214 if (prcm_irq_setup->base_irq > 0)
215 irq_free_descs(prcm_irq_setup->base_irq,
216 prcm_irq_setup->nr_regs * 32);
217 prcm_irq_setup->base_irq = 0;
220 void omap_prcm_irq_prepare(void)
222 prcm_irq_setup->suspended = true;
225 void omap_prcm_irq_complete(void)
227 prcm_irq_setup->suspended = false;
229 /* If we have not saved the masks, do not attempt to restore */
230 if (!prcm_irq_setup->suspend_save_flag)
231 return;
233 prcm_irq_setup->suspend_save_flag = false;
236 * Re-enable all masked PRCM irq sources, this causes the PRCM
237 * interrupt to fire immediately if the events were masked
238 * previously in the chain handler
240 prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask);
244 * omap_prcm_register_chain_handler - initializes the prcm chained interrupt
245 * handler based on provided parameters
246 * @irq_setup: hardware data about the underlying PRM/PRCM
248 * Set up the PRCM chained interrupt handler on the PRCM IRQ. Sets up
249 * one generic IRQ chip per PRM interrupt status/enable register pair.
250 * Returns 0 upon success, -EINVAL if called twice or if invalid
251 * arguments are passed, or -ENOMEM on any other error.
253 int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
255 int nr_regs;
256 u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG];
257 int offset, i;
258 struct irq_chip_generic *gc;
259 struct irq_chip_type *ct;
261 if (!irq_setup)
262 return -EINVAL;
264 nr_regs = irq_setup->nr_regs;
266 if (prcm_irq_setup) {
267 pr_err("PRCM: already initialized; won't reinitialize\n");
268 return -EINVAL;
271 if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) {
272 pr_err("PRCM: nr_regs too large\n");
273 return -EINVAL;
276 prcm_irq_setup = irq_setup;
278 prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL);
279 prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL);
280 prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs,
281 GFP_KERNEL);
283 if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
284 !prcm_irq_setup->priority_mask) {
285 pr_err("PRCM: kzalloc failed\n");
286 goto err;
289 memset(mask, 0, sizeof(mask));
291 for (i = 0; i < irq_setup->nr_irqs; i++) {
292 offset = irq_setup->irqs[i].offset;
293 mask[offset >> 5] |= 1 << (offset & 0x1f);
294 if (irq_setup->irqs[i].priority)
295 irq_setup->priority_mask[offset >> 5] |=
296 1 << (offset & 0x1f);
299 irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler);
301 irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
304 if (irq_setup->base_irq < 0) {
305 pr_err("PRCM: failed to allocate irq descs: %d\n",
306 irq_setup->base_irq);
307 goto err;
310 for (i = 0; i < irq_setup->nr_regs; i++) {
311 gc = irq_alloc_generic_chip("PRCM", 1,
312 irq_setup->base_irq + i * 32, prm_base,
313 handle_level_irq);
315 if (!gc) {
316 pr_err("PRCM: failed to allocate generic chip\n");
317 goto err;
319 ct = gc->chip_types;
320 ct->chip.irq_ack = irq_gc_ack_set_bit;
321 ct->chip.irq_mask = irq_gc_mask_clr_bit;
322 ct->chip.irq_unmask = irq_gc_mask_set_bit;
324 ct->regs.ack = irq_setup->ack + i * 4;
325 ct->regs.mask = irq_setup->mask + i * 4;
327 irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0);
328 prcm_irq_chips[i] = gc;
331 if (of_have_populated_dt()) {
332 int irq = omap_prcm_event_to_irq("io");
333 if (cpu_is_omap34xx())
334 omap_pcs_legacy_init(irq,
335 omap3xxx_prm_reconfigure_io_chain);
336 else
337 omap_pcs_legacy_init(irq,
338 omap44xx_prm_reconfigure_io_chain);
341 return 0;
343 err:
344 omap_prcm_irq_cleanup();
345 return -ENOMEM;
349 * omap2_set_globals_prm - set the PRM base address (for early use)
350 * @prm: PRM base virtual address
352 * XXX Will be replaced when the PRM/CM drivers are completed.
354 void __init omap2_set_globals_prm(void __iomem *prm)
356 prm_base = prm;
360 * prm_read_reset_sources - return the sources of the SoC's last reset
362 * Return a u32 bitmask representing the reset sources that caused the
363 * SoC to reset. The low-level per-SoC functions called by this
364 * function remap the SoC-specific reset source bits into an
365 * OMAP-common set of reset source bits, defined in
366 * arch/arm/mach-omap2/prm.h. Returns the standardized reset source
367 * u32 bitmask from the hardware upon success, or returns (1 <<
368 * OMAP_UNKNOWN_RST_SRC_ID_SHIFT) if no low-level read_reset_sources()
369 * function was registered.
371 u32 prm_read_reset_sources(void)
373 u32 ret = 1 << OMAP_UNKNOWN_RST_SRC_ID_SHIFT;
375 if (prm_ll_data->read_reset_sources)
376 ret = prm_ll_data->read_reset_sources();
377 else
378 WARN_ONCE(1, "prm: %s: no mapping function defined for reset sources\n", __func__);
380 return ret;
384 * prm_was_any_context_lost_old - was device context lost? (old API)
385 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
386 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
387 * @idx: CONTEXT register offset
389 * Return 1 if any bits were set in the *_CONTEXT_* register
390 * identified by (@part, @inst, @idx), which means that some context
391 * was lost for that module; otherwise, return 0. XXX Deprecated;
392 * callers need to use a less-SoC-dependent way to identify hardware
393 * IP blocks.
395 bool prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx)
397 bool ret = true;
399 if (prm_ll_data->was_any_context_lost_old)
400 ret = prm_ll_data->was_any_context_lost_old(part, inst, idx);
401 else
402 WARN_ONCE(1, "prm: %s: no mapping function defined\n",
403 __func__);
405 return ret;
409 * prm_clear_context_lost_flags_old - clear context loss flags (old API)
410 * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION)
411 * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST)
412 * @idx: CONTEXT register offset
414 * Clear hardware context loss bits for the module identified by
415 * (@part, @inst, @idx). No return value. XXX Deprecated; callers
416 * need to use a less-SoC-dependent way to identify hardware IP
417 * blocks.
419 void prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx)
421 if (prm_ll_data->clear_context_loss_flags_old)
422 prm_ll_data->clear_context_loss_flags_old(part, inst, idx);
423 else
424 WARN_ONCE(1, "prm: %s: no mapping function defined\n",
425 __func__);
429 * prm_register - register per-SoC low-level data with the PRM
430 * @pld: low-level per-SoC OMAP PRM data & function pointers to register
432 * Register per-SoC low-level OMAP PRM data and function pointers with
433 * the OMAP PRM common interface. The caller must keep the data
434 * pointed to by @pld valid until it calls prm_unregister() and
435 * it returns successfully. Returns 0 upon success, -EINVAL if @pld
436 * is NULL, or -EEXIST if prm_register() has already been called
437 * without an intervening prm_unregister().
439 int prm_register(struct prm_ll_data *pld)
441 if (!pld)
442 return -EINVAL;
444 if (prm_ll_data != &null_prm_ll_data)
445 return -EEXIST;
447 prm_ll_data = pld;
449 return 0;
453 * prm_unregister - unregister per-SoC low-level data & function pointers
454 * @pld: low-level per-SoC OMAP PRM data & function pointers to unregister
456 * Unregister per-SoC low-level OMAP PRM data and function pointers
457 * that were previously registered with prm_register(). The
458 * caller may not destroy any of the data pointed to by @pld until
459 * this function returns successfully. Returns 0 upon success, or
460 * -EINVAL if @pld is NULL or if @pld does not match the struct
461 * prm_ll_data * previously registered by prm_register().
463 int prm_unregister(struct prm_ll_data *pld)
465 if (!pld || prm_ll_data != pld)
466 return -EINVAL;
468 prm_ll_data = &null_prm_ll_data;
470 return 0;
473 static struct of_device_id omap_prcm_dt_match_table[] = {
474 { .compatible = "ti,am3-prcm" },
475 { .compatible = "ti,am3-scrm" },
476 { .compatible = "ti,am4-prcm" },
477 { .compatible = "ti,am4-scrm" },
478 { .compatible = "ti,omap3-prm" },
479 { .compatible = "ti,omap3-cm" },
480 { .compatible = "ti,omap3-scrm" },
481 { .compatible = "ti,omap4-cm1" },
482 { .compatible = "ti,omap4-prm" },
483 { .compatible = "ti,omap4-cm2" },
484 { .compatible = "ti,omap4-scrm" },
485 { .compatible = "ti,omap5-prm" },
486 { .compatible = "ti,omap5-cm-core-aon" },
487 { .compatible = "ti,omap5-scrm" },
488 { .compatible = "ti,omap5-cm-core" },
489 { .compatible = "ti,dra7-prm" },
490 { .compatible = "ti,dra7-cm-core-aon" },
491 { .compatible = "ti,dra7-cm-core" },
495 static struct clk_hw_omap memmap_dummy_ck = {
496 .flags = MEMMAP_ADDRESSING,
499 static u32 prm_clk_readl(void __iomem *reg)
501 return omap2_clk_readl(&memmap_dummy_ck, reg);
504 static void prm_clk_writel(u32 val, void __iomem *reg)
506 omap2_clk_writel(val, &memmap_dummy_ck, reg);
509 static struct ti_clk_ll_ops omap_clk_ll_ops = {
510 .clk_readl = prm_clk_readl,
511 .clk_writel = prm_clk_writel,
514 int __init of_prcm_init(void)
516 struct device_node *np;
517 void __iomem *mem;
518 int memmap_index = 0;
520 ti_clk_ll_ops = &omap_clk_ll_ops;
522 for_each_matching_node(np, omap_prcm_dt_match_table) {
523 mem = of_iomap(np, 0);
524 clk_memmaps[memmap_index] = mem;
525 ti_dt_clk_init_provider(np, memmap_index);
526 memmap_index++;
529 ti_dt_clockdomains_setup();
531 return 0;