drivers/irqchip/irq-dw-apb-ictl.c
/*
 * Synopsys DW APB ICTL irqchip driver.
 *
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 *
 * based on GPL'ed 2.6 kernel sources
 *  (c) Marvell International Ltd.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include "irqchip.h"
#define APB_INT_ENABLE_L	0x00
#define APB_INT_ENABLE_H	0x04
#define APB_INT_MASK_L		0x08
#define APB_INT_MASK_H		0x0c
#define APB_INT_FINALSTATUS_L	0x30
#define APB_INT_FINALSTATUS_H	0x34
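
/*
 * Chained flow handler for the parent interrupt: scan one FINALSTATUS
 * register per 32-interrupt bank and dispatch each pending source
 * through the linear irq domain.
 */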
static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	struct irq_chip_generic *gc = irq_get_handler_data(irq);
	struct irq_domain *d = gc->private;
	u32 stat;
	int n;

	chained_irq_enter(chip, desc);

	for (n = 0; n < gc->num_ct; n++) {
		stat = readl_relaxed(gc->reg_base +
				     APB_INT_FINALSTATUS_L + 4 * n);
		while (stat) {
			u32 hwirq = ffs(stat) - 1;

			generic_handle_irq(irq_find_mapping(d,
					    gc->irq_base + hwirq + 32 * n));
			stat &= ~(1 << hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}
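
/*
 * Restore controller state on resume: enable all inputs again and
 * rewrite the mask register from the mask cache maintained by the
 * generic irq chip code.
 */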
#ifdef CONFIG_PM
static void dw_apb_ictl_resume(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	irq_gc_lock(gc);
	writel_relaxed(~0, gc->reg_base + ct->regs.enable);
	writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask);
	irq_gc_unlock(gc);
}
#else
#define dw_apb_ictl_resume	NULL
#endif /* CONFIG_PM */
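
/*
 * Probe-time setup driven by the device tree node: map the parent irq,
 * claim and map the register window, size the controller, and register
 * one generic irq chip type per 32-interrupt bank.
 */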
static int __init dw_apb_ictl_init(struct device_node *np,
				   struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct resource r;
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	void __iomem *iobase;
	int ret, nrirqs, irq;
	u32 reg;

	/* Map the parent interrupt for the chained handler */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%s: unable to parse irq\n", np->full_name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		pr_err("%s: unable to get resource\n", np->full_name);
		return ret;
	}

	if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
		pr_err("%s: unable to request mem region\n", np->full_name);
		return -ENOMEM;
	}

	iobase = ioremap(r.start, resource_size(&r));
	if (!iobase) {
		pr_err("%s: unable to map resource\n", np->full_name);
		ret = -ENOMEM;
		goto err_release;
	}

	/*
	 * DW IP can be configured to allow 2-64 irqs. We can determine
	 * the number of irqs supported by writing into enable register
	 * and look for bits not set, as corresponding flip-flops will
	 * have been removed by synthesis tool.
	 */

	/* mask and enable all interrupts */
	writel_relaxed(~0, iobase + APB_INT_MASK_L);
	writel_relaxed(~0, iobase + APB_INT_MASK_H);
	writel_relaxed(~0, iobase + APB_INT_ENABLE_L);
	writel_relaxed(~0, iobase + APB_INT_ENABLE_H);

	reg = readl_relaxed(iobase + APB_INT_ENABLE_H);
	if (reg)
		nrirqs = 32 + fls(reg);
	else
		nrirqs = fls(readl_relaxed(iobase + APB_INT_ENABLE_L));
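
	/*
	 * Worked example (illustrative numbers, not taken from real
	 * hardware): with 45 inputs synthesized, ENABLE_H reads back
	 * 0x00001fff after the ~0 write, so
	 * nrirqs = 32 + fls(0x1fff) = 32 + 13 = 45.
	 */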

	domain = irq_domain_add_linear(np, nrirqs,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", np->full_name);
		ret = -ENOMEM;
		goto err_unmap;
	}

	ret = irq_alloc_domain_generic_chips(domain, 32, (nrirqs > 32) ? 2 : 1,
					     np->name, handle_level_irq, clr, 0,
					     IRQ_GC_MASK_CACHE_PER_TYPE |
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
		goto err_unmap;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->private = domain;
	gc->reg_base = iobase;

	gc->chip_types[0].regs.mask = APB_INT_MASK_L;
	gc->chip_types[0].regs.enable = APB_INT_ENABLE_L;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;

	if (nrirqs > 32) {
		gc->chip_types[1].regs.mask = APB_INT_MASK_H;
		gc->chip_types[1].regs.enable = APB_INT_ENABLE_H;
		gc->chip_types[1].chip.irq_mask = irq_gc_mask_set_bit;
		gc->chip_types[1].chip.irq_unmask = irq_gc_mask_clr_bit;
		gc->chip_types[1].chip.irq_resume = dw_apb_ictl_resume;
	}

	irq_set_handler_data(irq, gc);
	irq_set_chained_handler(irq, dw_apb_ictl_handler);

	return 0;

err_unmap:
	iounmap(iobase);
err_release:
	release_mem_region(r.start, resource_size(&r));
	return ret;
}
IRQCHIP_DECLARE(dw_apb_ictl, "snps,dw-apb-ictl", dw_apb_ictl_init);
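
/*
 * Illustrative device tree node for this controller (a sketch only; the
 * unit address, register size and parent interrupt specifier below are
 * made-up placeholders, not values taken from this file):
 *
 *	ictl: interrupt-controller@3f00 {
 *		compatible = "snps,dw-apb-ictl";
 *		reg = <0x3f00 0xc00>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		interrupts = <0 3 4>;
 *	};
 */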