drivers/irqchip/irq-armada-370-xp.c
/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Ben Dooks <ben.dooks@codethink.co.uk>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>

#include "irqchip.h"

/* Interrupt Controller Registers Map */
#define ARMADA_370_XP_INT_SET_MASK_OFFS         (0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS       (0x4C)

#define ARMADA_370_XP_INT_CONTROL               (0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS       (0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS     (0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq)       (0x100 + irq*4)
#define ARMADA_370_XP_INT_SOURCE_CPU_MASK       0xF
#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)   ((BIT(0) | BIT(8)) << cpuid)

#define ARMADA_370_XP_CPU_INTACK_OFFS           (0x44)
#define ARMADA_375_PPI_CAUSE                    (0x10)

#define ARMADA_370_XP_SW_TRIG_INT_OFFS          (0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS         (0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS       (0x8)

#define ARMADA_370_XP_MAX_PER_CPU_IRQS          (28)

#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ        (5)

#define IPI_DOORBELL_START                      (0)
#define IPI_DOORBELL_END                        (8)
#define IPI_DOORBELL_MASK                       0xFF
#define PCI_MSI_DOORBELL_START                  (16)
#define PCI_MSI_DOORBELL_NR                     (16)
#define PCI_MSI_DOORBELL_END                    (32)
#define PCI_MSI_DOORBELL_MASK                   0xFFFF0000

static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
static u32 doorbell_mask_reg;
static int parent_irq;
#ifdef CONFIG_PCI_MSI
static struct irq_domain *armada_370_xp_msi_domain;
static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
static DEFINE_MUTEX(msi_used_lock);
static phys_addr_t msi_doorbell_addr;
#endif

/*
 * In SMP mode:
 * For shared global interrupts, mask/unmask global enable bit
 * For CPU interrupts, mask/unmask the calling CPU's bit
 */
static void armada_370_xp_irq_mask(struct irq_data *d)
{
        irq_hw_number_t hwirq = irqd_to_hwirq(d);

        if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
                writel(hwirq, main_int_base +
                                ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
        else
                writel(hwirq, per_cpu_int_base +
                                ARMADA_370_XP_INT_SET_MASK_OFFS);
}

static void armada_370_xp_irq_unmask(struct irq_data *d)
{
        irq_hw_number_t hwirq = irqd_to_hwirq(d);

        if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
                writel(hwirq, main_int_base +
                                ARMADA_370_XP_INT_SET_ENABLE_OFFS);
        else
                writel(hwirq, per_cpu_int_base +
                                ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

#ifdef CONFIG_PCI_MSI

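/*
 * MSI support: each MSI is backed by one of PCI_MSI_DOORBELL_NR doorbell
 * bits. The msi_used bitmap, protected by msi_used_lock, tracks which
 * doorbells are currently allocated.
 */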
static int armada_370_xp_alloc_msi(void)
{
        int hwirq;

        mutex_lock(&msi_used_lock);
        hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR);
        if (hwirq >= PCI_MSI_DOORBELL_NR)
                hwirq = -ENOSPC;
        else
                set_bit(hwirq, msi_used);
        mutex_unlock(&msi_used_lock);

        return hwirq;
}

static void armada_370_xp_free_msi(int hwirq)
{
        mutex_lock(&msi_used_lock);
        if (!test_bit(hwirq, msi_used))
                pr_err("trying to free unused MSI#%d\n", hwirq);
        else
                clear_bit(hwirq, msi_used);
        mutex_unlock(&msi_used_lock);
}

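/*
 * Set up an MSI for a PCI device: allocate a doorbell, map it to a virq
 * in the MSI domain and compose a message that makes the device write to
 * the software-triggered interrupt register (doorbells 16-31).
 */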
static int armada_370_xp_setup_msi_irq(struct msi_controller *chip,
                                       struct pci_dev *pdev,
                                       struct msi_desc *desc)
{
        struct msi_msg msg;
        int virq, hwirq;

        /* We support MSI, but not MSI-X */
        if (desc->msi_attrib.is_msix)
                return -EINVAL;

        hwirq = armada_370_xp_alloc_msi();
        if (hwirq < 0)
                return hwirq;

        virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq);
        if (!virq) {
                armada_370_xp_free_msi(hwirq);
                return -EINVAL;
        }

        irq_set_msi_desc(virq, desc);

        msg.address_lo = msi_doorbell_addr;
        msg.address_hi = 0;
        msg.data = 0xf00 | (hwirq + 16);

        pci_write_msi_msg(virq, &msg);
        return 0;
}

static void armada_370_xp_teardown_msi_irq(struct msi_controller *chip,
                                           unsigned int irq)
{
        struct irq_data *d = irq_get_irq_data(irq);
        unsigned long hwirq = d->hwirq;

        irq_dispose_mapping(irq);
        armada_370_xp_free_msi(hwirq);
}

static struct irq_chip armada_370_xp_msi_irq_chip = {
        .name = "armada_370_xp_msi_irq",
        .irq_enable = pci_msi_unmask_irq,
        .irq_disable = pci_msi_mask_irq,
        .irq_mask = pci_msi_mask_irq,
        .irq_unmask = pci_msi_unmask_irq,
};

static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
                                 irq_hw_number_t hw)
{
        irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
                                 handle_simple_irq);
        set_irq_flags(virq, IRQF_VALID);

        return 0;
}

static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
        .map = armada_370_xp_msi_map,
};

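/*
 * Register the MSI controller: create the MSI irqdomain, hook it up to
 * the PCI layer via of_pci_msi_chip_add() and unmask the MSI doorbells
 * in the per-CPU doorbell mask register.
 */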
static int armada_370_xp_msi_init(struct device_node *node,
                                  phys_addr_t main_int_phys_base)
{
        struct msi_controller *msi_chip;
        u32 reg;
        int ret;

        msi_doorbell_addr = main_int_phys_base +
                ARMADA_370_XP_SW_TRIG_INT_OFFS;

        msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL);
        if (!msi_chip)
                return -ENOMEM;

        msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
        msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
        msi_chip->of_node = node;

        armada_370_xp_msi_domain =
                irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
                                      &armada_370_xp_msi_irq_ops,
                                      NULL);
        if (!armada_370_xp_msi_domain) {
                kfree(msi_chip);
                return -ENOMEM;
        }

        ret = of_pci_msi_chip_add(msi_chip);
        if (ret < 0) {
                irq_domain_remove(armada_370_xp_msi_domain);
                kfree(msi_chip);
                return ret;
        }

        reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
                | PCI_MSI_DOORBELL_MASK;

        writel(reg, per_cpu_int_base +
               ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

        /* Unmask IPI interrupt */
        writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);

        return 0;
}
#else
static inline int armada_370_xp_msi_init(struct device_node *node,
                                         phys_addr_t main_int_phys_base)
{
        return 0;
}
#endif

#ifdef CONFIG_SMP
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

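/*
 * Route a shared interrupt to a single online CPU from the requested
 * affinity mask by rewriting the per-interrupt source control register.
 */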
static int armada_xp_set_affinity(struct irq_data *d,
                                  const struct cpumask *mask_val, bool force)
{
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned long reg, mask;
        int cpu;

        /* Select a single core from the affinity mask which is online */
        cpu = cpumask_any_and(mask_val, cpu_online_mask);
        mask = 1UL << cpu_logical_map(cpu);

        raw_spin_lock(&irq_controller_lock);
        reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
        reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
        writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
        raw_spin_unlock(&irq_controller_lock);

        return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip armada_370_xp_irq_chip = {
        .name           = "armada_370_xp_irq",
        .irq_mask       = armada_370_xp_irq_mask,
        .irq_mask_ack   = armada_370_xp_irq_mask,
        .irq_unmask     = armada_370_xp_irq_unmask,
#ifdef CONFIG_SMP
        .irq_set_affinity = armada_xp_set_affinity,
#endif
};

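/*
 * Map a hardware interrupt into the MPIC domain: global interrupts get a
 * level-type handler, while the per-CPU timer interrupt is set up as a
 * percpu_devid interrupt.
 */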
static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
                                      unsigned int virq, irq_hw_number_t hw)
{
        armada_370_xp_irq_mask(irq_get_irq_data(virq));
        if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
                writel(hw, per_cpu_int_base +
                        ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
        else
                writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
        irq_set_status_flags(virq, IRQ_LEVEL);

        if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
                irq_set_percpu_devid(virq);
                irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
                                        handle_percpu_devid_irq);

        } else {
                irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
                                        handle_level_irq);
        }
        set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

        return 0;
}

#ifdef CONFIG_SMP
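/*
 * IPI support: raise a software-triggered (doorbell) interrupt on the
 * CPUs named in the mask.
 */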
static void armada_mpic_send_doorbell(const struct cpumask *mask,
                                      unsigned int irq)
{
        int cpu;
        unsigned long map = 0;

        /* Convert our logical CPU mask into a physical one. */
        for_each_cpu(cpu, mask)
                map |= 1 << cpu_logical_map(cpu);

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before issuing the IPI.
         */
        dsb();

        /* submit softirq */
        writel((map << 8) | irq, main_int_base +
                ARMADA_370_XP_SW_TRIG_INT_OFFS);
}

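/*
 * Per-CPU initialisation: mask all interrupts for this CPU, clear any
 * pending doorbells and enable the eight IPI doorbells.
 */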
static void armada_xp_mpic_smp_cpu_init(void)
{
        u32 control;
        int nr_irqs, i;

        control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
        nr_irqs = (control >> 2) & 0x3ff;

        for (i = 0; i < nr_irqs; i++)
                writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);

        /* Clear pending IPIs */
        writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

        /* Enable first 8 IPIs */
        writel(IPI_DOORBELL_MASK, per_cpu_int_base +
                ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

        /* Unmask IPI interrupt */
        writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
                                         unsigned long action, void *hcpu)
{
        if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                armada_xp_mpic_smp_cpu_init();

        return NOTIFY_OK;
}

static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
        .notifier_call = armada_xp_mpic_secondary_init,
        .priority = 100,
};

static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
{
        if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);

        return NOTIFY_OK;
}

static struct notifier_block mpic_cascaded_cpu_notifier = {
        .notifier_call = mpic_cascaded_secondary_init,
        .priority = 100,
};
#endif /* CONFIG_SMP */

static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
        .map = armada_370_xp_mpic_irq_map,
        .xlate = irq_domain_xlate_onecell,
};

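/*
 * Demultiplex pending MSI doorbells. When called from the chained path
 * the handler uses generic_handle_irq(), otherwise it goes through
 * handle_domain_irq() with the saved registers.
 */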
#ifdef CONFIG_PCI_MSI
static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
{
        u32 msimask, msinr;

        msimask = readl_relaxed(per_cpu_int_base +
                                ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
                & PCI_MSI_DOORBELL_MASK;

        writel(~msimask, per_cpu_int_base +
               ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

        for (msinr = PCI_MSI_DOORBELL_START;
             msinr < PCI_MSI_DOORBELL_END; msinr++) {
                int irq;

                if (!(msimask & BIT(msinr)))
                        continue;

                if (is_chained) {
                        irq = irq_find_mapping(armada_370_xp_msi_domain,
                                               msinr - 16);
                        generic_handle_irq(irq);
                } else {
                        irq = msinr - 16;
                        handle_domain_irq(armada_370_xp_msi_domain,
                                          irq, regs);
                }
        }
}
#else
static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
#endif

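/*
 * Chained handler used when the MPIC sits behind a parent interrupt
 * controller: walk the PPI cause register and handle every interrupt
 * that is not masked (IRQ or FIQ) for the current CPU.
 */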
static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
                                                  struct irq_desc *desc)
{
        struct irq_chip *chip = irq_get_chip(irq);
        unsigned long irqmap, irqn, irqsrc, cpuid;
        unsigned int cascade_irq;

        chained_irq_enter(chip, desc);

        irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
        cpuid = cpu_logical_map(smp_processor_id());

        for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
                irqsrc = readl_relaxed(main_int_base +
                                       ARMADA_370_XP_INT_SOURCE_CTL(irqn));

                /* Check if the interrupt is not masked on current CPU.
                 * Test IRQ (0-1) and FIQ (8-9) mask bits.
                 */
                if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
                        continue;

                if (irqn == 1) {
                        armada_370_xp_handle_msi_irq(NULL, true);
                        continue;
                }

                cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
                generic_handle_irq(cascade_irq);
        }

        chained_irq_exit(chip, desc);
}

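/*
 * Top-level handler used when the MPIC is the primary interrupt
 * controller: acknowledge interrupts in a loop, dispatching MSIs
 * (vector 1) and IPIs (vector 0) to their dedicated paths.
 */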
static void __exception_irq_entry
armada_370_xp_handle_irq(struct pt_regs *regs)
{
        u32 irqstat, irqnr;

        do {
                irqstat = readl_relaxed(per_cpu_int_base +
                                        ARMADA_370_XP_CPU_INTACK_OFFS);
                irqnr = irqstat & 0x3FF;

                if (irqnr > 1022)
                        break;

                if (irqnr > 1) {
                        handle_domain_irq(armada_370_xp_mpic_domain,
                                          irqnr, regs);
                        continue;
                }

                /* MSI handling */
                if (irqnr == 1)
                        armada_370_xp_handle_msi_irq(regs, false);

#ifdef CONFIG_SMP
                /* IPI Handling */
                if (irqnr == 0) {
                        u32 ipimask, ipinr;

                        ipimask = readl_relaxed(per_cpu_int_base +
                                                ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
                                & IPI_DOORBELL_MASK;

                        writel(~ipimask, per_cpu_int_base +
                                ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

                        /* Handle all pending doorbells */
                        for (ipinr = IPI_DOORBELL_START;
                             ipinr < IPI_DOORBELL_END; ipinr++) {
                                if (ipimask & (0x1 << ipinr))
                                        handle_IPI(ipinr, regs);
                        }
                        continue;
                }
#endif

        } while (1);
}

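/*
 * Suspend/resume: the doorbell mask is saved across suspend; on resume
 * the per-interrupt enables, the doorbell mask and the IPI/MSI unmask
 * bits are reprogrammed.
 */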
static int armada_370_xp_mpic_suspend(void)
{
        doorbell_mask_reg = readl(per_cpu_int_base +
                                  ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
        return 0;
}

static void armada_370_xp_mpic_resume(void)
{
        int nirqs;
        irq_hw_number_t irq;

        /* Re-enable interrupts */
        nirqs = (readl(main_int_base + ARMADA_370_XP_INT_CONTROL) >> 2) & 0x3ff;
        for (irq = 0; irq < nirqs; irq++) {
                struct irq_data *data;
                int virq;

                virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
                if (virq == 0)
                        continue;

                if (irq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
                        writel(irq, per_cpu_int_base +
                               ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
                else
                        writel(irq, main_int_base +
                               ARMADA_370_XP_INT_SET_ENABLE_OFFS);

                data = irq_get_irq_data(virq);
                if (!irqd_irq_disabled(data))
                        armada_370_xp_irq_unmask(data);
        }

        /* Reconfigure doorbells for IPIs and MSIs */
        writel(doorbell_mask_reg,
               per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
        if (doorbell_mask_reg & IPI_DOORBELL_MASK)
                writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
        if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
                writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

struct syscore_ops armada_370_xp_mpic_syscore_ops = {
        .suspend        = armada_370_xp_mpic_suspend,
        .resume         = armada_370_xp_mpic_resume,
};

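/*
 * Probe from the device tree: map the main and per-CPU register banks,
 * create the MPIC irqdomain, set up MSI support and install either the
 * primary or the chained flow handler depending on whether a parent
 * interrupt is present.
 */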
static int __init armada_370_xp_mpic_of_init(struct device_node *node,
                                             struct device_node *parent)
{
        struct resource main_int_res, per_cpu_int_res;
        int nr_irqs, i;
        u32 control;

        BUG_ON(of_address_to_resource(node, 0, &main_int_res));
        BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));

        BUG_ON(!request_mem_region(main_int_res.start,
                                   resource_size(&main_int_res),
                                   node->full_name));
        BUG_ON(!request_mem_region(per_cpu_int_res.start,
                                   resource_size(&per_cpu_int_res),
                                   node->full_name));

        main_int_base = ioremap(main_int_res.start,
                                resource_size(&main_int_res));
        BUG_ON(!main_int_base);

        per_cpu_int_base = ioremap(per_cpu_int_res.start,
                                   resource_size(&per_cpu_int_res));
        BUG_ON(!per_cpu_int_base);

        control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
        nr_irqs = (control >> 2) & 0x3ff;

        for (i = 0; i < nr_irqs; i++)
                writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);

        armada_370_xp_mpic_domain =
                irq_domain_add_linear(node, nr_irqs,
                                &armada_370_xp_mpic_irq_ops, NULL);

        BUG_ON(!armada_370_xp_mpic_domain);

#ifdef CONFIG_SMP
        armada_xp_mpic_smp_cpu_init();
#endif

        armada_370_xp_msi_init(node, main_int_res.start);

        parent_irq = irq_of_parse_and_map(node, 0);
        if (parent_irq <= 0) {
                irq_set_default_host(armada_370_xp_mpic_domain);
                set_handle_irq(armada_370_xp_handle_irq);
#ifdef CONFIG_SMP
                set_smp_cross_call(armada_mpic_send_doorbell);
                register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
#endif
        } else {
#ifdef CONFIG_SMP
                register_cpu_notifier(&mpic_cascaded_cpu_notifier);
#endif
                irq_set_chained_handler(parent_irq,
                                        armada_370_xp_mpic_handle_cascade_irq);
        }

        register_syscore_ops(&armada_370_xp_mpic_syscore_ops);

        return 0;
}

IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);