/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Ben Dooks <ben.dooks@codethink.co.uk>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/msi.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>

#include "irqchip.h"

/* Interrupt Controller Registers Map */
#define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)

#define ARMADA_370_XP_INT_CONTROL		(0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq*4)

#define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)

#define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS		(0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS	(0x8)

#define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)

#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ	(5)

#define IPI_DOORBELL_START			(0)
#define IPI_DOORBELL_END			(8)
#define IPI_DOORBELL_MASK			0xFF
#define PCI_MSI_DOORBELL_START			(16)
#define PCI_MSI_DOORBELL_NR			(16)
#define PCI_MSI_DOORBELL_END			(32)
#define PCI_MSI_DOORBELL_MASK			0xFFFF0000
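
/*
 * Doorbell layout implied by the definitions above and by the demux in
 * armada_370_xp_handle_irq() below: bits 0-7 of the per-CPU doorbell
 * registers carry the eight IPIs (IPI_DOORBELL_MASK), bits 16-31 carry the
 * sixteen PCI MSI doorbells (PCI_MSI_DOORBELL_MASK); bits 8-15 are not used
 * by this driver.
 */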

static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;

#ifdef CONFIG_PCI_MSI
static struct irq_domain *armada_370_xp_msi_domain;
static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
static DEFINE_MUTEX(msi_used_lock);
static phys_addr_t msi_doorbell_addr;
#endif

/*
 * In SMP mode:
 * For shared global interrupts, mask/unmask global enable bit
 * For CPU interrupts, mask/unmask the calling CPU's bit
 */
static void armada_370_xp_irq_mask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
		       ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
		       ARMADA_370_XP_INT_SET_MASK_OFFS);
}

static void armada_370_xp_irq_unmask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
		       ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
		       ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

#ifdef CONFIG_PCI_MSI

static int armada_370_xp_alloc_msi(void)
{
	int hwirq;

	mutex_lock(&msi_used_lock);
	hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR);
	if (hwirq >= PCI_MSI_DOORBELL_NR)
		hwirq = -ENOSPC;
	else
		set_bit(hwirq, msi_used);
	mutex_unlock(&msi_used_lock);

	return hwirq;
}

static void armada_370_xp_free_msi(int hwirq)
{
	mutex_lock(&msi_used_lock);
	if (!test_bit(hwirq, msi_used))
		pr_err("trying to free unused MSI#%d\n", hwirq);
	else
		clear_bit(hwirq, msi_used);
	mutex_unlock(&msi_used_lock);
}
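
/*
 * MSI support is built on the same doorbell mechanism: the MSI address
 * programmed into a device is the physical address of the software-triggered
 * interrupt register (msi_doorbell_addr), and armada_370_xp_setup_msi_irq()
 * below encodes hwirq + 16 into msg.data, i.e. one of the doorbells in the
 * 16-31 range. The 0xf00 part of msg.data appears to mirror the
 * (map << 8) CPU-target field used by armada_mpic_send_doorbell() further
 * down. When the device performs the MSI write, the handler picks the
 * resulting bit out of the per-CPU doorbell cause register.
 */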
static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
				       struct pci_dev *pdev,
				       struct msi_desc *desc)
{
	struct msi_msg msg;
	int virq, hwirq;

	hwirq = armada_370_xp_alloc_msi();
	if (hwirq < 0)
		return hwirq;

	virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq);
	if (!virq) {
		armada_370_xp_free_msi(hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(virq, desc);

	msg.address_lo = msi_doorbell_addr;
	msg.address_hi = 0;
	msg.data = 0xf00 | (hwirq + 16);

	write_msi_msg(virq, &msg);

	return 0;
}

static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
					   unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	unsigned long hwirq = d->hwirq;

	irq_dispose_mapping(irq);
	armada_370_xp_free_msi(hwirq);
}

static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
					  int nvec, int type)
{
	/* We support MSI, but not MSI-X */
	if (type == PCI_CAP_ID_MSI)
		return 0;
	return -EINVAL;
}

static struct irq_chip armada_370_xp_msi_irq_chip = {
	.name = "armada_370_xp_msi_irq",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};

static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
				 irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
				 handle_simple_irq);
	set_irq_flags(virq, IRQF_VALID);

	return 0;
}

static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
	.map = armada_370_xp_msi_map,
};

static int armada_370_xp_msi_init(struct device_node *node,
				  phys_addr_t main_int_phys_base)
{
	struct msi_chip *msi_chip;
	u32 reg;
	int ret;

	msi_doorbell_addr = main_int_phys_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS;

	msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL);
	if (!msi_chip)
		return -ENOMEM;

	msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
	msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
	msi_chip->check_device = armada_370_xp_check_msi_device;
	msi_chip->of_node = node;

	armada_370_xp_msi_domain =
		irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
				      &armada_370_xp_msi_irq_ops,
				      NULL);
	if (!armada_370_xp_msi_domain) {
		kfree(msi_chip);
		return -ENOMEM;
	}

	ret = of_pci_msi_chip_add(msi_chip);
	if (ret < 0) {
		irq_domain_remove(armada_370_xp_msi_domain);
		kfree(msi_chip);
		return ret;
	}

	reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
		| PCI_MSI_DOORBELL_MASK;

	writel(reg, per_cpu_int_base +
	       ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask the doorbell summary interrupt (IRQ 1) used for MSIs */
	writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);

	return 0;
}
#else
static inline int armada_370_xp_msi_init(struct device_node *node,
					 phys_addr_t main_int_phys_base)
{
	return 0;
}
#endif

#ifdef CONFIG_SMP
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

static int armada_xp_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val, bool force)
{
	unsigned long reg;
	unsigned long new_mask = 0;
	unsigned long online_mask = 0;
	unsigned long count = 0;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	int cpu;

	for_each_cpu(cpu, mask_val) {
		new_mask |= 1 << cpu_logical_map(cpu);
		count++;
	}

	/*
	 * Forbid multicore interrupt affinity.
	 * This is required since the MPIC HW doesn't limit
	 * several CPUs from acknowledging the same interrupt.
	 */
	if (count > 1)
		return -EINVAL;

	for_each_cpu(cpu, cpu_online_mask)
		online_mask |= 1 << cpu_logical_map(cpu);

	raw_spin_lock(&irq_controller_lock);

	reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	reg = (reg & (~online_mask)) | new_mask;
	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
#endif
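
/*
 * The update in armada_xp_set_affinity() rewrites the low bits of
 * INT_SOURCE_CTL(hwirq), which this driver treats as a per-CPU destination
 * mask expressed in physical CPU numbers: every online CPU's bit is cleared
 * and exactly one new bit is set. As an illustrative example (not from the
 * original source), routing an interrupt to physical CPU 2 on a four-CPU
 * system turns a previous value of 0x1 into (0x1 & ~0xf) | 0x4 = 0x4.
 */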

static struct irq_chip armada_370_xp_irq_chip = {
	.name		= "armada_370_xp_irq",
	.irq_mask	= armada_370_xp_irq_mask,
	.irq_mask_ack	= armada_370_xp_irq_mask,
	.irq_unmask	= armada_370_xp_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = armada_xp_set_affinity,
#endif
};

static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
				      unsigned int virq, irq_hw_number_t hw)
{
	armada_370_xp_irq_mask(irq_get_irq_data(virq));
	if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hw, per_cpu_int_base +
		       ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
	else
		writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	irq_set_status_flags(virq, IRQ_LEVEL);

	if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_percpu_devid_irq);
	} else {
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_level_irq);
	}
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

	return 0;
}
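
/*
 * Because the Timer0 per-CPU interrupt is marked with irq_set_percpu_devid()
 * above, a consumer is expected to use the per-CPU request/enable API rather
 * than plain request_irq(). A minimal sketch, assuming a hypothetical
 * "my_percpu_dev" per-CPU cookie and "my_timer_isr" handler:
 *
 *	err = request_percpu_irq(virq, my_timer_isr, "my-timer", &my_percpu_dev);
 *	if (!err)
 *		enable_percpu_irq(virq, 0);	// called on each CPU that uses it
 */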

#ifdef CONFIG_SMP
void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* Trigger the software interrupt (IPI doorbell) on the target CPUs */
	writel((map << 8) | irq, main_int_base +
	       ARMADA_370_XP_SW_TRIG_INT_OFFS);
}

void armada_xp_mpic_smp_cpu_init(void)
{
	/* Clear pending IPIs */
	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	/* Enable first 8 IPIs */
	writel(IPI_DOORBELL_MASK, per_cpu_int_base +
	       ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask IPI interrupt */
	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
#endif /* CONFIG_SMP */
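
/*
 * Encoding of the software-triggered interrupt write in
 * armada_mpic_send_doorbell(): the low 8 bits select the doorbell number
 * (0-7 for IPIs) and bits 8 and up select the target CPUs by physical
 * number. As an illustrative example (not from the original source),
 * sending IPI 2 to physical CPUs 0 and 1 writes (0x3 << 8) | 2 = 0x302.
 */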

static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
	.map = armada_370_xp_mpic_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

static asmlinkage void __exception_irq_entry
armada_370_xp_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;

	do {
		irqstat = readl_relaxed(per_cpu_int_base +
					ARMADA_370_XP_CPU_INTACK_OFFS);
		irqnr = irqstat & 0x3FF;

		if (irqnr > 1022)
			break;

		if (irqnr > 1) {
			irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
						 irqnr);
			handle_IRQ(irqnr, regs);
			continue;
		}

#ifdef CONFIG_PCI_MSI
		/* MSI handling */
		if (irqnr == 1) {
			u32 msimask, msinr;

			msimask = readl_relaxed(per_cpu_int_base +
						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
				& PCI_MSI_DOORBELL_MASK;

			writel(~msimask, per_cpu_int_base +
			       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

			for (msinr = PCI_MSI_DOORBELL_START;
			     msinr < PCI_MSI_DOORBELL_END; msinr++) {
				int irq;

				if (!(msimask & BIT(msinr)))
					continue;

				irq = irq_find_mapping(armada_370_xp_msi_domain,
						       msinr - 16);
				handle_IRQ(irq, regs);
			}
		}
#endif

#ifdef CONFIG_SMP
		/* IPI Handling */
		if (irqnr == 0) {
			u32 ipimask, ipinr;

			ipimask = readl_relaxed(per_cpu_int_base +
						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
				& IPI_DOORBELL_MASK;

			writel(~ipimask, per_cpu_int_base +
			       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

			/* Handle all pending doorbells */
			for (ipinr = IPI_DOORBELL_START;
			     ipinr < IPI_DOORBELL_END; ipinr++) {
				if (ipimask & (0x1 << ipinr))
					handle_IPI(ipinr, regs);
			}
			continue;
		}
#endif

	} while (1);
}
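
/*
 * Summary of the acknowledge loop above: it reads CPU_INTACK and uses the
 * low 10 bits as the pending interrupt number; a value above 1022 is
 * treated as "nothing pending" and ends the loop. Interrupt 0 is the IPI
 * doorbell summary, interrupt 1 the MSI doorbell summary, and everything
 * else is a regular MPIC interrupt forwarded through the linear domain.
 */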

static int __init armada_370_xp_mpic_of_init(struct device_node *node,
					     struct device_node *parent)
{
	struct resource main_int_res, per_cpu_int_res;
	u32 control;

	BUG_ON(of_address_to_resource(node, 0, &main_int_res));
	BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));

	BUG_ON(!request_mem_region(main_int_res.start,
				   resource_size(&main_int_res),
				   node->full_name));
	BUG_ON(!request_mem_region(per_cpu_int_res.start,
				   resource_size(&per_cpu_int_res),
				   node->full_name));

	main_int_base = ioremap(main_int_res.start,
				resource_size(&main_int_res));
	BUG_ON(!main_int_base);

	per_cpu_int_base = ioremap(per_cpu_int_res.start,
				   resource_size(&per_cpu_int_res));
	BUG_ON(!per_cpu_int_base);

	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);

	armada_370_xp_mpic_domain =
		irq_domain_add_linear(node, (control >> 2) & 0x3ff,
				      &armada_370_xp_mpic_irq_ops, NULL);

	BUG_ON(!armada_370_xp_mpic_domain);

	irq_set_default_host(armada_370_xp_mpic_domain);

#ifdef CONFIG_SMP
	armada_xp_mpic_smp_cpu_init();

	/*
	 * Set the default affinity from all CPUs to the boot cpu.
	 * This is required since the MPIC doesn't limit
	 * several CPUs from acknowledging the same interrupt.
	 */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	armada_370_xp_msi_init(node, main_int_res.start);

	set_handle_irq(armada_370_xp_handle_irq);

	return 0;
}

IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);
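
/*
 * Illustrative device tree node for this controller, inferred from the init
 * code above (reg index 0 is the main interrupt controller register region,
 * index 1 the per-CPU region; one interrupt cell matches
 * irq_domain_xlate_onecell). The unit address, register offsets and sizes
 * below are placeholders, not taken from any real board file:
 *
 *	mpic: interrupt-controller@20a00 {
 *		compatible = "marvell,mpic";
 *		reg = <0x20a00 0x2d0>, <0x21070 0x58>;
 *		#interrupt-cells = <1>;
 *		interrupt-controller;
 *	};
 */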