/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>
/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	struct rb_node		list;
	unsigned long		offset;
	int			pnode;
	int			irq;
};

static DEFINE_SPINLOCK(uv_irq_lock);
static struct rb_root		uv_irq_root;
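
/*
 * The tree is keyed by irq number; uv_irq_lock serializes all inserts,
 * lookups and removals against it.
 */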
static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);
static void uv_noop(struct irq_data *data) { }

static void uv_ack_apic(struct irq_data *data)
{
	ack_APIC_irq();
}
static struct irq_chip uv_irq_chip = {
	.name			= "UV-CORE",
	.irq_mask		= uv_noop,
	.irq_unmask		= uv_noop,
	.irq_eoi		= uv_ack_apic,
	.irq_set_affinity	= uv_set_irq_affinity,
};
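
/*
 * The chip-level mask/unmask callbacks are left as no-ops; the interrupt is
 * actually silenced by setting the mask bit in the hub's routing-entry MMR
 * (see arch_disable_uv_irq() below).
 */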
/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
	struct rb_node **link = &uv_irq_root.rb_node;
	struct rb_node *parent = NULL;
	struct uv_irq_2_mmr_pnode *n;
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;

	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
			 uv_blade_to_memory_nid(blade));
	if (!n)
		return -ENOMEM;

	n->irq		= irq;
	n->offset	= offset;
	n->pnode	= uv_blade_to_pnode(blade);

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Find the right place in the rbtree: */
	while (*link) {
		parent = *link;
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

		if (unlikely(irq == e->irq)) {
			/* irq entry exists */
			e->pnode = uv_blade_to_pnode(blade);
			e->offset = offset;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			kfree(n);
			return 0;
		}

		if (irq < e->irq)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/* Insert the node into the rbtree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);

	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return 0;
}
/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

		if (e->irq == irq) {
			*offset = e->offset;
			*pnode = e->pnode;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			return 0;
		}

		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return -1;
}
/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		   unsigned long mmr_offset, int limit)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode, err;
	unsigned int dest;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
		     sizeof(unsigned long));

	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

	err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest);
	if (err != 0)
		return err;

	if (limit == UV_AFFINITY_CPU)
		irq_set_status_flags(irq, IRQ_NO_BALANCING);
	else
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);

	irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= dest;

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return irq;
}
/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
		     sizeof(unsigned long));

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
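
/*
 * Note: both arch_enable_uv_irq() and arch_disable_uv_irq() program the same
 * hub MMR, whose value is laid out like an I/O APIC redirection entry (hence
 * the uv_IO_APIC_route_entry overlay). Enabling writes the entry with the
 * mask bit clear; disabling only sets the mask bit.
 */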
static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest;
	unsigned long mmr_value, mmr_offset;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= dest;

	/* Get previously stored MMR and pnode of hub sourcing interrupts */
	if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
		return -1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return IRQ_SET_MASK_OK_NOCOPY;
}
/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int limit)
{
	int ret, irq;

	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));
	if (!irq)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
				 limit);
	if (ret == irq)
		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
	else
		destroy_irq(irq);

	return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);
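
/*
 * Typical driver-side usage of uv_setup_irq()/uv_teardown_irq() (an
 * illustrative sketch only; "mydev", mydev_intr and the cpu/blade/offset
 * parameters are hypothetical values a driver would supply itself):
 *
 *	int irq = uv_setup_irq("mydev", cpu, mmr_blade, mmr_offset,
 *			       UV_AFFINITY_CPU);
 *	if (irq < 0)
 *		return irq;
 *	ret = request_irq(irq, mydev_intr, 0, "mydev", mydev);
 *	...
 *	free_irq(irq, mydev);
 *	uv_teardown_irq(irq);
 */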
/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
 */
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
		if (e->irq == irq) {
			arch_disable_uv_irq(e->pnode, e->offset);
			rb_erase(n, &uv_irq_root);
			kfree(e);
			break;
		}
		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);