/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>
/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	struct rb_node		list;
	unsigned long		offset;
	int			irq;
	int			pnode;
};

static spinlock_t		uv_irq_lock;
static struct rb_root		uv_irq_root;
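
/*
 * uv_irq_root is keyed by irq number; uv_set_irq_affinity() and
 * uv_teardown_irq() use it to look up the hub MMR (pnode + offset)
 * that was programmed for an irq at setup time.
 */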
static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);
static void uv_noop(struct irq_data *data) { }

static void uv_ack_apic(struct irq_data *data)
{
	ack_APIC_irq();
}
static struct irq_chip uv_irq_chip = {
	.name			= "UV-CORE",
	.irq_mask		= uv_noop,
	.irq_unmask		= uv_noop,
	.irq_eoi		= uv_ack_apic,
	.irq_set_affinity	= uv_set_irq_affinity,
};
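
/*
 * This chip is attached to each UV irq by arch_enable_uv_irq() below, with
 * handle_percpu_irq as the flow handler; mask/unmask are no-ops, so only
 * the EOI and affinity callbacks do real work.
 */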
/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
 */
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
	struct rb_node **link = &uv_irq_root.rb_node;
	struct rb_node *parent = NULL;
	struct uv_irq_2_mmr_pnode *n;
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;

	/* Allocate the node on the blade's local memory node. */
	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
				uv_blade_to_memory_nid(blade));
	if (!n)
		return -ENOMEM;

	n->irq = irq;
	n->offset = offset;
	n->pnode = uv_blade_to_pnode(blade);
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Find the right place in the rbtree: */
	while (*link) {
		parent = *link;
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

		if (unlikely(irq == e->irq)) {
			/* irq entry exists: update it and drop the new node */
			e->pnode = uv_blade_to_pnode(blade);
			e->offset = offset;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			kfree(n);
			return 0;
		}

		if (irq < e->irq)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/* Insert the node into the rbtree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);

	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return 0;
}
/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

		if (e->irq == irq) {
			*offset = e->offset;
			*pnode = e->pnode;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			return 0;
		}

		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return -1;
}
/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		       unsigned long mmr_offset, int limit)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode, err;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

	if (limit == UV_AFFINITY_CPU)
		irq_set_status_flags(irq, IRQ_NO_BALANCING);
	else
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);

	irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);

	/* Build the routing entry as an overlay of the 64-bit MMR value. */
	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= apic->cpu_mask_to_apicid(eligible_cpu);

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return irq;
}
/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	/* Write a masked routing entry so the hub stops forwarding MSIs. */
	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest;
	unsigned long mmr_value, mmr_offset;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	/* Rebuild the routing entry, retargeted at the new destination. */
	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= dest;

	/* Get previously stored MMR and pnode of hub sourcing interrupts */
	if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
		return -1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return 0;
}
/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int limit)
{
	int irq, ret;

	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));
	if (irq <= 0)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
		limit);
	if (ret == irq)
		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
	else
		destroy_irq(irq);

	return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);
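
/*
 * Note: on success the return value is the newly created irq number; callers
 * are expected to request_irq() it themselves before the device raises
 * interrupts (see the usage sketch after uv_teardown_irq() below).
 */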
/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * The irq must be one returned by uv_setup_irq(); the MMR blade and offset
 * recorded at setup time are looked up in the rb tree here.
 */
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
		if (e->irq == irq) {
			/* Mask the MMR and drop the rb tree entry. */
			arch_disable_uv_irq(e->pnode, e->offset);
			rb_erase(n, &uv_irq_root);
			kfree(e);
			break;
		}
		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
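
/*
 * Illustrative usage sketch (not part of this file): how a driver might pair
 * the two exports above with request_irq()/free_irq(). The handler, device
 * pointer, name string, cpu, mmr_blade and mmr_offset below are hypothetical
 * placeholders, not values defined by this interface:
 *
 *	int irq, ret;
 *
 *	irq = uv_setup_irq("my_uv_dev", cpu, mmr_blade, mmr_offset,
 *			   UV_AFFINITY_CPU);
 *	if (irq < 0)
 *		return irq;
 *
 *	ret = request_irq(irq, my_uv_handler, 0, "my_uv_dev", my_dev);
 *	if (ret) {
 *		uv_teardown_irq(irq);
 *		return ret;
 *	}
 *	...
 *	free_irq(irq, my_dev);
 *	uv_teardown_irq(irq);
 */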