/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/export.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/irqdomain.h>
#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>
/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
	unsigned long		offset;
	int			pnode;
};
static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
		     sizeof(unsigned long));

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= cfg->dest_apicid;

	uv_write_global_mmr64(info->pnode, info->offset, mmr_value);
}
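/*
 * Added commentary: uv_IO_APIC_route_entry is a bitfield view of a single
 * 64-bit hub MMR, which is why the BUILD_BUG_ON() above pins its size to
 * sizeof(unsigned long). One uv_write_global_mmr64() therefore programs
 * the vector, delivery/destination mode and target APIC id of the MSI the
 * hub will raise, in a single 64-bit store.
 */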
static void uv_noop(struct irq_data *data) { }
static void uv_ack_apic(struct irq_data *data)
{
	ack_APIC_irq();
}
static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	struct irq_data *parent = data->parent_data;
	struct irq_cfg *cfg = irqd_cfg(data);
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0) {
		/* Re-point the hub MMR at the new vector/CPU pair. */
		uv_program_mmr(cfg, data->chip_data);
		send_cleanup_vector(cfg);
	}

	return ret;
}
static struct irq_chip uv_irq_chip = {
	.name			= "UV-CORE",
	.irq_mask		= uv_noop,
	.irq_unmask		= uv_noop,
	.irq_eoi		= uv_ack_apic,
	.irq_set_affinity	= uv_set_irq_affinity,
};
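/*
 * Added commentary: .irq_mask/.irq_unmask are no-ops here; the interrupt
 * is enabled and disabled by programming the hub MMR in the irq domain's
 * activate/deactivate callbacks below, and EOI is a plain local APIC ack.
 */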
static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq,
			   unsigned int nr_irqs, void *arg)
{
	struct uv_irq_2_mmr_pnode *chip_data;
	struct irq_alloc_info *info = arg;
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
	int ret;

	if (nr_irqs > 1 || !info || info->type != X86_IRQ_ALLOC_TYPE_UV)
		return -EINVAL;

	chip_data = kmalloc_node(sizeof(*chip_data), GFP_KERNEL,
				 irq_data_get_node(irq_data));
	if (!chip_data)
		return -ENOMEM;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret >= 0) {
		if (info->uv_limit == UV_AFFINITY_CPU)
			irq_set_status_flags(virq, IRQ_NO_BALANCING);
		else
			irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

		chip_data->pnode = uv_blade_to_pnode(info->uv_blade);
		chip_data->offset = info->uv_offset;
		irq_domain_set_info(domain, virq, virq, &uv_irq_chip, chip_data,
				    handle_percpu_irq, NULL, info->uv_name);
	} else {
		kfree(chip_data);
	}

	return ret;
}
static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
			   unsigned int nr_irqs)
{
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);

	BUG_ON(nr_irqs != 1);
	kfree(irq_data->chip_data);
	irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
	irq_clear_status_flags(virq, IRQ_NO_BALANCING);
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
static void uv_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data)
{
	uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
}
/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
static void uv_domain_deactivate(struct irq_domain *domain,
				 struct irq_data *irq_data)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;
	uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
}
static const struct irq_domain_ops uv_domain_ops = {
	.alloc		= uv_domain_alloc,
	.free		= uv_domain_free,
	.activate	= uv_domain_activate,
	.deactivate	= uv_domain_deactivate,
};
static struct irq_domain *uv_get_irq_domain(void)
{
	static struct irq_domain *uv_domain;
	static DEFINE_MUTEX(uv_lock);

	mutex_lock(&uv_lock);
	if (uv_domain == NULL) {
		uv_domain = irq_domain_add_tree(NULL, &uv_domain_ops, NULL);
		if (uv_domain)
			uv_domain->parent = x86_vector_domain;
	}
	mutex_unlock(&uv_lock);

	return uv_domain;
}
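/*
 * Added commentary: the UV irq domain is created lazily on first use,
 * under uv_lock, and parented to x86_vector_domain so that vector and
 * destination-CPU management is delegated to the common x86 vector layer.
 */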
/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int limit)
{
	struct irq_alloc_info info;
	struct irq_domain *domain = uv_get_irq_domain();

	if (!domain)
		return -ENOMEM;

	init_irq_alloc_info(&info, cpumask_of(cpu));
	info.type = X86_IRQ_ALLOC_TYPE_UV;
	info.uv_limit = limit;
	info.uv_blade = mmr_blade;
	info.uv_offset = mmr_offset;
	info.uv_name = irq_name;

	return irq_domain_alloc_irqs(domain, 1,
				     uv_blade_to_memory_nid(mmr_blade), &info);
}
EXPORT_SYMBOL_GPL(uv_setup_irq);
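/*
 * Usage sketch (illustrative only; the handler and device names below are
 * hypothetical, not part of this file): a driver pairs uv_setup_irq() and
 * uv_teardown_irq() with the usual request_irq()/free_irq() calls:
 *
 *	int irq = uv_setup_irq("my_uv_dev", cpu, mmr_blade, mmr_offset,
 *			       UV_AFFINITY_CPU);
 *	if (irq < 0)
 *		return irq;
 *	if (request_irq(irq, my_uv_handler, 0, "my_uv_dev", my_dev)) {
 *		uv_teardown_irq(irq);
 *		return -EBUSY;
 *	}
 *	...
 *	free_irq(irq, my_dev);
 *	uv_teardown_irq(irq);
 */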
/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * The irq argument must be the one returned by uv_setup_irq().
 */
void uv_teardown_irq(unsigned int irq)
{
	irq_domain_free_irqs(irq, 1);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);