// SPDX-License-Identifier: GPL-2.0-only
/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/rculist.h>

#include <trace/events/kvm.h>

#include <asm/msidef.h>

#include "irq.h"
#include "ioapic.h"
#include "lapic.h"
#include "hyperv.h"
#include "x86.h"
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
                           struct kvm *kvm, int irq_source_id, int level,
                           bool line_status)
{
        struct kvm_pic *pic = kvm->arch.vpic;

        return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
}
static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
                              struct kvm *kvm, int irq_source_id, int level,
                              bool line_status)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;

        return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
                                  line_status);
}
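/*
 * Slow-path APIC delivery: when the fast destination-map lookup cannot
 * handle the interrupt, walk every vCPU and deliver to each APIC that
 * matches the destination.  For lowest-priority delivery a single target
 * is picked, either by vector hashing (when enabled) or by comparing
 * arbitration priorities.  Returns the delivery result, or -1 if no APIC
 * matched.
 */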
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
                struct kvm_lapic_irq *irq, struct dest_map *dest_map)
{
        int i, r = -1;
        struct kvm_vcpu *vcpu, *lowest = NULL;
        unsigned long dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
        unsigned int dest_vcpus = 0;

        if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
                return r;

        if (irq->dest_mode == APIC_DEST_PHYSICAL &&
            irq->dest_id == 0xff && kvm_lowest_prio_delivery(irq)) {
                printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
                irq->delivery_mode = APIC_DM_FIXED;
        }

        memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap));

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_apic_present(vcpu))
                        continue;

                if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
                                         irq->dest_id, irq->dest_mode))
                        continue;

                if (!kvm_lowest_prio_delivery(irq)) {
                        if (r < 0)
                                r = 0;
                        r += kvm_apic_set_irq(vcpu, irq, dest_map);
                } else if (kvm_apic_sw_enabled(vcpu->arch.apic)) {
                        if (!kvm_vector_hashing_enabled()) {
                                if (!lowest)
                                        lowest = vcpu;
                                else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
                                        lowest = vcpu;
                        } else {
                                __set_bit(i, dest_vcpu_bitmap);
                                dest_vcpus++;
                        }
                }
        }

        if (dest_vcpus != 0) {
                int idx = kvm_vector_to_index(irq->vector, dest_vcpus,
                                              dest_vcpu_bitmap, KVM_MAX_VCPUS);

                lowest = kvm_get_vcpu(kvm, idx);
        }

        if (lowest)
                r = kvm_apic_set_irq(lowest, irq, dest_map);

        return r;
}
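/*
 * Decode an MSI routing entry's address/data into a struct kvm_lapic_irq:
 * destination ID (including the x2APIC extended destination bits when the
 * VM uses the x2APIC MSI format), vector, destination/trigger/delivery
 * mode and the redirection hint.
 */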
void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
                     struct kvm_lapic_irq *irq)
{
        trace_kvm_msi_set_irq(e->msi.address_lo | (kvm->arch.x2apic_format ?
                              (u64)e->msi.address_hi << 32 : 0),
                              e->msi.data);

        irq->dest_id = (e->msi.address_lo &
                        MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
        if (kvm->arch.x2apic_format)
                irq->dest_id |= MSI_ADDR_EXT_DEST_ID(e->msi.address_hi);
        irq->vector = (e->msi.data &
                        MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
        irq->dest_mode = kvm_lapic_irq_dest_mode(
                        !!((1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo));
        irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
        irq->delivery_mode = e->msi.data & 0x700;
        irq->msi_redir_hint = ((e->msi.address_lo
                & MSI_ADDR_REDIRECTION_LOWPRI) > 0);
        irq->level = 1;
        irq->shorthand = APIC_DEST_NOSHORT;
}
EXPORT_SYMBOL_GPL(kvm_set_msi_irq);
static inline bool kvm_msi_route_invalid(struct kvm *kvm,
                struct kvm_kernel_irq_routing_entry *e)
{
        return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff);
}
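/*
 * Inject an MSI: reject routes that are invalid for the VM's x2APIC
 * format, then deliver the decoded interrupt through the full APIC
 * delivery path.
 */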
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
                struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
        struct kvm_lapic_irq irq;

        if (kvm_msi_route_invalid(kvm, e))
                return -EINVAL;

        if (!level)
                return -1;

        kvm_set_msi_irq(kvm, e, &irq);

        return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
}
static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
                    struct kvm *kvm, int irq_source_id, int level,
                    bool line_status)
{
        if (!level)
                return -1;

        return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
}
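/*
 * Deliver an interrupt from atomic (non-sleeping) context.  Only Hyper-V
 * SynIC routes and MSIs that the fast delivery path can handle are
 * injected here; anything else returns -EWOULDBLOCK so the caller can
 * retry from a context that may sleep.
 */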
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
                              struct kvm *kvm, int irq_source_id, int level,
                              bool line_status)
{
        struct kvm_lapic_irq irq;
        int r;

        switch (e->type) {
        case KVM_IRQ_ROUTING_HV_SINT:
                return kvm_hv_set_sint(e, kvm, irq_source_id, level,
                                       line_status);

        case KVM_IRQ_ROUTING_MSI:
                if (kvm_msi_route_invalid(kvm, e))
                        return -EINVAL;

                kvm_set_msi_irq(kvm, e, &irq);

                if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
                        return r;
                break;

        default:
                break;
        }

        return -EWOULDBLOCK;
}
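/*
 * Allocate an IRQ source ID from kvm->arch.irq_sources_bitmap under
 * irq_lock.  Returns -EFAULT once all allocatable source IDs are in use;
 * the userspace and irqfd-resample IDs are reserved and never handed out.
 */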
int kvm_request_irq_source_id(struct kvm *kvm)
{
        unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
        int irq_source_id;

        mutex_lock(&kvm->irq_lock);
        irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);

        if (irq_source_id >= BITS_PER_LONG) {
                printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
                irq_source_id = -EFAULT;
                goto unlock;
        }

        ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
        ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
        set_bit(irq_source_id, bitmap);
unlock:
        mutex_unlock(&kvm->irq_lock);

        return irq_source_id;
}
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
        ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
        ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);

        mutex_lock(&kvm->irq_lock);
        if (irq_source_id < 0 ||
            irq_source_id >= BITS_PER_LONG) {
                printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
                goto unlock;
        }
        clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
        if (!irqchip_kernel(kvm))
                goto unlock;

        kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
        kvm_pic_clear_all(kvm->arch.vpic, irq_source_id);
unlock:
        mutex_unlock(&kvm->irq_lock);
}
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn)
{
        mutex_lock(&kvm->irq_lock);
        kimn->irq = irq;
        hlist_add_head_rcu(&kimn->link, &kvm->arch.mask_notifier_list);
        mutex_unlock(&kvm->irq_lock);
}
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn)
{
        mutex_lock(&kvm->irq_lock);
        hlist_del_rcu(&kimn->link);
        mutex_unlock(&kvm->irq_lock);
        synchronize_srcu(&kvm->irq_srcu);
}
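/*
 * Tell every registered mask notifier that the given irqchip pin has been
 * masked or unmasked.  The pin is translated to a GSI first; the notifier
 * list is walked under SRCU read-side protection.
 */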
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask)
{
        struct kvm_irq_mask_notifier *kimn;
        int idx, gsi;

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
        if (gsi != -1)
                hlist_for_each_entry_rcu(kimn, &kvm->arch.mask_notifier_list, link)
                        if (kimn->irq == gsi)
                                kimn->func(kimn, mask);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}
bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
        return irqchip_in_kernel(kvm);
}
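/*
 * Translate a userspace kvm_irq_routing_entry into its in-kernel form,
 * validating pin numbers and selecting the ->set callback for IRQCHIP,
 * MSI and Hyper-V SynIC routes.
 */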
int kvm_set_routing_entry(struct kvm *kvm,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue)
{
        /* We can't check irqchip_in_kernel() here as some callers are
         * currently initializing the irqchip. Other callers should therefore
         * check kvm_arch_can_set_irq_routing() before calling this function.
         */
        switch (ue->type) {
        case KVM_IRQ_ROUTING_IRQCHIP:
                if (irqchip_split(kvm))
                        return -EINVAL;
                e->irqchip.pin = ue->u.irqchip.pin;
                switch (ue->u.irqchip.irqchip) {
                case KVM_IRQCHIP_PIC_SLAVE:
                        e->irqchip.pin += PIC_NUM_PINS / 2;
                        fallthrough;
                case KVM_IRQCHIP_PIC_MASTER:
                        if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
                                return -EINVAL;
                        e->set = kvm_set_pic_irq;
                        break;
                case KVM_IRQCHIP_IOAPIC:
                        if (ue->u.irqchip.pin >= KVM_IOAPIC_NUM_PINS)
                                return -EINVAL;
                        e->set = kvm_set_ioapic_irq;
                        break;
                default:
                        return -EINVAL;
                }
                e->irqchip.irqchip = ue->u.irqchip.irqchip;
                break;
        case KVM_IRQ_ROUTING_MSI:
                e->set = kvm_set_msi;
                e->msi.address_lo = ue->u.msi.address_lo;
                e->msi.address_hi = ue->u.msi.address_hi;
                e->msi.data = ue->u.msi.data;

                if (kvm_msi_route_invalid(kvm, e))
                        return -EINVAL;
                break;
        case KVM_IRQ_ROUTING_HV_SINT:
                e->set = kvm_hv_set_sint;
                e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
                e->hv_sint.sint = ue->u.hv_sint.sint;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
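/*
 * Return true if the interrupt targets exactly one vCPU and store that
 * vCPU in *dest_vcpu.  The fast destination-map check is tried first; the
 * slow path scans all vCPUs and gives up as soon as a second match is
 * found.
 */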
bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
                             struct kvm_vcpu **dest_vcpu)
{
        int i, r = 0;
        struct kvm_vcpu *vcpu;

        if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
                return true;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_apic_present(vcpu))
                        continue;

                if (!kvm_apic_match_dest(vcpu, NULL, irq->shorthand,
                                         irq->dest_id, irq->dest_mode))
                        continue;

                if (++r == 2)
                        return false;

                *dest_vcpu = vcpu;
        }

        return r == 1;
}
EXPORT_SYMBOL_GPL(kvm_intr_is_single_vcpu);
#define IOAPIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
          .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#define PIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
          .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
#define ROUTING_ENTRY2(irq) \
        IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
static const struct kvm_irq_routing_entry default_routing[] = {
        ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
        ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
        ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
        ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
        ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
        ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
        ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
        ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
        ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
        ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
        ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
        ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
};
int kvm_setup_default_irq_routing(struct kvm *kvm)
{
        return kvm_set_irq_routing(kvm, default_routing,
                                   ARRAY_SIZE(default_routing), 0);
}
static const struct kvm_irq_routing_entry empty_routing[] = {};

int kvm_setup_empty_irq_routing(struct kvm *kvm)
{
        return kvm_set_irq_routing(kvm, empty_routing, 0, 0);
}
void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
        if (!irqchip_split(kvm))
                return;
        kvm_make_scan_ioapic_request(kvm);
}
void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
                            ulong *ioapic_handled_vectors)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_kernel_irq_routing_entry *entry;
        struct kvm_irq_routing_table *table;
        u32 i, nr_ioapic_pins;
        int idx;

        idx = srcu_read_lock(&kvm->irq_srcu);
        table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
        nr_ioapic_pins = min_t(u32, table->nr_rt_entries,
                               kvm->arch.nr_reserved_ioapic_pins);
        for (i = 0; i < nr_ioapic_pins; ++i) {
                hlist_for_each_entry(entry, &table->map[i], link) {
                        struct kvm_lapic_irq irq;

                        if (entry->type != KVM_IRQ_ROUTING_MSI)
                                continue;

                        kvm_set_msi_irq(vcpu->kvm, entry, &irq);

                        if (irq.trig_mode &&
                            kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
                                                irq.dest_id, irq.dest_mode))
                                __set_bit(irq.vector, ioapic_handled_vectors);
                }
        }
        srcu_read_unlock(&kvm->irq_srcu, idx);
}
void kvm_arch_irq_routing_update(struct kvm *kvm)
{
        kvm_hv_irq_routing_update(kvm);
}