/*
 *  Copyright (C) 2001  MandrakeSoft S.A.
 *
 *    http://www.linux-mandrake.com/
 *    http://www.mandrakesoft.com/
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *  Yunhong Jiang <yunhong.jiang@intel.com>
 *  Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *  Based on Xen 3.1 code.
 */
29 #include <linux/kvm_host.h>
30 #include <linux/kvm.h>
32 #include <linux/highmem.h>
33 #include <linux/smp.h>
34 #include <linux/hrtimer.h>
36 #include <asm/processor.h>
38 #include <asm/current.h>
45 #define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg)
47 #define ioapic_debug(fmt, arg...)
static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq);

/*
 * Read a register through the indirect IOREGSEL/IOWIN window.  The
 * register number previously latched into ioapic->ioregsel selects
 * what is returned: the version register, the APIC/arbitration ID, or
 * one 32-bit half of a 64-bit redirection-table entry.
 */
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
					  unsigned long addr,
					  unsigned long length)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Bits 16-23: maximum redirection entry index;
		 * bits 0-7: IOAPIC version. */
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;

	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		/* The 4-bit IOAPIC ID lives in bits 24-27 of both the
		 * ID and the arbitration-ID registers. */
		result = ((ioapic->id & 0xf) << 24);
		break;

	default:
		{
			/*
			 * Redirection table: registers 0x10 + 2n map to
			 * entry n; bit 0 of ioregsel selects the high
			 * dword of the 64-bit entry.
			 */
			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
			u64 redir_content;

			ASSERT(redir_index < IOAPIC_NUM_PINS);

			redir_content = ioapic->redirtbl[redir_index].bits;
			result = (ioapic->ioregsel & 0x1) ?
			    (redir_content >> 32) & 0xffffffff :
			    redir_content & 0xffffffff;
			break;
		}
	}

	return result;
}
/*
 * Try to deliver the interrupt pending on pin @idx.
 *
 * A masked entry is left untouched.  When a level-triggered interrupt
 * is successfully injected, Remote IRR is set so the pin is not
 * redelivered until the guest EOIs it.  An edge-triggered pin has its
 * IRR bit cleared once serviced, so the next edge can be latched.
 */
static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
{
	union ioapic_redir_entry *pent;

	pent = &ioapic->redirtbl[idx];

	if (!pent->fields.mask) {
		int injected = ioapic_deliver(ioapic, idx);
		if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
			pent->fields.remote_irr = 1;
	}
	/* trig_mode == 0 means edge-triggered. */
	if (!pent->fields.trig_mode)
		ioapic->irr &= ~(1 << idx);
}
/*
 * Write the register selected by ioapic->ioregsel through the indirect
 * IOWIN window.  A redirection-table write may toggle the mask bit (in
 * which case mask notifiers fire) or unmask a pin with an interrupt
 * still pending, in which case delivery is attempted immediately.
 */
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		/* Arbitration ID is not writable. */
		break;

	default:
		/* Registers 0x10 + 2n address redirection entry n. */
		index = (ioapic->ioregsel - 0x10) >> 1;

		ioapic_debug("change redir index %x val %x\n", index, val);
		if (index >= IOAPIC_NUM_PINS)
			return;
		mask_before = ioapic->redirtbl[index].fields.mask;
		if (ioapic->ioregsel & 1) {
			/* Odd register: replace the high dword. */
			ioapic->redirtbl[index].bits &= 0xffffffff;
			ioapic->redirtbl[index].bits |= (u64) val << 32;
		} else {
			/* Even register: replace the low dword and clear
			 * Remote IRR — the entry was reprogrammed. */
			ioapic->redirtbl[index].bits &= ~0xffffffffULL;
			ioapic->redirtbl[index].bits |= (u32) val;
			ioapic->redirtbl[index].fields.remote_irr = 0;
		}
		mask_after = ioapic->redirtbl[index].fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
		/* If the pin is still asserted, (re)try delivery with
		 * the updated entry. */
		if (ioapic->irr & (1 << index))
			ioapic_service(ioapic, index);
		break;
	}
}
/*
 * Inject a fixed or lowest-priority interrupt into @vcpu's local APIC.
 * Returns the result of kvm_apic_set_irq() (whether the interrupt was
 * accepted).
 */
static int ioapic_inj_irq(struct kvm_ioapic *ioapic,
			   struct kvm_vcpu *vcpu,
			   u8 vector, u8 trig_mode, u8 delivery_mode)
{
	ioapic_debug("irq %d trig %d deliv %d\n", vector, trig_mode,
		     delivery_mode);

	/* NMI and other delivery modes go through dedicated paths. */
	ASSERT((delivery_mode == IOAPIC_FIXED) ||
	       (delivery_mode == IOAPIC_LOWEST_PRIORITY));

	return kvm_apic_set_irq(vcpu, vector, trig_mode);
}
/*
 * Queue an NMI for @vcpu and kick it so a halted/running vcpu notices
 * the pending NMI promptly.
 */
static void ioapic_inj_nmi(struct kvm_vcpu *vcpu)
{
	kvm_inject_nmi(vcpu);
	kvm_vcpu_kick(vcpu);
}
/*
 * Translate a redirection-entry destination into a bitmask of vcpu
 * indices.
 *
 * dest_mode == 0 is physical mode: 0xFF broadcasts to every vcpu with
 * an in-kernel local APIC, otherwise the single matching APIC ID is
 * selected.  Non-zero dest_mode is logical mode: every local APIC
 * whose logical destination matches @dest is included (an MDA of 0
 * matches nothing).
 */
u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
				    u8 dest_mode)
{
	u32 mask = 0;
	int i;
	struct kvm *kvm = ioapic->kvm;
	struct kvm_vcpu *vcpu;

	ioapic_debug("dest %d dest_mode %d\n", dest, dest_mode);

	if (dest_mode == 0) {	/* Physical mode. */
		if (dest == 0xFF) {	/* Broadcast. */
			for (i = 0; i < KVM_MAX_VCPUS; ++i)
				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
					mask |= 1 << i;
			return mask;
		}
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (!vcpu)
				continue;
			if (kvm_apic_match_physical_addr(vcpu->arch.apic, dest)) {
				if (vcpu->arch.apic)
					mask = 1 << i;
				/* Physical IDs are unique: stop at the
				 * first match. */
				break;
			}
		}
	} else if (dest != 0)	/* Logical mode, MDA non-zero. */
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (!vcpu)
				continue;
			if (vcpu->arch.apic &&
			    kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
				mask |= 1 << vcpu->vcpu_id;
		}
	ioapic_debug("mask %x\n", mask);
	return mask;
}
/*
 * Deliver the interrupt programmed into redirection-table entry @irq
 * to the local APIC(s) selected by the entry's destination fields.
 *
 * Returns the local-APIC injection result; 0 when the destination
 * bitmask is empty, negative when nothing could be injected.
 */
static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
{
	u8 dest = ioapic->redirtbl[irq].fields.dest_id;
	u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode;
	u8 delivery_mode = ioapic->redirtbl[irq].fields.delivery_mode;
	u8 vector = ioapic->redirtbl[irq].fields.vector;
	u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
	u32 deliver_bitmask;
	struct kvm_vcpu *vcpu;
	int vcpu_id, r = -1;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     dest, dest_mode, delivery_mode, vector, trig_mode);

	deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic, dest,
							  dest_mode);
	if (!deliver_bitmask) {
		ioapic_debug("no target on destination\n");
		return 0;
	}

	switch (delivery_mode) {
	case IOAPIC_LOWEST_PRIORITY:
		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
				deliver_bitmask);
#ifdef CONFIG_X86
		/* Pin 0 (PIC passthrough) is always routed to the BSP. */
		if (irq == 0)
			vcpu = ioapic->kvm->vcpus[0];
#endif
		if (vcpu != NULL)
			r = ioapic_inj_irq(ioapic, vcpu, vector,
				       trig_mode, delivery_mode);
		else
			ioapic_debug("null lowest prio vcpu: "
				     "mask=%x vector=%x delivery_mode=%x\n",
				     deliver_bitmask, vector,
				     IOAPIC_LOWEST_PRIORITY);
		break;
	case IOAPIC_FIXED:
		/* Inject into every vcpu in the destination bitmask,
		 * consuming one bit per iteration. */
		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
			if (!(deliver_bitmask & (1 << vcpu_id)))
				continue;
			deliver_bitmask &= ~(1 << vcpu_id);
			vcpu = ioapic->kvm->vcpus[vcpu_id];
			if (vcpu)
				r = ioapic_inj_irq(ioapic, vcpu, vector,
					       trig_mode, delivery_mode);
		}
		break;
	case IOAPIC_NMI:
		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
			if (!(deliver_bitmask & (1 << vcpu_id)))
				continue;
			deliver_bitmask &= ~(1 << vcpu_id);
			vcpu = ioapic->kvm->vcpus[vcpu_id];
			if (vcpu)
				ioapic_inj_nmi(vcpu);
			else
				/* NOTE(review): vcpu is NULL on this branch,
				 * so vcpu->vcpu_id would oops if ioapic_debug
				 * were compiled in — should be vcpu_id;
				 * confirm against the full file. */
				ioapic_debug("NMI to vcpu %d failed\n",
						vcpu->vcpu_id);
		}
		break;
	default:
		printk(KERN_WARNING "Unsupported delivery mode %d\n",
		       delivery_mode);
		break;
	}
	return r;
}
/*
 * Raise or lower external input line @irq.  @level is the raw line
 * state; it is XORed with the entry's polarity bit to obtain the
 * effective level.  An edge-triggered pin is serviced only on a 0->1
 * IRR transition; a level-triggered pin is (re)serviced unless Remote
 * IRR is still set (the guest has not yet EOIed the previous one).
 */
void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
{
	u32 old_irr = ioapic->irr;
	u32 mask = 1 << irq;
	union ioapic_redir_entry entry;

	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
		entry = ioapic->redirtbl[irq];
		level ^= entry.fields.polarity;
		if (!level)
			ioapic->irr &= ~mask;
		else {
			ioapic->irr |= mask;
			/* trig_mode == 0 is edge-triggered. */
			if ((!entry.fields.trig_mode && old_irr != ioapic->irr)
			    || !entry.fields.remote_irr)
				ioapic_service(ioapic, irq);
		}
	}
}
/*
 * Handle an EOI for the interrupt on pin @gsi: run the ack notifiers,
 * and for a level-triggered pin clear Remote IRR and re-deliver if the
 * line is still asserted and the pin is unmasked.
 */
static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int gsi,
				    int trigger_mode)
{
	union ioapic_redir_entry *ent;

	ent = &ioapic->redirtbl[gsi];

	kvm_notify_acked_irq(ioapic->kvm, gsi);

	if (trigger_mode == IOAPIC_LEVEL_TRIG) {
		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
		ent->fields.remote_irr = 0;
		/* Line still high and unmasked: deliver the next one. */
		if (!ent->fields.mask && (ioapic->irr & (1 << gsi)))
			ioapic_service(ioapic, gsi);
	}
}
313 void kvm_ioapic_update_eoi(struct kvm
*kvm
, int vector
, int trigger_mode
)
315 struct kvm_ioapic
*ioapic
= kvm
->arch
.vioapic
;
318 for (i
= 0; i
< IOAPIC_NUM_PINS
; i
++)
319 if (ioapic
->redirtbl
[i
].fields
.vector
== vector
)
320 __kvm_ioapic_update_eoi(ioapic
, i
, trigger_mode
);
323 static int ioapic_in_range(struct kvm_io_device
*this, gpa_t addr
,
324 int len
, int is_write
)
326 struct kvm_ioapic
*ioapic
= (struct kvm_ioapic
*)this->private;
328 return ((addr
>= ioapic
->base_address
&&
329 (addr
< ioapic
->base_address
+ IOAPIC_MEM_LENGTH
)));
/*
 * MMIO read handler.  Only two registers are directly mapped:
 * IOREGSEL (the index latch) and IOWIN (the indirect data window);
 * everything else reads as zero.  The result is then sized to the
 * access width requested by the guest.
 */
static void ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
			     void *val)
{
	struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
	u32 result;

	ioapic_debug("addr %lx\n", (unsigned long)addr);
	ASSERT(!(addr & 0xf));	/* check alignment */

	addr &= 0xff;
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic, addr, len);
		break;

	default:
		result = 0;
		break;
	}
	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
}
/*
 * MMIO write handler.  Accepts only 4- or 8-byte accesses; writes to
 * IOREGSEL latch the register index and writes to IOWIN go through the
 * indirect path.  The EOI register is only present on ia64.
 */
static void ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
			      const void *val)
{
	struct kvm_ioapic *ioapic = (struct kvm_ioapic *)this->private;
	u32 data;

	ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
		     (void*)addr, len, val);
	ASSERT(!(addr & 0xf));	/* check alignment */
	if (len == 4 || len == 8)
		data = *(u32 *) val;
	else {
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return;
	}

	addr &= 0xff;
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data;
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;
#ifdef	CONFIG_IA64
	case IOAPIC_REG_EOI:
		kvm_ioapic_update_eoi(ioapic->kvm, data, IOAPIC_LEVEL_TRIG);
		break;
#endif

	default:
		break;
	}
}
/*
 * Reset the virtual IOAPIC to power-on state: every redirection entry
 * masked, register/index state cleared, and the MMIO window placed at
 * the architectural default base address.
 */
void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;	/* pins disabled out of reset */
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->id = 0;
	ioapic->irr = 0;
}
/*
 * Allocate, reset, and register the VM's virtual IOAPIC on the MMIO
 * bus.
 *
 * Returns 0 on success or -ENOMEM if the device cannot be allocated.
 */
int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
	if (!ioapic)
		return -ENOMEM;
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	/* Hook up the MMIO callbacks before exposing the device. */
	ioapic->dev.read = ioapic_mmio_read;
	ioapic->dev.write = ioapic_mmio_write;
	ioapic->dev.in_range = ioapic_in_range;
	ioapic->dev.private = ioapic;
	kvm_io_bus_register_dev(&kvm->mmio_bus, &ioapic->dev);
	return 0;
}