/*
 * kvm_vcpu.c: handling all virtual cpu related thing.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Shaofan Li (Susue Li) <susie.li@intel.com>
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 *  Xiantao Zhang <xiantao.zhang@intel.com>
 */
#include <linux/kvm_host.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/ia64regs.h>
#include <asm/gcc_intrin.h>
#include <asm/kregs.h>
#include <asm/pgtable.h>

#include "asm-offsets.h"
/*
 * - Index by it/dt/rt sequence
 * - Only existing mode transitions are allowed in this table
 * - RSE is placed at lazy mode when emulating guest partial mode
 * - If gva happens to be rr0 and rr4, only allowed case is identity
 *   mapping (gva=gpa), or panic! (How?)
 */
int mm_switch_table[8][8] = {
	/* 2004/09/12(Kevin): Allow switch to self */
	/*
	 *  (it,dt,rt): (0,0,0) -> (1,1,1)
	 *  This kind of transition usually occurs in the very early
	 *  stage of Linux boot up procedure. Another case is in efi
	 *  and pal calls. (see "arch/ia64/kernel/head.S")
	 *
	 *  (it,dt,rt): (0,0,0) -> (0,1,1)
	 *  This kind of transition is found when OSYa exits efi boot
	 *  service. Due to gva = gpa in this case (Same region),
	 *  data access can be satisfied though itlb entry for physical
	 *  emulation is hit.
	 */
	{SW_SELF, 0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
	{0,  0,  0,  0,  0,  0,  0,  0},
	{0,  0,  0,  0,  0,  0,  0,  0},
	/*
	 *  (it,dt,rt): (0,1,1) -> (1,1,1)
	 *  This kind of transition is found in OSYa.
	 *
	 *  (it,dt,rt): (0,1,1) -> (0,0,0)
	 *  This kind of transition is found in OSYa.
	 */
	{SW_NOP, 0,  0,  SW_SELF, 0,  0,  0,  SW_P2V},
	/* (1,0,0)->(1,1,1) */
	{0,  0,  0,  0,  0,  0,  0,  SW_P2V},
	/*
	 *  (it,dt,rt): (1,0,1) -> (1,1,1)
	 *  This kind of transition usually occurs when Linux returns
	 *  from the low level TLB miss handlers.
	 *  (see "arch/ia64/kernel/ivt.S")
	 */
	{0,  0,  0,  0,  0,  SW_SELF, 0,  SW_P2V},
	{0,  0,  0,  0,  0,  0,  0,  0},
	/*
	 *  (it,dt,rt): (1,1,1) -> (1,0,1)
	 *  This kind of transition usually occurs in Linux low level
	 *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
	 *
	 *  (it,dt,rt): (1,1,1) -> (0,0,0)
	 *  This kind of transition usually occurs in pal and efi calls,
	 *  which requires running in physical mode.
	 *  (see "arch/ia64/kernel/head.S")
	 */
	{SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
};
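/*
 * Illustrative sketch (not from the original file): how a (it,dt,rt) psr
 * triple is folded into a 0-7 index for mm_switch_table.  MODE_IND() from
 * vcpu.h is assumed to be equivalent to the helper below, so e.g. the
 * (1,1,1) -> (0,0,0) transition reads mm_switch_table[7][0] == SW_V2P.
 */
#if 0	/* example only, never compiled */
static int example_mode_ind(struct ia64_psr psr)
{
	/* bit 2 = psr.it, bit 1 = psr.dt, bit 0 = psr.rt */
	return (psr.it << 2) | (psr.dt << 1) | psr.rt;
}
#endif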
void physical_mode_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mode_flags = GUEST_IN_PHY;
}

void switch_to_physical_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	/* Save original virtual mode rr[0] and rr[4] */
	psr = ia64_clear_ic();
	ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
	ia64_srlz_d();

	ia64_set_psr(psr);
	return;
}

void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	psr = ia64_clear_ic();
	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
	ia64_srlz_d();
	ia64_set_psr(psr);
	return;
}

static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
{
	return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
}
void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
					struct ia64_psr new_psr)
{
	int act;

	act = mm_switch_action(old_psr, new_psr);
	switch (act) {
	case SW_V2P:
		/*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
		old_psr.val, new_psr.val);*/
		switch_to_physical_rid(vcpu);
		/*
		 * Set rse to enforced lazy, to prevent active rse
		 * save/restor when guest physical mode.
		 */
		vcpu->arch.mode_flags |= GUEST_IN_PHY;
		break;
	case SW_P2V:
		switch_to_virtual_rid(vcpu);
		/*
		 * recover old mode which is saved when entering
		 * guest physical mode
		 */
		vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
		break;
	default:
		break;
	}
	return;
}
/*
 * In physical mode, insert tc/tr for region 0 and 4 uses
 * RID[0] and RID[4] which is for physical mode emulation.
 * However what those inserted tc/tr wants is rid for
 * virtual mode. So original virtual rid needs to be restored.
 *
 * Operations which required such switch include:
 *  - insertions (itc.*, itr.*)
 *  - purges (ptc.* and ptr.*)
 *
 * All above needs actual virtual rid for destination entry.
 */
void check_mm_mode_switch(struct kvm_vcpu *vcpu,  struct ia64_psr old_psr,
					struct ia64_psr new_psr)
{
	if ((old_psr.dt != new_psr.dt)
			|| (old_psr.it != new_psr.it)
			|| (old_psr.rt != new_psr.rt))
		switch_mm_mode(vcpu, old_psr, new_psr);

	return;
}

/*
 * In physical mode, insert tc/tr for region 0 and 4 uses
 * RID[0] and RID[4] which is for physical mode emulation.
 * However what those inserted tc/tr wants is rid for
 * virtual mode. So original virtual rid needs to be restored.
 *
 * Operations which required such switch include:
 *  - insertions (itc.*, itr.*)
 *  - purges (ptc.* and ptr.*)
 *
 * All above needs actual virtual rid for destination entry.
 */
void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
{
	if (is_physical_mode(vcpu)) {
		vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
		switch_to_virtual_rid(vcpu);
	}
	return;
}

/* Recover always follows prepare */
void recover_if_physical_mode(struct kvm_vcpu *vcpu)
{
	if (is_physical_mode(vcpu))
		switch_to_physical_rid(vcpu);
	vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
	return;
}
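/*
 * Illustrative sketch (not from the original file): the calling pattern the
 * pair above is designed for, bracketing an emulated TLB operation so it
 * runs with the guest's virtual rids loaded in rr0/rr4.
 */
#if 0	/* example only, never compiled */
static void example_emulate_with_virtual_rid(struct kvm_vcpu *vcpu)
{
	prepare_if_physical_mode(vcpu);	/* load virtual rr0/rr4 if needed */
	/* ... itc/itr/ptc/ptr/tpa emulation that needs the virtual rid ... */
	recover_if_physical_mode(vcpu);	/* back to metaphysical rr0/rr4 */
}
#endif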
#define RPT(x)		((u16) &((struct kvm_pt_regs *)0)->x)

static u16 gr_info[32] = {
	0, 	/* r0 is read-only : WE SHOULD NEVER GET THIS */
	RPT(r1), RPT(r2), RPT(r3),
	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
	RPT(r12), RPT(r13), RPT(r14), RPT(r15),
	RPT(r16), RPT(r17), RPT(r18), RPT(r19),
	RPT(r20), RPT(r21), RPT(r22), RPT(r23),
	RPT(r24), RPT(r25), RPT(r26), RPT(r27),
	RPT(r28), RPT(r29), RPT(r30), RPT(r31)
};
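/*
 * Illustrative sketch (not from the original file): RPT() is the classic
 * offsetof() trick, so gr_info[n] is the byte offset of the saved rN slot
 * inside struct kvm_pt_regs; getreg()/setreg() below add it to the regs
 * base pointer.
 */
#if 0	/* example only, never compiled */
static unsigned long example_read_saved_r8(struct kvm_pt_regs *regs)
{
	return *(unsigned long *)((unsigned long)regs + gr_info[8]);
}
#endif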
#define IA64_FIRST_STACKED_GR   32
#define IA64_FIRST_ROTATING_FR  32

static inline unsigned long
rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
{
	reg += rrb;
	if (reg >= sor)
		reg -= sor;
	return reg;
}

/*
 * Return the (rotated) index for floating point register REGNUM
 * (REGNUM must be in the range 32-127; the result is in the range 0-95).
 */
static inline unsigned long fph_index(struct kvm_pt_regs *regs,
						long regnum)
{
	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
}
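/*
 * Illustrative sketch (not from the original file): with rrb.fr == 3 the
 * physical fph slot backing f34 is rotate_reg(96, 3, 34 - 32) == 5, i.e.
 * rotation just adds the rename base modulo the 96 rotating registers.
 */
#if 0	/* example only, never compiled */
static unsigned long example_fph_slot_for_f34(struct kvm_pt_regs *regs)
{
	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
	return rotate_reg(96, rrb_fr, 34 - IA64_FIRST_ROTATING_FR);
}
#endif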
/*
 * The inverse of the above: given bspstore and the number of
 * registers, calculate ar.bsp.
 */
static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
							long num_regs)
{
	long delta = ia64_rse_slot_num(addr) + num_regs;
	int i = 0;

	if (num_regs < 0)
		delta -= 0x3e;
	if (delta < 0) {
		while (delta <= -0x3f) {
			i--;
			delta += 0x3f;
		}
	} else {
		while (delta >= 0x3f) {
			i++;
			delta -= 0x3f;
		}
	}
	return addr + num_regs + i;
}
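/*
 * Illustrative note (not from the original file): on the register backing
 * store every 64th slot holds an RNAT collection instead of a stacked
 * register, which is why skipping num_regs registers can step over extra
 * slots.  addr_at_slot_60 below is a hypothetical pointer whose RSE slot
 * number is 60; it is only a worked example.
 */
#if 0	/* example only, never compiled */
	/* moving 10 registers ahead crosses one RNAT collection,
	 * so the result is addr_at_slot_60 + 10 + 1 doublewords
	 */
	unsigned long *p = kvm_rse_skip_regs(addr_at_slot_60, 10);
#endif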
static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
					unsigned long *val, int *nat)
{
	unsigned long *bsp, *addr, *rnat_addr, *bspstore;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	new_rsc = old_rsc&(~(0x3));
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);

	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	bsp = kbs + (regs->loadrs >> 19);

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	if (addr >= bspstore) {
		ia64_flushrs();
		ia64_mf();
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	}
	*val = *addr;
	if (bspstore < rnat_addr)
		*nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
							& nat_mask);
	else
		*nat = (int)!!((*rnat_addr) & nat_mask);
	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
}
void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
				unsigned long val, unsigned long nat)
{
	unsigned long *bsp, *bspstore, *addr, *rnat_addr;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc, psr;
	unsigned long rnat;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	/* put RSC to lazy mode, and set loadrs 0 */
	new_rsc = old_rsc & (~0x3fff0003);
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
	bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	local_irq_save(psr);
	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	if (addr >= bspstore) {
		ia64_flushrs();
		ia64_mf();
		*addr = val;
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		if (bspstore < rnat_addr)
			rnat = rnat & (~nat_mask);
		else
			*rnat_addr = (*rnat_addr)&(~nat_mask);

		ia64_mf();
		ia64_loadrs();
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	} else {
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		*addr = val;
		if (bspstore < rnat_addr)
			rnat = rnat&(~nat_mask);
		else
			*rnat_addr = (*rnat_addr) & (~nat_mask);

		ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	}
	local_irq_restore(psr);
	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
}
void getreg(unsigned long regnum, unsigned long *val,
				int *nat, struct kvm_pt_regs *regs)
{
	unsigned long addr, *unat;

	if (regnum >= IA64_FIRST_STACKED_GR) {
		get_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	addr = (unsigned long)regs;
	unat = &regs->eml_unat;

	addr += gr_info[regnum];

	*val  = *(unsigned long *)addr;
	/*
	 * do it only when requested
	 */
	if (nat)
		*nat  = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
}
void setreg(unsigned long regnum, unsigned long val,
			int nat, struct kvm_pt_regs *regs)
{
	unsigned long addr;
	unsigned long bitmask;
	unsigned long *unat;

	/*
	 * First takes care of stacked registers
	 */
	if (regnum >= IA64_FIRST_STACKED_GR) {
		set_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	addr = (unsigned long)regs;
	unat = &regs->eml_unat;
	/*
	 * add offset from base of struct
	 */
	addr += gr_info[regnum];

	*(unsigned long *)addr = val;

	/*
	 * We need to clear the corresponding UNAT bit to fully emulate the load
	 * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
	 */
	bitmask   = 1UL << ((addr >> 3) & 0x3f);
	if (nat)
		*unat |= bitmask;
	else
		*unat &= ~bitmask;
}
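/*
 * Illustrative sketch (not from the original file): the UNAT bit for a
 * saved register is taken from bits 8:3 of the slot's address, so slots
 * that are 8 bytes apart map to adjacent UNAT bits.
 */
#if 0	/* example only, never compiled */
static int example_r8_nat(struct kvm_pt_regs *regs)
{
	unsigned long addr = (unsigned long)regs + gr_info[8];
	return (regs->eml_unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
}
#endif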
u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long val;

	if (!reg)
		return 0;
	getreg(reg, &val, 0, regs);
	return val;
}

void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	long sof = (regs->cr_ifs) & 0x7f;

	if (!reg)
		return;
	if (reg >= sof + 32)
		return;
	setreg(reg, value, nat, regs);	/* FIXME: handle NATs later*/
}

void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
				struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration*/
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)			\
	case	(reg) :				\
		ia64_stf_spill(fpval, reg);	\
		break

	switch (regnum) {
		CASE_FIXED_FP(0);
		CASE_FIXED_FP(1);
		/* ... CASE_FIXED_FP(2) through CASE_FIXED_FP(127) ... */
	}
}

void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
					struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration*/
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)			\
	case	(reg) :				\
		ia64_ldf_fill(reg, fpval);	\
		break

	switch (regnum) {
		CASE_FIXED_FP(0);
		CASE_FIXED_FP(1);
		/* ... CASE_FIXED_FP(2) through CASE_FIXED_FP(127) ... */
	}
}

void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
						struct ia64_fpreg *val)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	getfpreg(reg, val, regs);	/* FIXME: handle NATs later*/
}

void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
						struct ia64_fpreg *val)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	setfpreg(reg, val, regs);	/* FIXME: handle NATs later*/
}
/*
 * The Altix RTC is mapped specially here for the vmm module
 */
#define SN_RTC_BASE	(u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
static long kvm_get_itc(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	struct kvm *kvm = (struct kvm *)KVM_VM_BASE;

	if (kvm->arch.is_sn2)
		return (*SN_RTC_BASE);
	else
#endif
		return ia64_getreg(_IA64_REG_AR_ITC);
}
/************************************************************************
 ***********************************************************************/
u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
{
	unsigned long guest_itc;

	guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);

	if (guest_itc >= VMX(vcpu, last_itc)) {
		VMX(vcpu, last_itc) = guest_itc;
		return  guest_itc;
	} else
		return VMX(vcpu, last_itc);
}
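/*
 * Illustrative note (not from the original file): last_itc acts as a
 * high-water mark, so the guest never sees its ITC move backwards even if
 * itc_offset plus the host counter momentarily yields a smaller value
 * (e.g. right after vcpu_set_itc() rebases the offset).
 */
#if 0	/* example only, never compiled */
static void example_itc_monotonic(struct kvm_vcpu *vcpu)
{
	u64 t1 = vcpu_get_itc(vcpu);
	u64 t2 = vcpu_get_itc(vcpu);
	/* t2 >= t1 is guaranteed by the last_itc clamp above */
}
#endif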
static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);

static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_vcpu *v;
	struct kvm *kvm;
	int i;
	long itc_offset = val - kvm_get_itc(vcpu);
	unsigned long vitv = VCPU(vcpu, itv);

	kvm = (struct kvm *)KVM_VM_BASE;

	if (kvm_vcpu_is_bsp(vcpu)) {
		for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
			v = (struct kvm_vcpu *)((char *)vcpu +
					sizeof(struct kvm_vcpu_data) * i);
			VMX(v, itc_offset) = itc_offset;
			VMX(v, last_itc) = 0;
		}
	} else
		VMX(vcpu, last_itc) = 0;

	if (VCPU(vcpu, itm) <= val) {
		VMX(vcpu, itc_check) = 0;
		vcpu_unpend_interrupt(vcpu, vitv);
	} else {
		VMX(vcpu, itc_check) = 1;
		vcpu_set_itm(vcpu, VCPU(vcpu, itm));
	}
}

static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, itm));
}

static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
{
	unsigned long vitv = VCPU(vcpu, itv);
	VCPU(vcpu, itm) = val;

	if (val > vcpu_get_itc(vcpu)) {
		VMX(vcpu, itc_check) = 1;
		vcpu_unpend_interrupt(vcpu, vitv);
		VMX(vcpu, timer_pending) = 0;
	} else
		VMX(vcpu, itc_check) = 0;
}

#define  ITV_VECTOR(itv)	(itv&0xff)
#define  ITV_IRQ_MASK(itv)	(itv&(1<<16))

static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, itv) = val;
	if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
		vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
		vcpu->arch.timer_pending = 0;
	}
}

static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
{
	int vec;

	vec = highest_inservice_irq(vcpu);
	if (vec == NULL_VECTOR)
		return;
	VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
	vcpu->arch.irq_new_pending = 1;
}
/* See Table 5-8 in SDM vol2 for the definition */
int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
{
	union ia64_tpr vtpr;

	vtpr.val = VCPU(vcpu, tpr);

	if (h_inservice == NMI_VECTOR)
		return IRQ_MASKED_BY_INSVC;

	if (h_pending == NMI_VECTOR) {
		/* Non Maskable Interrupt */
		return IRQ_NO_MASKED;
	}

	if (h_inservice == ExtINT_VECTOR)
		return IRQ_MASKED_BY_INSVC;

	if (h_pending == ExtINT_VECTOR) {
		if (vtpr.mmi) {
			/* mask all external IRQ */
			return IRQ_MASKED_BY_VTPR;
		} else
			return IRQ_NO_MASKED;
	}

	if (is_higher_irq(h_pending, h_inservice)) {
		if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
			return IRQ_NO_MASKED;
		else
			return IRQ_MASKED_BY_VTPR;
	} else {
		return IRQ_MASKED_BY_INSVC;
	}
}
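/*
 * Illustrative note (not from the original file): a sample walk through the
 * priority rules above, assuming is_higher_class() compares the pending
 * vector's class (vector/16) against tpr.mic.  With tpr.mic == 5 and
 * nothing in service, vector 0x45 (class 4) is masked by VTPR while vector
 * 0x65 (class 6) is deliverable.
 */
#if 0	/* example only, never compiled */
static void example_irq_masking(struct kvm_vcpu *vcpu)
{
	int m1 = irq_masked(vcpu, 0x45, NULL_VECTOR);	/* IRQ_MASKED_BY_VTPR */
	int m2 = irq_masked(vcpu, 0x65, NULL_VECTOR);	/* IRQ_NO_MASKED */
}
#endif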
void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
	long spsr;
	int ret;

	local_irq_save(spsr);
	ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(spsr);

	vcpu->arch.irq_new_pending = 1;
}

void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
	long spsr;
	int ret;

	local_irq_save(spsr);
	ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(spsr);

	vcpu->arch.irq_new_pending = 1;
}

void update_vhpi(struct kvm_vcpu *vcpu, int vec)
{
	u64 vhpi;

	if (vec == NULL_VECTOR)
		vhpi = 0;
	else if (vec == NMI_VECTOR)
		vhpi = 32;
	else if (vec == ExtINT_VECTOR)
		vhpi = 16;
	else
		vhpi = vec >> 4;

	VCPU(vcpu, vhpi) = vhpi;
	if (VCPU(vcpu, vac).a_int)
		ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
				(u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
}

u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
{
	int vec, h_inservice, mask;

	vec = highest_pending_irq(vcpu);
	h_inservice = highest_inservice_irq(vcpu);
	mask = irq_masked(vcpu, vec, h_inservice);
	if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
		if (VCPU(vcpu, vhpi))
			update_vhpi(vcpu, NULL_VECTOR);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	if (mask == IRQ_MASKED_BY_VTPR) {
		update_vhpi(vcpu, vec);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
	vcpu_unpend_interrupt(vcpu, vec);
	return  (u64)vec;
}
/**************************************************************************
  Privileged operation emulation routines
 **************************************************************************/
u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_pta vpta;
	union ia64_rr vrr;
	u64 pval;
	u64 vhpt_offset;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
	if (vpta.vf)
		pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
				vpta.val, 0, 0, 0, 0);
	else
		pval = (vadr & VRN_MASK) | vhpt_offset |
			(vpta.val << 3 >> (vpta.size + 3) << (vpta.size));

	return  pval;
}
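/*
 * Illustrative note (not from the original file): the non-vf fallback in
 * vcpu_thash() above builds a long-format VHPT address by hand: keep the
 * region bits of vadr, take the PTA base aligned to its 2^pta.size window,
 * and add the hashed offset ((vadr >> page_shift) << 3) truncated to that
 * window.
 */
#if 0	/* example only, never compiled */
static u64 example_thash_offset(u64 vadr, u64 page_shift, u64 pta_size)
{
	/* with pta_size == 15 (a 32 KB table) only the low 15 bits of the
	 * shifted value select an 8-byte entry
	 */
	return ((vadr >> page_shift) << 3) & ((1UL << pta_size) - 1);
}
#endif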
u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_pta vpta;
	union ia64_rr vrr;
	u64 pval;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	if (vpta.vf)
		pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
						0, 0, 0, 0, 0);
	else
		pval = 1;

	return  pval;
}

u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
{
	struct thash_data *data;
	union ia64_pta vpta;
	u64 key;

	vpta.val = vcpu_get_pta(vcpu);
	if (vpta.vf == 0) {
		key = 1;
		return key;
	}

	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (!data || !data->p)
		key = 1;
	else
		key = data->key;

	return key;
}

void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long thash, vadr;

	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
	thash = vcpu_thash(vcpu, vadr);
	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
}

void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long tag, vadr;

	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
	tag = vcpu_ttag(vcpu, vadr);
	vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
}
int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr)
{
	struct thash_data *data;
	union ia64_isr visr, pt_isr;
	struct kvm_pt_regs *regs;
	struct ia64_psr vpsr;

	regs = vcpu_regs(vcpu);
	pt_isr.val = VMX(vcpu, cr_isr);
	visr.val = 0;
	visr.ei = pt_isr.ei;
	visr.ir = pt_isr.ir;
	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	data = vhpt_lookup(vadr);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else {
			*padr = (data->gpaddr >> data->ps << data->ps) |
				(vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}

	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else {
			*padr = ((data->ppn >> (data->ps - 12)) << data->ps)
				| (vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}

	if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			alt_dtlb(vcpu, vadr);
			return IA64_FAULT;
		}
	} else {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			dvhpt_fault(vcpu, vadr);
			return IA64_FAULT;
		}
	}

	return IA64_NO_FAULT;
}
int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1, r3;

	r3 = vcpu_get_gr(vcpu, inst.M46.r3);

	if (vcpu_tpa(vcpu, r3, &r1))
		return IA64_FAULT;

	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
	return(IA64_NO_FAULT);
}

void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1, r3;

	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
	r1 = vcpu_tak(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
}

/************************************
 * Insert/Purge translation register/cache
 ************************************/
void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
}

void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
}
void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
	u64 ps, va, rid;
	struct thash_data *p_itr;

	ps = itir_ps(itir);
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;
	rid = vcpu_get_rr(vcpu, ifa);
	rid = rid & RR_RID_MASK;
	p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
	vcpu_set_tr(p_itr, pte, itir, va, rid);
	vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
}

void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
	u64 gpfn;
	u64 ps, va, rid;
	struct thash_data *p_dtr;

	ps = itir_ps(itir);
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;

	if (ps != _PAGE_SIZE_16M)
		thash_purge_entries(vcpu, va, ps);
	gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
	if (__gpfn_is_io(gpfn))
		pte |= VTLB_PTE_IO;
	rid = vcpu_get_rr(vcpu, va);
	rid = rid & RR_RID_MASK;
	p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
	vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
							pte, itir, va, rid);
	vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
}

void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
	int index;
	u64 va;

	va = PAGEALIGN(ifa, ps);
	while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
		vcpu->arch.dtrs[index].page_flags = 0;

	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
	int index;
	u64 va;

	va = PAGEALIGN(ifa, ps);
	while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
		vcpu->arch.itrs[index].page_flags = 0;

	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	va = PAGEALIGN(va, ps);
	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
{
	thash_purge_all(vcpu);
}

void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	long psr;

	local_irq_save(psr);
	p->exit_reason = EXIT_REASON_PTC_G;

	p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
	p->u.ptc_g_data.vaddr = va;
	p->u.ptc_g_data.ps = ps;
	vmm_transition(vcpu);
	/* Do Local Purge Here*/
	vcpu_ptc_l(vcpu, va, ps);
	local_irq_restore(psr);
}

void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	vcpu_ptc_ga(vcpu, va, ps);
}
void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	vcpu_ptc_e(vcpu, ifa);
}

void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
}

void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
}

void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
}

void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
}

void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
}
*vcpu
, INST64 inst
)
1332 unsigned long itir
, ifa
, pte
, slot
;
1334 slot
= vcpu_get_gr(vcpu
, inst
.M45
.r3
);
1335 pte
= vcpu_get_gr(vcpu
, inst
.M45
.r2
);
1336 itir
= vcpu_get_itir(vcpu
);
1337 ifa
= vcpu_get_ifa(vcpu
);
1338 vcpu_itr_d(vcpu
, slot
, pte
, itir
, ifa
);
1343 void kvm_itr_i(struct kvm_vcpu
*vcpu
, INST64 inst
)
1345 unsigned long itir
, ifa
, pte
, slot
;
1347 slot
= vcpu_get_gr(vcpu
, inst
.M45
.r3
);
1348 pte
= vcpu_get_gr(vcpu
, inst
.M45
.r2
);
1349 itir
= vcpu_get_itir(vcpu
);
1350 ifa
= vcpu_get_ifa(vcpu
);
1351 vcpu_itr_i(vcpu
, slot
, pte
, itir
, ifa
);
1354 void kvm_itc_d(struct kvm_vcpu
*vcpu
, INST64 inst
)
1356 unsigned long itir
, ifa
, pte
;
1358 itir
= vcpu_get_itir(vcpu
);
1359 ifa
= vcpu_get_ifa(vcpu
);
1360 pte
= vcpu_get_gr(vcpu
, inst
.M45
.r2
);
1361 vcpu_itc_d(vcpu
, pte
, itir
, ifa
);
1364 void kvm_itc_i(struct kvm_vcpu
*vcpu
, INST64 inst
)
1366 unsigned long itir
, ifa
, pte
;
1368 itir
= vcpu_get_itir(vcpu
);
1369 ifa
= vcpu_get_ifa(vcpu
);
1370 pte
= vcpu_get_gr(vcpu
, inst
.M45
.r2
);
1371 vcpu_itc_i(vcpu
, pte
, itir
, ifa
);
/*************************************
 * Moves to semi-privileged registers
 *************************************/

void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long imm;

	if (inst.M30.s)
		imm = -inst.M30.imm;
	else
		imm = inst.M30.imm;

	vcpu_set_itc(vcpu, imm);
}

void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r2;

	r2 = vcpu_get_gr(vcpu, inst.M29.r2);
	vcpu_set_itc(vcpu, r2);
}

void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1;

	r1 = vcpu_get_itc(vcpu);
	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
}

/**************************************************************************
  struct kvm_vcpu protection key register access routines
 **************************************************************************/

unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	return ((unsigned long)ia64_get_pkr(reg));
}

void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
{
	ia64_set_pkr(reg, val);
}
/********************************
 * Moves to privileged registers
 ********************************/
unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
					unsigned long val)
{
	union ia64_rr oldrr, newrr;
	unsigned long rrval;
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	unsigned long psr;

	oldrr.val = vcpu_get_rr(vcpu, reg);
	newrr.val = val;
	vcpu->arch.vrr[reg >> VRN_SHIFT] = val;

	switch ((unsigned long)(reg >> VRN_SHIFT)) {
	case VRN6:
		vcpu->arch.vmm_rr = vrrtomrr(val);
		local_irq_save(psr);
		p->exit_reason = EXIT_REASON_SWITCH_RR6;
		vmm_transition(vcpu);
		local_irq_restore(psr);
		break;
	case VRN4:
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr4 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		break;
	case VRN0:
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr0 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		break;
	default:
		ia64_set_rr(reg, vrrtomrr(val));
		break;
	}

	return (IA64_NO_FAULT);
}
void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_rr(vcpu, r3, r2);
}

void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
}

void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
}

void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pmc(vcpu, r3, r2);
}

void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pmd(vcpu, r3, r2);
}

void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pkr(vcpu, r3, r2);
}

void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_rr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_pkr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_dbr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_ibr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_pmc(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}
unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
{
	/* FIXME: This could get called as a result of a rsvd-reg fault */
	if (reg > (ia64_get_cpuid(3) & 0xff))
		return 0;
	else
		return ia64_get_cpuid(reg);
}

void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_cpuid(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
{
	VCPU(vcpu, tpr) = val;
	vcpu->arch.irq_check = 1;
}

unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r2;

	r2 = vcpu_get_gr(vcpu, inst.M32.r2);
	VCPU(vcpu, vcr[inst.M32.cr3]) = r2;

	switch (inst.M32.cr3) {
	case 0:
		vcpu_set_dcr(vcpu, r2);
		break;
	case 1:
		vcpu_set_itm(vcpu, r2);
		break;
	case 66:
		vcpu_set_tpr(vcpu, r2);
		break;
	case 67:
		vcpu_set_eoi(vcpu, r2);
		break;
	default:
		break;
	}

	return 0;
}

unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long tgt = inst.M33.r1;
	unsigned long val;

	switch (inst.M33.cr3) {
	case 65:
		val = vcpu_get_ivr(vcpu);
		vcpu_set_gr(vcpu, tgt, val, 0);
		break;
	case 67:
		vcpu_set_gr(vcpu, tgt, 0L, 0);
		break;
	default:
		val = VCPU(vcpu, vcr[inst.M33.cr3]);
		vcpu_set_gr(vcpu, tgt, val, 0);
		break;
	}

	return 0;
}
void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long mask;
	struct kvm_pt_regs *regs;
	struct ia64_psr old_psr, new_psr;

	old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	regs = vcpu_regs(vcpu);
	/* We only support guest as:
	 *  vpsr.pk = 0
	 *  vpsr.is = 0
	 * Otherwise panic
	 */
	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
		panic_vm(vcpu, "Only support guests with vpsr.pk =0 "
				"& vpsr.is=0\n");

	/*
	 * For those IA64_PSR bits: id/da/dd/ss/ed/ia
	 * Since these bits will become 0 after successful execution of each
	 * instruction, we will set them from mIA64_PSR
	 */
	VCPU(vcpu, vpsr) = val
		& (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
			IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

	if (!old_psr.i && (val & IA64_PSR_I)) {
		/* vpsr.i 0->1 */
		vcpu->arch.irq_check = 1;
	}
	new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	/*
	 * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
	 * except for the following bits:
	 * ic/i/dt/si/rt/mc/it/bn/vm
	 */
	mask =  IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
		IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
		IA64_PSR_VM;

	regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));

	check_mm_mode_switch(vcpu, old_psr, new_psr);

	return;
}
unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
{
	struct ia64_psr vpsr;

	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	if (!vpsr.ic)
		VCPU(vcpu, ifs) = regs->cr_ifs;
	regs->cr_ifs = IA64_IFS_V;
	return (IA64_NO_FAULT);
}

/**************************************************************************
  VCPU banked general register access routines
 **************************************************************************/
#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
	do {								\
		__asm__ __volatile__ (					\
				";;extr.u %0 = %3,%6,16;;\n"		\
				"dep %1 = %0, %1, 0, 16;;\n"		\
				"st8 [%4] = %1\n"			\
				"extr.u %0 = %2, 16, 16;;\n"		\
				"dep %3 = %0, %3, %6, 16;;\n"		\
				"st8 [%5] = %3\n"			\
				::"r"(i), "r"(*b1unat), "r"(*b0unat),	\
				"r"(*runat), "r"(b1unat), "r"(runat),	\
				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
	} while (0)

void vcpu_bsw0(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &VCPU(vcpu, vbnat);
	unsigned long *b1unat = &VCPU(vcpu, vnat);

	if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
		for (i = 0; i < 16; i++) {
			*b1++ = *r;
			*r++ = *b0++;
		}
		vcpu_bsw0_unat(i, b0unat, b1unat, runat,
				VMM_PT_REGS_R16_SLOT);
		VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
	}
}

#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
	do {								\
		__asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n"	\
				"dep %1 = %0, %1, 16, 16;;\n"		\
				"st8 [%4] = %1\n"			\
				"extr.u %0 = %2, 0, 16;;\n"		\
				"dep %3 = %0, %3, %6, 16;;\n"		\
				"st8 [%5] = %3\n"			\
				::"r"(i), "r"(*b0unat), "r"(*b1unat),	\
				"r"(*runat), "r"(b0unat), "r"(runat),	\
				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
	} while (0)

void vcpu_bsw1(struct kvm_vcpu *vcpu)
{
	unsigned long i;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &VCPU(vcpu, vbnat);
	unsigned long *b1unat = &VCPU(vcpu, vnat);

	if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
		for (i = 0; i < 16; i++) {
			*b0++ = *r;
			*r++ = *b1++;
		}
		vcpu_bsw1_unat(i, b0unat, b1unat, runat,
				VMM_PT_REGS_R16_SLOT);
		VCPU(vcpu, vpsr) |= IA64_PSR_BN;
	}
}
void vcpu_rfi(struct kvm_vcpu *vcpu)
{
	unsigned long ifs, psr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	psr = VCPU(vcpu, ipsr);
	if (psr & IA64_PSR_BN)
		vcpu_bsw1(vcpu);
	else
		vcpu_bsw0(vcpu);
	vcpu_set_psr(vcpu, psr);
	ifs = VCPU(vcpu, ifs);
	if (ifs >> 63)
		regs->cr_ifs = ifs;
	regs->cr_iip = VCPU(vcpu, iip);
}

/*
 * VPSR can't keep track of below bits of guest PSR.
 * This function gets guest PSR.
 */
unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
{
	unsigned long mask;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
		IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
	return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
}

void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vpsr;
	unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21)
					| inst.M44.imm;

	vpsr = vcpu_get_psr(vcpu);
	vpsr &= (~imm24);
	vcpu_set_psr(vcpu, vpsr);
}

void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vpsr;
	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
					| inst.M44.imm;

	vpsr = vcpu_get_psr(vcpu);
	vpsr |= imm24;
	vcpu_set_psr(vcpu, vpsr);
}
/*
 *  bit -- starting bit
 *  len -- how many bits
 */
#define MASK(bit,len)					\
({							\
		__u64	ret;				\
							\
		__asm __volatile("dep %0=-1, r0, %1, %2"\
				: "=r" (ret):		\
				"M" (bit),		\
				"M" (len));		\
		ret;					\
})

void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
{
	val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
	vcpu_set_psr(vcpu, val);
}
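/*
 * Illustrative note (not from the original file): MASK() uses the ia64
 * "dep" instruction to build a contiguous run of ones, so MASK(0, 32) is
 * 0x00000000ffffffff and MASK(32, 32) is 0xffffffff00000000; the function
 * above therefore merges the caller-supplied low word of psr with the
 * preserved high word.
 */
#if 0	/* example only, never compiled */
	u64 lo = MASK(0, 32);	/* 0x00000000ffffffff */
	u64 hi = MASK(32, 32);	/* 0xffffffff00000000 */
#endif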
void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long val;

	val = vcpu_get_gr(vcpu, inst.M35.r2);
	vcpu_set_psr_l(vcpu, val);
}

void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long val;

	val = vcpu_get_psr(vcpu);
	val = (val & MASK(0, 32)) | (val & MASK(35, 2));
	vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
}

void vcpu_increment_iip(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	if (ipsr->ri == 2) {
		ipsr->ri = 0;
		regs->cr_iip += 16;
	} else
		ipsr->ri++;
}

void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

	if (ipsr->ri == 0) {
		ipsr->ri = 2;
		regs->cr_iip -= 16;
	} else
		ipsr->ri--;
}

/** Emulate a privileged operation.
 *
 * @param vcpu virtual cpu
 * @cause the reason that caused the virtualization fault
 * @opcode the instruction code that caused the virtualization fault
 */
void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
{
	unsigned long status, cause, opcode;
	INST64 inst;

	status = IA64_NO_FAULT;
	cause = VMX(vcpu, cause);
	opcode = VMX(vcpu, opcode);
	inst.inst = opcode;
	/*
	 * Switch to actual virtual rid in rr0 and rr4,
	 * which is required by some tlb related instructions.
	 */
	prepare_if_physical_mode(vcpu);

	switch (cause) {
	case EVENT_RSM:
		kvm_rsm(vcpu, inst);
		break;
	case EVENT_SSM:
		kvm_ssm(vcpu, inst);
		break;
	case EVENT_MOV_TO_PSR:
		kvm_mov_to_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PSR:
		kvm_mov_from_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CR:
		kvm_mov_from_cr(vcpu, inst);
		break;
	case EVENT_MOV_TO_CR:
		kvm_mov_to_cr(vcpu, inst);
		break;
	case EVENT_BSW_0:
		vcpu_bsw0(vcpu);
		break;
	case EVENT_BSW_1:
		vcpu_bsw1(vcpu);
		break;
	case EVENT_COVER:
		vcpu_cover(vcpu);
		break;
	case EVENT_RFI:
		vcpu_rfi(vcpu);
		break;
	case EVENT_ITR_D:
		kvm_itr_d(vcpu, inst);
		break;
	case EVENT_ITR_I:
		kvm_itr_i(vcpu, inst);
		break;
	case EVENT_PTR_D:
		kvm_ptr_d(vcpu, inst);
		break;
	case EVENT_PTR_I:
		kvm_ptr_i(vcpu, inst);
		break;
	case EVENT_ITC_D:
		kvm_itc_d(vcpu, inst);
		break;
	case EVENT_ITC_I:
		kvm_itc_i(vcpu, inst);
		break;
	case EVENT_PTC_L:
		kvm_ptc_l(vcpu, inst);
		break;
	case EVENT_PTC_G:
		kvm_ptc_g(vcpu, inst);
		break;
	case EVENT_PTC_GA:
		kvm_ptc_ga(vcpu, inst);
		break;
	case EVENT_PTC_E:
		kvm_ptc_e(vcpu, inst);
		break;
	case EVENT_MOV_TO_RR:
		kvm_mov_to_rr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_RR:
		kvm_mov_from_rr(vcpu, inst);
		break;
	case EVENT_THASH:
		kvm_thash(vcpu, inst);
		break;
	case EVENT_TTAG:
		kvm_ttag(vcpu, inst);
		break;
	case EVENT_TPA:
		status = kvm_tpa(vcpu, inst);
		break;
	case EVENT_TAK:
		kvm_tak(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR_IMM:
		kvm_mov_to_ar_imm(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR:
		kvm_mov_to_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_FROM_AR:
		kvm_mov_from_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_TO_DBR:
		kvm_mov_to_dbr(vcpu, inst);
		break;
	case EVENT_MOV_TO_IBR:
		kvm_mov_to_ibr(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMC:
		kvm_mov_to_pmc(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMD:
		kvm_mov_to_pmd(vcpu, inst);
		break;
	case EVENT_MOV_TO_PKR:
		kvm_mov_to_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_DBR:
		kvm_mov_from_dbr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_IBR:
		kvm_mov_from_ibr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PMC:
		kvm_mov_from_pmc(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PKR:
		kvm_mov_from_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CPUID:
		kvm_mov_from_cpuid(vcpu, inst);
		break;
	default:
		status = IA64_FAULT;
		break;
	};
	/*Assume all status is NO_FAULT ?*/
	if (status == IA64_NO_FAULT && cause != EVENT_RFI)
		vcpu_increment_iip(vcpu);

	recover_if_physical_mode(vcpu);
}
void init_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	vcpu->arch.mode_flags = GUEST_IN_PHY;
	VMX(vcpu, vrr[0]) = 0x38;
	VMX(vcpu, vrr[1]) = 0x38;
	VMX(vcpu, vrr[2]) = 0x38;
	VMX(vcpu, vrr[3]) = 0x38;
	VMX(vcpu, vrr[4]) = 0x38;
	VMX(vcpu, vrr[5]) = 0x38;
	VMX(vcpu, vrr[6]) = 0x38;
	VMX(vcpu, vrr[7]) = 0x38;
	VCPU(vcpu, vpsr) = IA64_PSR_BN;
	VCPU(vcpu, dcr) = 0;
	/* pta.size must not be 0.  The minimum is 15 (32k) */
	VCPU(vcpu, pta) = 15 << 2;
	VCPU(vcpu, itv) = 0x10000;
	VCPU(vcpu, itm) = 0;
	VMX(vcpu, last_itc) = 0;

	VCPU(vcpu, lid) = VCPU_LID(vcpu);
	VCPU(vcpu, ivr) = 0;
	VCPU(vcpu, tpr) = 0x10000;
	VCPU(vcpu, eoi) = 0;
	VCPU(vcpu, irr[0]) = 0;
	VCPU(vcpu, irr[1]) = 0;
	VCPU(vcpu, irr[2]) = 0;
	VCPU(vcpu, irr[3]) = 0;
	VCPU(vcpu, pmv) = 0x10000;
	VCPU(vcpu, cmcv) = 0x10000;
	VCPU(vcpu, lrr0) = 0x10000;   /* default reset value? */
	VCPU(vcpu, lrr1) = 0x10000;   /* default reset value? */
	update_vhpi(vcpu, NULL_VECTOR);
	VLSAPIC_XTP(vcpu) = 0x80;	/* disabled */

	for (i = 0; i < 4; i++)
		VLSAPIC_INSVC(vcpu, i) = 0;
}
void kvm_init_all_rr(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	local_irq_save(psr);

	/* WARNING: not allow co-exist of both virtual mode and physical
	 * mode in same region
	 */

	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));

	if (is_physical_mode(vcpu)) {
		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
			panic_vm(vcpu, "Machine Status conflicts!\n");

		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
		ia64_dv_serialize_data();
	} else {
		ia64_set_rr((VRN0 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr4);
		ia64_dv_serialize_data();
	}
	ia64_set_rr((VRN1 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN1])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN2 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN2])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN3 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN3])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN5 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN5])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN7 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN7])));
	ia64_dv_serialize_data();
	ia64_srlz_d();
	local_irq_restore(psr);
}
	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
static void kvm_show_registers(struct kvm_pt_regs *regs)
{
	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;

	struct kvm_vcpu *vcpu = current_vcpu;
	printk("vcpu 0x%p vcpu %d\n",
	       vcpu, vcpu->vcpu_id);

	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]\n",
	       regs->cr_ipsr, regs->cr_ifs, ip);

	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
	       regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
	printk("rnat: %016lx bspstore: %016lx pr  : %016lx\n",
	       regs->ar_rnat, regs->ar_bspstore, regs->pr);
	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
	       regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
	printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0,
	       regs->b6, regs->b7);
	printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
	       regs->f6.u.bits[1], regs->f6.u.bits[0],
	       regs->f7.u.bits[1], regs->f7.u.bits[0]);
	printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
	       regs->f8.u.bits[1], regs->f8.u.bits[0],
	       regs->f9.u.bits[1], regs->f9.u.bits[0]);
	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
	       regs->f10.u.bits[1], regs->f10.u.bits[0],
	       regs->f11.u.bits[1], regs->f11.u.bits[0]);

	printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1,
	       regs->r2, regs->r3);
	printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8,
	       regs->r9, regs->r10);
	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
	       regs->r12, regs->r13);
	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
	       regs->r15, regs->r16);
	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
	       regs->r18, regs->r19);
	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
	       regs->r21, regs->r22);
	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
	       regs->r24, regs->r25);
	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
	       regs->r27, regs->r28);
	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
	       regs->r30, regs->r31);
}
void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
{
	va_list args;
	char buf[256];

	struct kvm_pt_regs *regs = vcpu_regs(v);
	struct exit_ctl_data *p = &v->arch.exit_data;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(buf);
	kvm_show_registers(regs);
	p->exit_reason = EXIT_REASON_VM_PANIC;
	vmm_transition(v);
	/*Never to return*/
	while (1);
}