/*
 * kvm_vcpu.c: handling all virtual cpu related things.
 *
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Shaofan Li (Susue Li) <susie.li@intel.com>
 *  Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com>
 *  Xuefei Xu (Anthony Xu) <Anthony.xu@intel.com>
 *  Xiantao Zhang <xiantao.zhang@intel.com>
 */
#include <linux/kvm_host.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/ia64regs.h>
#include <asm/gcc_intrin.h>
#include <asm/kregs.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include "asm-offsets.h"
#include "vcpu.h"
/*
 * Special notes:
 * - Index by it/dt/rt sequence
 * - Only existing mode transitions are allowed in this table
 * - RSE is placed at lazy mode when emulating guest partial mode
 * - If gva happens to be rr0 and rr4, only allowed case is identity
 *   mapping (gva=gpa), or panic! (How?)
 */
int mm_switch_table[8][8] = {
	/*  2004/09/12(Kevin): Allow switch to self */
	/*
	 *  (it,dt,rt): (0,0,0) -> (1,1,1)
	 *  This kind of transition usually occurs in the very early
	 *  stage of Linux boot up procedure. Another case is in efi
	 *  and pal calls. (see "arch/ia64/kernel/head.S")
	 *
	 *  (it,dt,rt): (0,0,0) -> (0,1,1)
	 *  This kind of transition is found when OSYa exits efi boot
	 *  service. Due to gva = gpa in this case (same region), data
	 *  access can be satisfied through the itlb entry for physical
	 *  emulation.
	 */
	{SW_SELF, 0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
	{0,  0,  0,  0,  0,  0,  0,  0},
	{0,  0,  0,  0,  0,  0,  0,  0},
	/*
	 *  (it,dt,rt): (0,1,1) -> (1,1,1)
	 *  This kind of transition is found in OSYa.
	 *
	 *  (it,dt,rt): (0,1,1) -> (0,0,0)
	 *  This kind of transition is found in OSYa.
	 */
	{SW_NOP, 0,  0,  SW_SELF, 0,  0,  0,  SW_P2V},
	/* (1,0,0)->(1,1,1) */
	{0,  0,  0,  0,  0,  0,  0,  SW_P2V},
	/*
	 *  (it,dt,rt): (1,0,1) -> (1,1,1)
	 *  This kind of transition usually occurs when Linux returns
	 *  from the low level TLB miss handlers.
	 *  (see "arch/ia64/kernel/ivt.S")
	 */
	{0,  0,  0,  0,  0,  SW_SELF, 0,  SW_P2V},
	{0,  0,  0,  0,  0,  0,  0,  0},
	/*
	 *  (it,dt,rt): (1,1,1) -> (1,0,1)
	 *  This kind of transition usually occurs in Linux low level
	 *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
	 *
	 *  (it,dt,rt): (1,1,1) -> (0,0,0)
	 *  This kind of transition usually occurs in pal and efi calls,
	 *  which requires running in physical mode.
	 *  (see "arch/ia64/kernel/head.S")
	 */
	{SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
};
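/*
 * Note: the row and column index is MODE_IND(psr), i.e. the guest's
 * (it, dt, rt) bits packed into a 3-bit value, so
 * mm_switch_table[MODE_IND(old_psr)][MODE_IND(new_psr)] names the world
 * switch needed for that transition (SW_V2P, SW_P2V, SW_SELF, SW_NOP,
 * or 0 for a transition that is not expected to happen).
 */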
void physical_mode_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mode_flags = GUEST_IN_PHY;
}

void switch_to_physical_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	/* Save original virtual mode rr[0] and rr[4] */
	psr = ia64_clear_ic();
	ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
	ia64_srlz_d();

	ia64_set_psr(psr);
	return;
}

void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	psr = ia64_clear_ic();
	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
	ia64_srlz_d();
	ia64_set_psr(psr);
	return;
}
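/*
 * Both helpers above assume psr.ic is off while the region registers
 * are rewritten: ia64_clear_ic() returns the previous psr so that
 * interruption collection can be restored afterwards, and each
 * ia64_set_rr() is followed by a data serialization before the new RID
 * is relied upon.
 */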
static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
{
	return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
}

void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
					struct ia64_psr new_psr)
{
	int act;

	act = mm_switch_action(old_psr, new_psr);
	switch (act) {
	case SW_V2P:
		/*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
		old_psr.val, new_psr.val);*/
		switch_to_physical_rid(vcpu);
		/*
		 * Set rse to enforced lazy, to prevent active rse
		 * save/restore when guest is in physical mode.
		 */
		vcpu->arch.mode_flags |= GUEST_IN_PHY;
		break;
	case SW_P2V:
		switch_to_virtual_rid(vcpu);
		/*
		 * Recover old mode which is saved when entering
		 * guest physical mode.
		 */
		vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
		break;
	default:
		break;
	}
}
/*
 * In physical mode, insert tc/tr for region 0 and 4 uses
 * RID[0] and RID[4] which is for physical mode emulation.
 * However what those inserted tc/tr wants is rid for
 * virtual mode. So original virtual rid needs to be restored
 * before insert.
 *
 * Operations which required such switch include:
 *  - insertions (itc.*, itr.*)
 *  - purges (ptc.* and ptr.*)
 *  - tpa
 *  - tak
 *  - thash?, ttag?
 * All above needs actual virtual rid for destination entry.
 */

void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
					struct ia64_psr new_psr)
{
	if ((old_psr.dt != new_psr.dt)
			|| (old_psr.it != new_psr.it)
			|| (old_psr.rt != new_psr.rt))
		switch_mm_mode(vcpu, old_psr, new_psr);

	return;
}
/*
 * In physical mode, insert tc/tr for region 0 and 4 uses
 * RID[0] and RID[4] which is for physical mode emulation.
 * However what those inserted tc/tr wants is rid for
 * virtual mode. So original virtual rid needs to be restored
 * before insert.
 *
 * Operations which required such switch include:
 *  - insertions (itc.*, itr.*)
 *  - purges (ptc.* and ptr.*)
 *  - tpa
 *  - tak
 *  - thash?, ttag?
 * All above needs actual virtual rid for destination entry.
 */

void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
{
	if (is_physical_mode(vcpu)) {
		vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
		switch_to_virtual_rid(vcpu);
	}
	return;
}

/* Recover always follows prepare */
void recover_if_physical_mode(struct kvm_vcpu *vcpu)
{
	if (is_physical_mode(vcpu))
		switch_to_physical_rid(vcpu);
	vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
	return;
}
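/*
 * General register access. RPT(x) is the byte offset of field x inside
 * struct kvm_pt_regs; gr_info[] maps the static registers r1-r31 to
 * their save slots in the trap frame so getreg()/setreg() below can
 * address them directly.
 */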
#define RPT(x)	((u16) &((struct kvm_pt_regs *)0)->x)

static u16 gr_info[32] = {
	0, 	/* r0 is read-only : WE SHOULD NEVER GET THIS */
	RPT(r1), RPT(r2), RPT(r3),
	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
	RPT(r12), RPT(r13), RPT(r14), RPT(r15),
	RPT(r16), RPT(r17), RPT(r18), RPT(r19),
	RPT(r20), RPT(r21), RPT(r22), RPT(r23),
	RPT(r24), RPT(r25), RPT(r26), RPT(r27),
	RPT(r28), RPT(r29), RPT(r30), RPT(r31)
};
#define IA64_FIRST_STACKED_GR	32
#define IA64_FIRST_ROTATING_FR	32

static inline unsigned long
rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
{
	reg += rrb;
	if (reg >= sor)
		reg -= sor;
	return reg;
}

/*
 * Return the (rotated) index for floating point register REGNUM
 * (REGNUM must be in the range 32-127; the result is in the range 0-95).
 */
static inline unsigned long fph_index(struct kvm_pt_regs *regs,
						long regnum)
{
	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
}
/*
 * The inverse of the above: given bspstore and the number of
 * registers, calculate ar.bsp.
 */
static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
							long num_regs)
{
	long delta = ia64_rse_slot_num(addr) + num_regs;
	int i = 0;

	if (num_regs < 0)
		delta -= 0x3e;
	if (delta < 0) {
		while (delta <= -0x3f) {
			i--;
			delta += 0x3f;
		}
	} else {
		while (delta >= 0x3f) {
			i++;
			delta -= 0x3f;
		}
	}

	return addr + num_regs + i;
}
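/*
 * get_rse_reg()/set_rse_reg() access a stacked register (r32 and above)
 * of the interrupted guest frame directly in the VMM register backing
 * store at current_vcpu + VMM_RBS_OFFSET.  Register rotation (sor and
 * rrb.gr from cr.ifs) is applied first, and the NaT bit is taken either
 * from AR.RNAT or from the in-memory NaT collection slot, depending on
 * whether the slot has already been spilled past AR.BSPSTORE.
 */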
static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
					unsigned long *val, int *nat)
{
	unsigned long *bsp, *addr, *rnat_addr, *bspstore;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	new_rsc = old_rsc&(~(0x3));
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);

	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	bsp = kbs + (regs->loadrs >> 19);

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	if (addr >= bspstore) {
		ia64_flushrs();
		ia64_mf();
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	}
	*val = *addr;
	if (bspstore < rnat_addr)
		*nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
							& nat_mask);
	else
		*nat = (int)!!((*rnat_addr) & nat_mask);
	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
}
void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
				unsigned long val, unsigned long nat)
{
	unsigned long *bsp, *bspstore, *addr, *rnat_addr;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc, psr;
	unsigned long rnat;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	/* put RSC to lazy mode, and set loadrs 0 */
	new_rsc = old_rsc & (~0x3fff0003);
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
	bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	local_irq_save(psr);
	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	if (addr >= bspstore) {
		ia64_flushrs();
		ia64_mf();
		*addr = val;
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		if (bspstore < rnat_addr)
			rnat = rnat & (~nat_mask);
		else
			*rnat_addr = (*rnat_addr)&(~nat_mask);

		ia64_mf();
		ia64_loadrs();
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	} else {
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		*addr = val;
		if (bspstore < rnat_addr)
			rnat = rnat&(~nat_mask);
		else
			*rnat_addr = (*rnat_addr) & (~nat_mask);

		ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	}
	local_irq_restore(psr);
	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
}
void getreg(unsigned long regnum, unsigned long *val,
				int *nat, struct kvm_pt_regs *regs)
{
	unsigned long addr, *unat;

	if (regnum >= IA64_FIRST_STACKED_GR) {
		get_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	addr = (unsigned long)regs;
	unat = &regs->eml_unat;

	addr += gr_info[regnum];

	*val  = *(unsigned long *)addr;
	/*
	 * do it only when requested
	 */
	if (nat)
		*nat  = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
}
void setreg(unsigned long regnum, unsigned long val,
			int nat, struct kvm_pt_regs *regs)
{
	unsigned long addr;
	unsigned long bitmask;
	unsigned long *unat;

	/*
	 * First take care of stacked registers
	 */
	if (regnum >= IA64_FIRST_STACKED_GR) {
		set_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	addr = (unsigned long)regs;
	unat = &regs->eml_unat;
	/*
	 * add offset from base of struct
	 */
	addr += gr_info[regnum];

	*(unsigned long *)addr = val;

	/*
	 * We need to clear the corresponding UNAT bit to fully emulate the load
	 * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
	 */
	bitmask = 1UL << ((addr >> 3) & 0x3f);
	if (nat)
		*unat |= bitmask;
	else
		*unat &= ~bitmask;
}
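/*
 * vcpu_get_gr()/vcpu_set_gr() are the entry points used by the
 * emulation code below: r0 reads as zero, and writes to r0 or to a
 * register outside the current frame are silently dropped.
 */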
u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long val;

	if (!reg)
		return 0;
	getreg(reg, &val, 0, regs);
	return val;
}

void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	long sof = (regs->cr_ifs) & 0x7f;

	if (!reg)
		return;
	if (reg >= sof + 32)
		return;
	setreg(reg, value, nat, regs);	/* FIXME: handle NATs later*/
}
void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
						struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration */
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define	CASE_FIXED_FP(reg)			\
	case (reg):				\
		ia64_stf_spill(fpval, reg);	\
		break
	switch (regnum) {
		/* CASE_FIXED_FP(0) ... CASE_FIXED_FP(127) */
	}
#undef CASE_FIXED_FP
}

void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
						struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration */
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define	CASE_FIXED_FP(reg)			\
	case (reg):				\
		ia64_ldf_fill(reg, fpval);	\
		break
	switch (regnum) {
		/* CASE_FIXED_FP(0) ... CASE_FIXED_FP(127) */
	}
#undef CASE_FIXED_FP
}
void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
						struct ia64_fpreg *val)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	getfpreg(reg, val, regs);	/* FIXME: handle NATs later*/
}

void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
						struct ia64_fpreg *val)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	setfpreg(reg, val, regs);	/* FIXME: handle NATs later*/
}
/************************************************************************
 * timer (itc/itm/itv) emulation
 ***********************************************************************/
u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
{
	unsigned long guest_itc;

	guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);

	if (guest_itc >= VMX(vcpu, last_itc))
		VMX(vcpu, last_itc) = guest_itc;

	return VMX(vcpu, last_itc);
}
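/*
 * The guest ITC is the host AR.ITC shifted by a per-VCPU itc_offset;
 * last_itc records the largest value ever returned so that guest reads
 * of the cycle counter never appear to go backwards.
 */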
static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);

static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_vcpu *v;
	struct kvm *kvm;
	int i;
	long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
	unsigned long vitv = VCPU(vcpu, itv);

	kvm = (struct kvm *)KVM_VM_BASE;

	if (vcpu->vcpu_id == 0) {
		for (i = 0; i < kvm->arch.online_vcpus; i++) {
			v = (struct kvm_vcpu *)((char *)vcpu +
					sizeof(struct kvm_vcpu_data) * i);
			VMX(v, itc_offset) = itc_offset;
			VMX(v, last_itc) = 0;
		}
	}
	VMX(vcpu, last_itc) = 0;
	if (VCPU(vcpu, itm) <= val) {
		VMX(vcpu, itc_check) = 0;
		vcpu_unpend_interrupt(vcpu, vitv);
	} else {
		VMX(vcpu, itc_check) = 1;
		vcpu_set_itm(vcpu, VCPU(vcpu, itm));
	}
}
static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, itm));
}

static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
{
	unsigned long vitv = VCPU(vcpu, itv);
	VCPU(vcpu, itm) = val;

	if (val > vcpu_get_itc(vcpu)) {
		VMX(vcpu, itc_check) = 1;
		vcpu_unpend_interrupt(vcpu, vitv);
		VMX(vcpu, timer_pending) = 0;
	} else
		VMX(vcpu, itc_check) = 0;
}

#define ITV_VECTOR(itv)		(itv&0xff)
#define ITV_IRQ_MASK(itv)	(itv&(1<<16))

static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, itv) = val;
	if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
		vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
		vcpu->arch.timer_pending = 0;
	}
}

static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
{
	int vec;

	vec = highest_inservice_irq(vcpu);
	if (vec == NULL_VECTOR)
		return;
	VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
	vcpu->arch.irq_new_pending = 1;
}
/* See Table 5-8 in SDM vol2 for the definition */
int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
{
	union ia64_tpr vtpr;

	vtpr.val = VCPU(vcpu, tpr);

	if (h_inservice == NMI_VECTOR)
		return IRQ_MASKED_BY_INSVC;

	if (h_pending == NMI_VECTOR) {
		/* Non Maskable Interrupt */
		return IRQ_NO_MASKED;
	}

	if (h_inservice == ExtINT_VECTOR)
		return IRQ_MASKED_BY_INSVC;

	if (h_pending == ExtINT_VECTOR) {
		if (vtpr.mmi) {
			/* mask all external IRQ */
			return IRQ_MASKED_BY_VTPR;
		} else
			return IRQ_NO_MASKED;
	}

	if (is_higher_irq(h_pending, h_inservice)) {
		if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
			return IRQ_NO_MASKED;
		else
			return IRQ_MASKED_BY_VTPR;
	} else
		return IRQ_MASKED_BY_INSVC;
}
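/*
 * vcpu_pend_interrupt()/vcpu_unpend_interrupt() set or clear a vector
 * in the virtual IRR with host interrupts disabled, then raise
 * irq_new_pending so the injection path re-evaluates deliverable
 * interrupts on the next guest entry.
 */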
void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
	long spsr;
	int ret;

	local_irq_save(spsr);
	ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(spsr);

	vcpu->arch.irq_new_pending = 1;
}

void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
	long spsr;
	int ret;

	local_irq_save(spsr);
	ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(spsr);

	vcpu->arch.irq_new_pending = 1;
}
void update_vhpi(struct kvm_vcpu *vcpu, int vec)
{
	u64 vhpi;

	if (vec == NULL_VECTOR)
		vhpi = 0;
	else if (vec == NMI_VECTOR)
		vhpi = 32;
	else if (vec == ExtINT_VECTOR)
		vhpi = 16;
	else
		vhpi = vec >> 4;

	VCPU(vcpu, vhpi) = vhpi;
	if (VCPU(vcpu, vac).a_int)
		ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
				(u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
}
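/*
 * vcpu_get_ivr() below emulates a guest read of cr.ivr: the highest
 * pending vector is delivered only if irq_masked() allows it, in which
 * case it is moved from the IRR into the in-service set; otherwise the
 * spurious vector is returned and the VHPI is updated accordingly.
 */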
u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
{
	int vec, h_inservice, mask;

	vec = highest_pending_irq(vcpu);
	h_inservice = highest_inservice_irq(vcpu);
	mask = irq_masked(vcpu, vec, h_inservice);
	if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
		if (VCPU(vcpu, vhpi))
			update_vhpi(vcpu, NULL_VECTOR);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	if (mask == IRQ_MASKED_BY_VTPR) {
		update_vhpi(vcpu, vec);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
	vcpu_unpend_interrupt(vcpu, vec);
	return (u64)vec;
}
/**************************************************************************
  Privileged operation emulation routines
 **************************************************************************/
u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_pta vpta;
	union ia64_rr vrr;
	u64 pval;
	u64 vhpt_offset;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
	if (vpta.vf)
		pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
						vpta.val, 0, 0, 0, 0);
	else
		pval = (vadr & VRN_MASK) | vhpt_offset |
			(vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
	return pval;
}
u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_rr vrr;
	union ia64_pta vpta;
	u64 pval;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	if (vpta.vf)
		pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
						0, 0, 0, 0, 0);
	else
		pval = 1;
	return pval;
}
u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
{
	struct thash_data *data;
	union ia64_pta vpta;
	u64 key;

	vpta.val = vcpu_get_pta(vcpu);

	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (!data || !data->p)
		key = 1;
	else
		key = data->key;

	return key;
}
void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long thash, vadr;

	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
	thash = vcpu_thash(vcpu, vadr);
	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
}

void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long tag, vadr;

	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
	tag = vcpu_ttag(vcpu, vadr);
	vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
}
int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
{
	struct thash_data *data;
	union ia64_isr visr, pt_isr;
	struct kvm_pt_regs *regs;
	struct ia64_psr vpsr;

	regs = vcpu_regs(vcpu);
	pt_isr.val = VMX(vcpu, cr_isr);
	visr.val = 0;
	visr.ei = pt_isr.ei;
	visr.ir = pt_isr.ir;
	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	data = vhpt_lookup(vadr);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else {
			*padr = (data->gpaddr >> data->ps << data->ps) |
				(vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}

	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else {
			*padr = ((data->ppn >> (data->ps - 12)) << data->ps)
				| (vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}

	if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			alt_dtlb(vcpu, vadr);
			return IA64_FAULT;
		}
	} else {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			dvhpt_fault(vcpu, vadr);
			return IA64_FAULT;
		}
	}

	return IA64_NO_FAULT;
}
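/*
 * kvm_tpa() wraps vcpu_tpa() for the tpa instruction: the translated
 * physical address is written back to the target register only when no
 * fault was raised by the lookup above.
 */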
int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1, r3;

	r3 = vcpu_get_gr(vcpu, inst.M46.r3);

	if (vcpu_tpa(vcpu, r3, &r1))
		return IA64_FAULT;

	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
	return(IA64_NO_FAULT);
}

void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1, r3;

	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
	r1 = vcpu_tak(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
}
/************************************
 * Insert/Purge translation register/cache
 ************************************/
void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
}

void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
}

void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
	u64 ps, va, rid;
	struct thash_data *p_itr;

	ps = itir_ps(itir);
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;
	rid = vcpu_get_rr(vcpu, ifa);
	rid = rid & RR_RID_MASK;
	p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
	vcpu_set_tr(p_itr, pte, itir, va, rid);
	vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
}

void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
	u64 gpfn;
	u64 ps, va, rid;
	struct thash_data *p_dtr;

	ps = itir_ps(itir);
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;

	if (ps != _PAGE_SIZE_16M)
		thash_purge_entries(vcpu, va, ps);
	gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
	if (__gpfn_is_io(gpfn))
		pte |= VTLB_PTE_IO;
	rid = vcpu_get_rr(vcpu, va);
	rid = rid & RR_RID_MASK;
	p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
	vcpu_set_tr((struct thash_data *)&vcpu->arch.dtrs[slot],
							pte, itir, va, rid);
	vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
}
void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
	int index;
	u64 va;

	va = PAGEALIGN(ifa, ps);
	while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
		vcpu->arch.dtrs[index].page_flags = 0;

	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
	int index;
	u64 va;

	va = PAGEALIGN(ifa, ps);
	while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
		vcpu->arch.itrs[index].page_flags = 0;

	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	va = PAGEALIGN(va, ps);
	thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
{
	thash_purge_all(vcpu);
}
void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	long psr;

	local_irq_save(psr);
	p->exit_reason = EXIT_REASON_PTC_G;

	p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
	p->u.ptc_g_data.vaddr = va;
	p->u.ptc_g_data.ps = ps;
	vmm_transition(vcpu);
	/* Do Local Purge Here*/
	vcpu_ptc_l(vcpu, va, ps);
	local_irq_restore(psr);
}

void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	vcpu_ptc_ga(vcpu, va, ps);
}
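/*
 * ptc.g/ptc.ga are forwarded to the host side: the purge parameters are
 * stored in exit_data with reason EXIT_REASON_PTC_G and vmm_transition()
 * hands control back so the global part of the purge can be carried out
 * there, after which the local purge is performed here.
 */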
void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	vcpu_ptc_e(vcpu, ifa);
}

void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
}

void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
}

void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
}

void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
}

void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long ifa, itir;

	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
	itir = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
}
void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte, slot;

	slot = vcpu_get_gr(vcpu, inst.M45.r3);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	vcpu_itr_d(vcpu, slot, pte, itir, ifa);
}

void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte, slot;

	slot = vcpu_get_gr(vcpu, inst.M45.r3);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	vcpu_itr_i(vcpu, slot, pte, itir, ifa);
}

void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte;

	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_itc_d(vcpu, pte, itir, ifa);
}

void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir, ifa, pte;

	itir = vcpu_get_itir(vcpu);
	ifa = vcpu_get_ifa(vcpu);
	pte = vcpu_get_gr(vcpu, inst.M45.r2);
	vcpu_itc_i(vcpu, pte, itir, ifa);
}
/*************************************
 * Moves to semi-privileged registers
 *************************************/

void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long imm;

	if (inst.M30.s)
		imm = -inst.M30.imm;
	else
		imm = inst.M30.imm;

	vcpu_set_itc(vcpu, imm);
}

void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r2;

	r2 = vcpu_get_gr(vcpu, inst.M29.r2);
	vcpu_set_itc(vcpu, r2);
}

void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r1;

	r1 = vcpu_get_itc(vcpu);
	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
}
/**************************************************************************
  struct kvm_vcpu protection key register access routines
 **************************************************************************/

unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	return ((unsigned long)ia64_get_pkr(reg));
}

void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
{
	ia64_set_pkr(reg, val);
}
/********************************
 * Moves to privileged registers
 ********************************/
unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
					unsigned long val)
{
	union ia64_rr oldrr, newrr;
	unsigned long rrval;
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	unsigned long psr;

	oldrr.val = vcpu_get_rr(vcpu, reg);
	newrr.val = val;
	vcpu->arch.vrr[reg >> VRN_SHIFT] = val;

	switch ((unsigned long)(reg >> VRN_SHIFT)) {
	case VRN6:
		vcpu->arch.vmm_rr = vrrtomrr(val);
		local_irq_save(psr);
		p->exit_reason = EXIT_REASON_SWITCH_RR6;
		vmm_transition(vcpu);
		local_irq_restore(psr);
		break;
	case VRN4:
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr4 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		break;
	case VRN0:
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr0 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		break;
	default:
		ia64_set_rr(reg, vrrtomrr(val));
		break;
	}

	return (IA64_NO_FAULT);
}
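/*
 * Note on the switch above: a write to rr6 updates vcpu->arch.vmm_rr
 * and has to go through vmm_transition() (EXIT_REASON_SWITCH_RR6),
 * while rr0/rr4 writes are only latched into metaphysical_saved_rr0/4
 * and loaded into the hardware region registers when the guest is in
 * virtual mode.
 */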
void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_rr(vcpu, r3, r2);
}

void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
}

void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
}

void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pmc(vcpu, r3, r2);
}

void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pmd(vcpu, r3, r2);
}

void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r2;

	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	vcpu_set_pkr(vcpu, r3, r2);
}
void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_rr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_pkr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_dbr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_ibr(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_pmc(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}
unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
{
	/* FIXME: This could get called as a result of a rsvd-reg fault */
	if (reg > (ia64_get_cpuid(3) & 0xff))
		return 0;
	else
		return ia64_get_cpuid(reg);
}

void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r3, r1;

	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
	r1 = vcpu_get_cpuid(vcpu, r3);
	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
{
	VCPU(vcpu, tpr) = val;
	vcpu->arch.irq_check = 1;
}
unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r2;

	r2 = vcpu_get_gr(vcpu, inst.M32.r2);
	VCPU(vcpu, vcr[inst.M32.cr3]) = r2;

	switch (inst.M32.cr3) {
	case 0:
		vcpu_set_dcr(vcpu, r2);
		break;
	case 1:
		vcpu_set_itm(vcpu, r2);
		break;
	case 66:
		vcpu_set_tpr(vcpu, r2);
		break;
	case 67:
		vcpu_set_eoi(vcpu, r2);
		break;
	default:
		break;
	}

	return 0;
}

unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long tgt = inst.M33.r1;
	unsigned long val;

	switch (inst.M33.cr3) {
	case 65:
		val = vcpu_get_ivr(vcpu);
		vcpu_set_gr(vcpu, tgt, val, 0);
		break;
	case 67:
		vcpu_set_gr(vcpu, tgt, 0L, 0);
		break;
	default:
		val = VCPU(vcpu, vcr[inst.M33.cr3]);
		vcpu_set_gr(vcpu, tgt, val, 0);
		break;
	}

	return 0;
}
void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long mask;
	struct kvm_pt_regs *regs;
	struct ia64_psr old_psr, new_psr;

	old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	regs = vcpu_regs(vcpu);
	/*
	 * We only support guests with vpsr.pk = 0 and vpsr.is = 0;
	 * otherwise panic.
	 */
	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
		panic_vm(vcpu, "Only support guests with vpsr.pk = 0 "
				"& vpsr.is = 0\n");

	/*
	 * For those IA64_PSR bits: id/da/dd/ss/ed/ia
	 * Since these bits will become 0 after successful execution of
	 * each instruction, we do not keep them in vpsr.
	 */
	VCPU(vcpu, vpsr) = val
		& (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
			IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

	if (!old_psr.i && (val & IA64_PSR_I)) {
		/* vpsr.i 0->1 */
		vcpu->arch.irq_check = 1;
	}
	new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	/*
	 * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
	 * except for the following bits:
	 *  ic/i/dt/si/rt/mc/it/bn/vm
	 */
	mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
		IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
		IA64_PSR_VM;

	regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));

	check_mm_mode_switch(vcpu, old_psr, new_psr);
	return;
}
unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
{
	struct ia64_psr vpsr;

	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	if (!vpsr.ic)
		VCPU(vcpu, ifs) = regs->cr_ifs;
	regs->cr_ifs = IA64_IFS_V;
	return (IA64_NO_FAULT);
}
/**************************************************************************
  VCPU banked general register access routines
 **************************************************************************/
#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
	do {								\
		__asm__ __volatile__ (					\
				";;extr.u %0 = %3,%6,16;;\n"		\
				"dep %1 = %0, %1, 0, 16;;\n"		\
				"st8 [%4] = %1\n"			\
				"extr.u %0 = %2, 16, 16;;\n"		\
				"dep %3 = %0, %3, %6, 16;;\n"		\
				"st8 [%5] = %3\n"			\
				::"r"(i), "r"(*b1unat), "r"(*b0unat),	\
				"r"(*runat), "r"(b1unat), "r"(runat),	\
				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
	} while (0)

void vcpu_bsw0(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &VCPU(vcpu, vbnat);
	unsigned long *b1unat = &VCPU(vcpu, vnat);

	if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
		for (i = 0; i < 16; i++) {
			*b1++ = *r;
			*r++ = *b0++;
		}
		vcpu_bsw0_unat(i, b0unat, b1unat, runat,
				VMM_PT_REGS_R16_SLOT);
		VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
	}
}
#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
	do {								\
		__asm__ __volatile__ (					\
				";;extr.u %0 = %3, %6, 16;;\n"		\
				"dep %1 = %0, %1, 16, 16;;\n"		\
				"st8 [%4] = %1\n"			\
				"extr.u %0 = %2, 0, 16;;\n"		\
				"dep %3 = %0, %3, %6, 16;;\n"		\
				"st8 [%5] = %3\n"			\
				::"r"(i), "r"(*b0unat), "r"(*b1unat),	\
				"r"(*runat), "r"(b0unat), "r"(runat),	\
				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
	} while (0)

void vcpu_bsw1(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long *r = &regs->r16;
	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
	unsigned long *runat = &regs->eml_unat;
	unsigned long *b0unat = &VCPU(vcpu, vbnat);
	unsigned long *b1unat = &VCPU(vcpu, vnat);

	if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
		for (i = 0; i < 16; i++) {
			*b0++ = *r;
			*r++ = *b1++;
		}
		vcpu_bsw1_unat(i, b0unat, b1unat, runat,
				VMM_PT_REGS_R16_SLOT);
		VCPU(vcpu, vpsr) |= IA64_PSR_BN;
	}
}
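/*
 * vcpu_bsw0()/vcpu_bsw1() above emulate the bank switch: r16-r31 are
 * swapped between the trap frame and the vbgr/vgr save areas, the
 * corresponding UNAT bits are patched by the inline asm helpers, and
 * vpsr.bn is updated to reflect the active bank.
 */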
void vcpu_rfi(struct kvm_vcpu *vcpu)
{
	unsigned long ifs, psr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	psr = VCPU(vcpu, ipsr);
	if (psr & IA64_PSR_BN)
		vcpu_bsw1(vcpu);
	else
		vcpu_bsw0(vcpu);
	vcpu_set_psr(vcpu, psr);
	ifs = VCPU(vcpu, ifs);
	if (ifs >> 63)
		regs->cr_ifs = ifs;
	regs->cr_iip = VCPU(vcpu, iip);
}
/*
   VPSR can't keep track of below bits of guest PSR.
   This function gets guest PSR.
 */

unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
{
	unsigned long mask;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
		IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
	return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
}
void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vpsr;
	unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21)
					| inst.M44.imm;

	vpsr = vcpu_get_psr(vcpu);
	vpsr &= (~imm24);
	vcpu_set_psr(vcpu, vpsr);
}

void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vpsr;
	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
					| inst.M44.imm;

	vpsr = vcpu_get_psr(vcpu);
	vpsr |= imm24;
	vcpu_set_psr(vcpu, vpsr);
}
/*
 * Generate Mask
 *  bit -- starting bit
 *  len -- how many bits
 */
#define MASK(bit,len)					\
({							\
	__u64	ret;					\
							\
	__asm __volatile("dep %0=-1, r0, %1, %2"	\
		: "=r" (ret) :				\
		  "M" (bit),				\
		  "M" (len));				\
	ret;						\
})

void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
{
	val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
	vcpu_set_psr(vcpu, val);
}
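/*
 * vcpu_set_psr_l() only replaces the lower 32 bits of the guest PSR;
 * the upper half is taken from the current value via the MASK() helper
 * above.  kvm_mov_to_psr() below uses it for the mov-to-psr (M35)
 * instruction.
 */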
void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long val;

	val = vcpu_get_gr(vcpu, inst.M35.r2);
	vcpu_set_psr_l(vcpu, val);
}

void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long val;

	val = vcpu_get_psr(vcpu);
	val = (val & MASK(0, 32)) | (val & MASK(35, 2));
	vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
}
void vcpu_increment_iip(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

	if (ipsr->ri == 2) {
		ipsr->ri = 0;
		regs->cr_iip += 16;
	} else
		ipsr->ri++;
}

void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;

	if (ipsr->ri == 0) {
		ipsr->ri = 2;
		regs->cr_iip -= 16;
	} else
		ipsr->ri--;
}
/** Emulate a privileged operation.
 *
 * @param vcpu virtual cpu
 * @cause the reason for the virtualization fault
 * @opcode the instruction that caused the virtualization fault
 */

void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
{
	unsigned long status, cause, opcode;
	INST64 inst;

	status = IA64_NO_FAULT;
	cause = VMX(vcpu, cause);
	opcode = VMX(vcpu, opcode);
	inst.inst = opcode;
	/*
	 * Switch to actual virtual rid in rr0 and rr4,
	 * which is required by some tlb related instructions.
	 */
	prepare_if_physical_mode(vcpu);

	switch (cause) {
	case EVENT_RSM:
		kvm_rsm(vcpu, inst);
		break;
	case EVENT_SSM:
		kvm_ssm(vcpu, inst);
		break;
	case EVENT_MOV_TO_PSR:
		kvm_mov_to_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PSR:
		kvm_mov_from_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CR:
		kvm_mov_from_cr(vcpu, inst);
		break;
	case EVENT_MOV_TO_CR:
		kvm_mov_to_cr(vcpu, inst);
		break;
	case EVENT_RFI:
		vcpu_rfi(vcpu);
		break;
	case EVENT_ITR_D:
		kvm_itr_d(vcpu, inst);
		break;
	case EVENT_ITR_I:
		kvm_itr_i(vcpu, inst);
		break;
	case EVENT_PTR_D:
		kvm_ptr_d(vcpu, inst);
		break;
	case EVENT_PTR_I:
		kvm_ptr_i(vcpu, inst);
		break;
	case EVENT_ITC_D:
		kvm_itc_d(vcpu, inst);
		break;
	case EVENT_ITC_I:
		kvm_itc_i(vcpu, inst);
		break;
	case EVENT_PTC_L:
		kvm_ptc_l(vcpu, inst);
		break;
	case EVENT_PTC_G:
		kvm_ptc_g(vcpu, inst);
		break;
	case EVENT_PTC_GA:
		kvm_ptc_ga(vcpu, inst);
		break;
	case EVENT_PTC_E:
		kvm_ptc_e(vcpu, inst);
		break;
	case EVENT_MOV_TO_RR:
		kvm_mov_to_rr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_RR:
		kvm_mov_from_rr(vcpu, inst);
		break;
	case EVENT_THASH:
		kvm_thash(vcpu, inst);
		break;
	case EVENT_TTAG:
		kvm_ttag(vcpu, inst);
		break;
	case EVENT_TPA:
		status = kvm_tpa(vcpu, inst);
		break;
	case EVENT_TAK:
		kvm_tak(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR_IMM:
		kvm_mov_to_ar_imm(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR:
		kvm_mov_to_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_FROM_AR:
		kvm_mov_from_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_TO_DBR:
		kvm_mov_to_dbr(vcpu, inst);
		break;
	case EVENT_MOV_TO_IBR:
		kvm_mov_to_ibr(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMC:
		kvm_mov_to_pmc(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMD:
		kvm_mov_to_pmd(vcpu, inst);
		break;
	case EVENT_MOV_TO_PKR:
		kvm_mov_to_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_DBR:
		kvm_mov_from_dbr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_IBR:
		kvm_mov_from_ibr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PMC:
		kvm_mov_from_pmc(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PKR:
		kvm_mov_from_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CPUID:
		kvm_mov_from_cpuid(vcpu, inst);
		break;
	default:
		status = IA64_FAULT;
		break;
	}

	/*Assume all status is NO_FAULT ?*/
	if (status == IA64_NO_FAULT && cause != EVENT_RFI)
		vcpu_increment_iip(vcpu);

	recover_if_physical_mode(vcpu);
}
void init_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	vcpu->arch.mode_flags = GUEST_IN_PHY;
	VMX(vcpu, vrr[0]) = 0x38;
	VMX(vcpu, vrr[1]) = 0x38;
	VMX(vcpu, vrr[2]) = 0x38;
	VMX(vcpu, vrr[3]) = 0x38;
	VMX(vcpu, vrr[4]) = 0x38;
	VMX(vcpu, vrr[5]) = 0x38;
	VMX(vcpu, vrr[6]) = 0x38;
	VMX(vcpu, vrr[7]) = 0x38;
	VCPU(vcpu, vpsr) = IA64_PSR_BN;
	VCPU(vcpu, dcr) = 0;
	/* pta.size must not be 0.  The minimum is 15 (32k) */
	VCPU(vcpu, pta) = 15 << 2;
	VCPU(vcpu, itv) = 0x10000;
	VCPU(vcpu, itm) = 0;
	VMX(vcpu, last_itc) = 0;

	VCPU(vcpu, lid) = VCPU_LID(vcpu);
	VCPU(vcpu, ivr) = 0;
	VCPU(vcpu, tpr) = 0x10000;
	VCPU(vcpu, eoi) = 0;
	VCPU(vcpu, irr[0]) = 0;
	VCPU(vcpu, irr[1]) = 0;
	VCPU(vcpu, irr[2]) = 0;
	VCPU(vcpu, irr[3]) = 0;
	VCPU(vcpu, pmv) = 0x10000;
	VCPU(vcpu, cmcv) = 0x10000;
	VCPU(vcpu, lrr0) = 0x10000;	/* default reset value? */
	VCPU(vcpu, lrr1) = 0x10000;	/* default reset value? */
	update_vhpi(vcpu, NULL_VECTOR);
	VLSAPIC_XTP(vcpu) = 0x80;	/* disabled */

	for (i = 0; i < 4; i++)
		VLSAPIC_INSVC(vcpu, i) = 0;
}
void kvm_init_all_rr(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	local_irq_save(psr);

	/* WARNING: not allow co-exist of both virtual mode and physical
	 * mode in same region
	 */

	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));

	if (is_physical_mode(vcpu)) {
		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
			panic_vm(vcpu, "Machine Status conflicts!\n");

		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
		ia64_dv_serialize_data();
	} else {
		ia64_set_rr((VRN0 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr4);
		ia64_dv_serialize_data();
	}
	ia64_set_rr((VRN1 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN1])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN2 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN2])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN3 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN3])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN5 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN5])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN7 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN7])));
	ia64_dv_serialize_data();
	ia64_srlz_d();
	ia64_set_psr(psr);
}
	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
						0, 0, 0, 0, 0, 0);
static void kvm_show_registers(struct kvm_pt_regs *regs)
{
	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;

	struct kvm_vcpu *vcpu = current_vcpu;

	printk("vcpu 0x%p vcpu %d\n",
			vcpu, vcpu->vcpu_id);

	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]\n",
			regs->cr_ipsr, regs->cr_ifs, ip);

	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
			regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
	printk("rnat: %016lx bspstore: %016lx pr  : %016lx\n",
			regs->ar_rnat, regs->ar_bspstore, regs->pr);
	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
			regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
	printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0,
			regs->b6, regs->b7);
	printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
			regs->f6.u.bits[1], regs->f6.u.bits[0],
			regs->f7.u.bits[1], regs->f7.u.bits[0]);
	printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
			regs->f8.u.bits[1], regs->f8.u.bits[0],
			regs->f9.u.bits[1], regs->f9.u.bits[0]);
	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
			regs->f10.u.bits[1], regs->f10.u.bits[0],
			regs->f11.u.bits[1], regs->f11.u.bits[0]);

	printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1,
			regs->r2, regs->r3);
	printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8,
			regs->r9, regs->r10);
	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
			regs->r12, regs->r13);
	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
			regs->r15, regs->r16);
	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
			regs->r18, regs->r19);
	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
			regs->r21, regs->r22);
	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
			regs->r24, regs->r25);
	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
			regs->r27, regs->r28);
	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
			regs->r30, regs->r31);
}
void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
{
	va_list args;
	char buf[256];

	struct kvm_pt_regs *regs = vcpu_regs(v);
	struct exit_ctl_data *p = &v->arch.exit_data;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(buf);
	kvm_show_registers(regs);
	p->exit_reason = EXIT_REASON_VM_PANIC;
	vmm_transition(v);
	/* Never returns */
	while (1)
		;
}