2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 * Copyright Novell Inc 2010
17 * Authors: Alexander Graf <agraf@suse.de>
21 #include <asm/kvm_ppc.h>
22 #include <asm/disassemble.h>
23 #include <asm/kvm_book3s.h>
24 #include <asm/kvm_fpu.h>
26 #include <asm/cacheflush.h>
27 #include <linux/vmalloc.h>
32 #define dprintk printk
34 #define dprintk(...) do { } while(0);
50 #define OP_31_LFSX 535
51 #define OP_31_LFSUX 567
52 #define OP_31_LFDX 599
53 #define OP_31_LFDUX 631
54 #define OP_31_STFSX 663
55 #define OP_31_STFSUX 695
56 #define OP_31_STFX 727
57 #define OP_31_STFUX 759
58 #define OP_31_LWIZX 887
59 #define OP_31_STFIWX 983
61 #define OP_59_FADDS 21
62 #define OP_59_FSUBS 20
63 #define OP_59_FSQRTS 22
64 #define OP_59_FDIVS 18
66 #define OP_59_FMULS 25
67 #define OP_59_FRSQRTES 26
68 #define OP_59_FMSUBS 28
69 #define OP_59_FMADDS 29
70 #define OP_59_FNMSUBS 30
71 #define OP_59_FNMADDS 31
74 #define OP_63_FCPSGN 8
76 #define OP_63_FCTIW 14
77 #define OP_63_FCTIWZ 15
80 #define OP_63_FSQRT 22
84 #define OP_63_FRSQRTE 26
85 #define OP_63_FMSUB 28
86 #define OP_63_FMADD 29
87 #define OP_63_FNMSUB 30
88 #define OP_63_FNMADD 31
89 #define OP_63_FCMPO 32
90 #define OP_63_MTFSB1 38 // XXX
93 #define OP_63_MCRFS 64
94 #define OP_63_MTFSB0 70
96 #define OP_63_MTFSFI 134
97 #define OP_63_FABS 264
98 #define OP_63_MFFS 583
99 #define OP_63_MTFSF 711
101 #define OP_4X_PS_CMPU0 0
102 #define OP_4X_PSQ_LX 6
103 #define OP_4XW_PSQ_STX 7
104 #define OP_4A_PS_SUM0 10
105 #define OP_4A_PS_SUM1 11
106 #define OP_4A_PS_MULS0 12
107 #define OP_4A_PS_MULS1 13
108 #define OP_4A_PS_MADDS0 14
109 #define OP_4A_PS_MADDS1 15
110 #define OP_4A_PS_DIV 18
111 #define OP_4A_PS_SUB 20
112 #define OP_4A_PS_ADD 21
113 #define OP_4A_PS_SEL 23
114 #define OP_4A_PS_RES 24
115 #define OP_4A_PS_MUL 25
116 #define OP_4A_PS_RSQRTE 26
117 #define OP_4A_PS_MSUB 28
118 #define OP_4A_PS_MADD 29
119 #define OP_4A_PS_NMSUB 30
120 #define OP_4A_PS_NMADD 31
121 #define OP_4X_PS_CMPO0 32
122 #define OP_4X_PSQ_LUX 38
123 #define OP_4XW_PSQ_STUX 39
124 #define OP_4X_PS_NEG 40
125 #define OP_4X_PS_CMPU1 64
126 #define OP_4X_PS_MR 72
127 #define OP_4X_PS_CMPO1 96
128 #define OP_4X_PS_NABS 136
129 #define OP_4X_PS_ABS 264
130 #define OP_4X_PS_MERGE00 528
131 #define OP_4X_PS_MERGE01 560
132 #define OP_4X_PS_MERGE10 592
133 #define OP_4X_PS_MERGE11 624
135 #define SCALAR_NONE 0
136 #define SCALAR_HIGH (1 << 0)
137 #define SCALAR_LOW (1 << 1)
138 #define SCALAR_NO_PS0 (1 << 2)
139 #define SCALAR_NO_PS1 (1 << 3)
141 #define GQR_ST_TYPE_MASK 0x00000007
142 #define GQR_ST_TYPE_SHIFT 0
143 #define GQR_ST_SCALE_MASK 0x00003f00
144 #define GQR_ST_SCALE_SHIFT 8
145 #define GQR_LD_TYPE_MASK 0x00070000
146 #define GQR_LD_TYPE_SHIFT 16
147 #define GQR_LD_SCALE_MASK 0x3f000000
148 #define GQR_LD_SCALE_SHIFT 24
150 #define GQR_QUANTIZE_FLOAT 0
151 #define GQR_QUANTIZE_U8 4
152 #define GQR_QUANTIZE_U16 5
153 #define GQR_QUANTIZE_S8 6
154 #define GQR_QUANTIZE_S16 7
156 #define FPU_LS_SINGLE 0
157 #define FPU_LS_DOUBLE 1
158 #define FPU_LS_SINGLE_LOW 2
160 static inline void kvmppc_sync_qpr(struct kvm_vcpu
*vcpu
, int rt
)
162 kvm_cvt_df(&vcpu
->arch
.fpr
[rt
], &vcpu
->arch
.qpr
[rt
]);
165 static void kvmppc_inject_pf(struct kvm_vcpu
*vcpu
, ulong eaddr
, bool is_store
)
168 struct kvm_vcpu_arch_shared
*shared
= vcpu
->arch
.shared
;
170 shared
->msr
= kvmppc_set_field(shared
->msr
, 33, 36, 0);
171 shared
->msr
= kvmppc_set_field(shared
->msr
, 42, 47, 0);
174 dsisr
= kvmppc_set_field(0, 33, 33, 1);
176 shared
->dsisr
= kvmppc_set_field(dsisr
, 38, 38, 1);
177 kvmppc_book3s_queue_irqprio(vcpu
, BOOK3S_INTERRUPT_DATA_STORAGE
);
180 static int kvmppc_emulate_fpr_load(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
,
181 int rs
, ulong addr
, int ls_type
)
183 int emulated
= EMULATE_FAIL
;
186 int len
= sizeof(u32
);
188 if (ls_type
== FPU_LS_DOUBLE
)
191 /* read from memory */
192 r
= kvmppc_ld(vcpu
, &addr
, len
, tmp
, true);
193 vcpu
->arch
.paddr_accessed
= addr
;
196 kvmppc_inject_pf(vcpu
, addr
, false);
198 } else if (r
== EMULATE_DO_MMIO
) {
199 emulated
= kvmppc_handle_load(run
, vcpu
, KVM_REG_FPR
| rs
, len
, 1);
203 emulated
= EMULATE_DONE
;
205 /* put in registers */
208 kvm_cvt_fd((u32
*)tmp
, &vcpu
->arch
.fpr
[rs
]);
209 vcpu
->arch
.qpr
[rs
] = *((u32
*)tmp
);
212 vcpu
->arch
.fpr
[rs
] = *((u64
*)tmp
);
216 dprintk(KERN_INFO
"KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64
*)tmp
,
223 static int kvmppc_emulate_fpr_store(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
,
224 int rs
, ulong addr
, int ls_type
)
226 int emulated
= EMULATE_FAIL
;
234 kvm_cvt_df(&vcpu
->arch
.fpr
[rs
], (u32
*)tmp
);
238 case FPU_LS_SINGLE_LOW
:
239 *((u32
*)tmp
) = vcpu
->arch
.fpr
[rs
];
240 val
= vcpu
->arch
.fpr
[rs
] & 0xffffffff;
244 *((u64
*)tmp
) = vcpu
->arch
.fpr
[rs
];
245 val
= vcpu
->arch
.fpr
[rs
];
253 r
= kvmppc_st(vcpu
, &addr
, len
, tmp
, true);
254 vcpu
->arch
.paddr_accessed
= addr
;
256 kvmppc_inject_pf(vcpu
, addr
, true);
257 } else if (r
== EMULATE_DO_MMIO
) {
258 emulated
= kvmppc_handle_store(run
, vcpu
, val
, len
, 1);
260 emulated
= EMULATE_DONE
;
263 dprintk(KERN_INFO
"KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n",
269 static int kvmppc_emulate_psq_load(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
,
270 int rs
, ulong addr
, bool w
, int i
)
272 int emulated
= EMULATE_FAIL
;
277 /* read from memory */
279 r
= kvmppc_ld(vcpu
, &addr
, sizeof(u32
), tmp
, true);
280 memcpy(&tmp
[1], &one
, sizeof(u32
));
282 r
= kvmppc_ld(vcpu
, &addr
, sizeof(u32
) * 2, tmp
, true);
284 vcpu
->arch
.paddr_accessed
= addr
;
286 kvmppc_inject_pf(vcpu
, addr
, false);
288 } else if ((r
== EMULATE_DO_MMIO
) && w
) {
289 emulated
= kvmppc_handle_load(run
, vcpu
, KVM_REG_FPR
| rs
, 4, 1);
290 vcpu
->arch
.qpr
[rs
] = tmp
[1];
292 } else if (r
== EMULATE_DO_MMIO
) {
293 emulated
= kvmppc_handle_load(run
, vcpu
, KVM_REG_FQPR
| rs
, 8, 1);
297 emulated
= EMULATE_DONE
;
299 /* put in registers */
300 kvm_cvt_fd(&tmp
[0], &vcpu
->arch
.fpr
[rs
]);
301 vcpu
->arch
.qpr
[rs
] = tmp
[1];
303 dprintk(KERN_INFO
"KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp
[0],
304 tmp
[1], addr
, w
? 4 : 8);
310 static int kvmppc_emulate_psq_store(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
,
311 int rs
, ulong addr
, bool w
, int i
)
313 int emulated
= EMULATE_FAIL
;
316 int len
= w
? sizeof(u32
) : sizeof(u64
);
318 kvm_cvt_df(&vcpu
->arch
.fpr
[rs
], &tmp
[0]);
319 tmp
[1] = vcpu
->arch
.qpr
[rs
];
321 r
= kvmppc_st(vcpu
, &addr
, len
, tmp
, true);
322 vcpu
->arch
.paddr_accessed
= addr
;
324 kvmppc_inject_pf(vcpu
, addr
, true);
325 } else if ((r
== EMULATE_DO_MMIO
) && w
) {
326 emulated
= kvmppc_handle_store(run
, vcpu
, tmp
[0], 4, 1);
327 } else if (r
== EMULATE_DO_MMIO
) {
328 u64 val
= ((u64
)tmp
[0] << 32) | tmp
[1];
329 emulated
= kvmppc_handle_store(run
, vcpu
, val
, 8, 1);
331 emulated
= EMULATE_DONE
;
334 dprintk(KERN_INFO
"KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n",
335 tmp
[0], tmp
[1], addr
, len
);
341 * Cuts out inst bits with ordering according to spec.
342 * That means the leftmost bit is zero. All given bits are included.
344 static inline u32
inst_get_field(u32 inst
, int msb
, int lsb
)
346 return kvmppc_get_field(inst
, msb
+ 32, lsb
+ 32);
350 * Replaces inst bits with ordering according to spec.
352 static inline u32
inst_set_field(u32 inst
, int msb
, int lsb
, int value
)
354 return kvmppc_set_field(inst
, msb
+ 32, lsb
+ 32, value
);
357 bool kvmppc_inst_is_paired_single(struct kvm_vcpu
*vcpu
, u32 inst
)
359 if (!(vcpu
->arch
.hflags
& BOOK3S_HFLAG_PAIRED_SINGLE
))
362 switch (get_op(inst
)) {
378 switch (inst_get_field(inst
, 21, 30)) {
389 case OP_4X_PS_MERGE00
:
390 case OP_4X_PS_MERGE01
:
391 case OP_4X_PS_MERGE10
:
392 case OP_4X_PS_MERGE11
:
396 switch (inst_get_field(inst
, 25, 30)) {
398 case OP_4XW_PSQ_STUX
:
402 switch (inst_get_field(inst
, 26, 30)) {
407 case OP_4A_PS_MADDS0
:
408 case OP_4A_PS_MADDS1
:
415 case OP_4A_PS_RSQRTE
:
424 switch (inst_get_field(inst
, 21, 30)) {
432 switch (inst_get_field(inst
, 26, 30)) {
442 switch (inst_get_field(inst
, 21, 30)) {
464 switch (inst_get_field(inst
, 26, 30)) {
475 switch (inst_get_field(inst
, 21, 30)) {
493 static int get_d_signext(u32 inst
)
495 int d
= inst
& 0x8ff;
503 static int kvmppc_ps_three_in(struct kvm_vcpu
*vcpu
, bool rc
,
504 int reg_out
, int reg_in1
, int reg_in2
,
505 int reg_in3
, int scalar
,
506 void (*func
)(u64
*fpscr
,
508 u32
*src2
, u32
*src3
))
510 u32
*qpr
= vcpu
->arch
.qpr
;
511 u64
*fpr
= vcpu
->arch
.fpr
;
513 u32 ps0_in1
, ps0_in2
, ps0_in3
;
514 u32 ps1_in1
, ps1_in2
, ps1_in3
;
520 kvm_cvt_df(&fpr
[reg_in1
], &ps0_in1
);
521 kvm_cvt_df(&fpr
[reg_in2
], &ps0_in2
);
522 kvm_cvt_df(&fpr
[reg_in3
], &ps0_in3
);
524 if (scalar
& SCALAR_LOW
)
525 ps0_in2
= qpr
[reg_in2
];
527 func(&vcpu
->arch
.fpscr
, &ps0_out
, &ps0_in1
, &ps0_in2
, &ps0_in3
);
529 dprintk(KERN_INFO
"PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
530 ps0_in1
, ps0_in2
, ps0_in3
, ps0_out
);
532 if (!(scalar
& SCALAR_NO_PS0
))
533 kvm_cvt_fd(&ps0_out
, &fpr
[reg_out
]);
536 ps1_in1
= qpr
[reg_in1
];
537 ps1_in2
= qpr
[reg_in2
];
538 ps1_in3
= qpr
[reg_in3
];
540 if (scalar
& SCALAR_HIGH
)
543 if (!(scalar
& SCALAR_NO_PS1
))
544 func(&vcpu
->arch
.fpscr
, &qpr
[reg_out
], &ps1_in1
, &ps1_in2
, &ps1_in3
);
546 dprintk(KERN_INFO
"PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
547 ps1_in1
, ps1_in2
, ps1_in3
, qpr
[reg_out
]);
552 static int kvmppc_ps_two_in(struct kvm_vcpu
*vcpu
, bool rc
,
553 int reg_out
, int reg_in1
, int reg_in2
,
555 void (*func
)(u64
*fpscr
,
559 u32
*qpr
= vcpu
->arch
.qpr
;
560 u64
*fpr
= vcpu
->arch
.fpr
;
562 u32 ps0_in1
, ps0_in2
;
564 u32 ps1_in1
, ps1_in2
;
570 kvm_cvt_df(&fpr
[reg_in1
], &ps0_in1
);
572 if (scalar
& SCALAR_LOW
)
573 ps0_in2
= qpr
[reg_in2
];
575 kvm_cvt_df(&fpr
[reg_in2
], &ps0_in2
);
577 func(&vcpu
->arch
.fpscr
, &ps0_out
, &ps0_in1
, &ps0_in2
);
579 if (!(scalar
& SCALAR_NO_PS0
)) {
580 dprintk(KERN_INFO
"PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
581 ps0_in1
, ps0_in2
, ps0_out
);
583 kvm_cvt_fd(&ps0_out
, &fpr
[reg_out
]);
587 ps1_in1
= qpr
[reg_in1
];
588 ps1_in2
= qpr
[reg_in2
];
590 if (scalar
& SCALAR_HIGH
)
593 func(&vcpu
->arch
.fpscr
, &ps1_out
, &ps1_in1
, &ps1_in2
);
595 if (!(scalar
& SCALAR_NO_PS1
)) {
596 qpr
[reg_out
] = ps1_out
;
598 dprintk(KERN_INFO
"PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n",
599 ps1_in1
, ps1_in2
, qpr
[reg_out
]);
605 static int kvmppc_ps_one_in(struct kvm_vcpu
*vcpu
, bool rc
,
606 int reg_out
, int reg_in
,
608 u32
*dst
, u32
*src1
))
610 u32
*qpr
= vcpu
->arch
.qpr
;
611 u64
*fpr
= vcpu
->arch
.fpr
;
619 kvm_cvt_df(&fpr
[reg_in
], &ps0_in
);
620 func(&vcpu
->arch
.fpscr
, &ps0_out
, &ps0_in
);
622 dprintk(KERN_INFO
"PS1 ps0 -> f(0x%x) = 0x%x\n",
625 kvm_cvt_fd(&ps0_out
, &fpr
[reg_out
]);
628 ps1_in
= qpr
[reg_in
];
629 func(&vcpu
->arch
.fpscr
, &qpr
[reg_out
], &ps1_in
);
631 dprintk(KERN_INFO
"PS1 ps1 -> f(0x%x) = 0x%x\n",
632 ps1_in
, qpr
[reg_out
]);
637 int kvmppc_emulate_paired_single(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
)
639 u32 inst
= kvmppc_get_last_inst(vcpu
);
640 enum emulation_result emulated
= EMULATE_DONE
;
642 int ax_rd
= inst_get_field(inst
, 6, 10);
643 int ax_ra
= inst_get_field(inst
, 11, 15);
644 int ax_rb
= inst_get_field(inst
, 16, 20);
645 int ax_rc
= inst_get_field(inst
, 21, 25);
646 short full_d
= inst_get_field(inst
, 16, 31);
648 u64
*fpr_d
= &vcpu
->arch
.fpr
[ax_rd
];
649 u64
*fpr_a
= &vcpu
->arch
.fpr
[ax_ra
];
650 u64
*fpr_b
= &vcpu
->arch
.fpr
[ax_rb
];
651 u64
*fpr_c
= &vcpu
->arch
.fpr
[ax_rc
];
653 bool rcomp
= (inst
& 1) ? true : false;
654 u32 cr
= kvmppc_get_cr(vcpu
);
659 if (!kvmppc_inst_is_paired_single(vcpu
, inst
))
662 if (!(vcpu
->arch
.shared
->msr
& MSR_FP
)) {
663 kvmppc_book3s_queue_irqprio(vcpu
, BOOK3S_INTERRUPT_FP_UNAVAIL
);
664 return EMULATE_AGAIN
;
667 kvmppc_giveup_ext(vcpu
, MSR_FP
);
670 /* Do we need to clear FE0 / FE1 here? Don't think so. */
673 for (i
= 0; i
< ARRAY_SIZE(vcpu
->arch
.fpr
); i
++) {
675 kvm_cvt_df(&vcpu
->arch
.fpr
[i
], &f
);
676 dprintk(KERN_INFO
"FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
677 i
, f
, vcpu
->arch
.fpr
[i
], i
, vcpu
->arch
.qpr
[i
]);
681 switch (get_op(inst
)) {
684 ulong addr
= ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0;
685 bool w
= inst_get_field(inst
, 16, 16) ? true : false;
686 int i
= inst_get_field(inst
, 17, 19);
688 addr
+= get_d_signext(inst
);
689 emulated
= kvmppc_emulate_psq_load(run
, vcpu
, ax_rd
, addr
, w
, i
);
694 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
);
695 bool w
= inst_get_field(inst
, 16, 16) ? true : false;
696 int i
= inst_get_field(inst
, 17, 19);
698 addr
+= get_d_signext(inst
);
699 emulated
= kvmppc_emulate_psq_load(run
, vcpu
, ax_rd
, addr
, w
, i
);
701 if (emulated
== EMULATE_DONE
)
702 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
707 ulong addr
= ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0;
708 bool w
= inst_get_field(inst
, 16, 16) ? true : false;
709 int i
= inst_get_field(inst
, 17, 19);
711 addr
+= get_d_signext(inst
);
712 emulated
= kvmppc_emulate_psq_store(run
, vcpu
, ax_rd
, addr
, w
, i
);
717 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
);
718 bool w
= inst_get_field(inst
, 16, 16) ? true : false;
719 int i
= inst_get_field(inst
, 17, 19);
721 addr
+= get_d_signext(inst
);
722 emulated
= kvmppc_emulate_psq_store(run
, vcpu
, ax_rd
, addr
, w
, i
);
724 if (emulated
== EMULATE_DONE
)
725 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
730 switch (inst_get_field(inst
, 21, 30)) {
733 emulated
= EMULATE_FAIL
;
737 ulong addr
= ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0;
738 bool w
= inst_get_field(inst
, 21, 21) ? true : false;
739 int i
= inst_get_field(inst
, 22, 24);
741 addr
+= kvmppc_get_gpr(vcpu
, ax_rb
);
742 emulated
= kvmppc_emulate_psq_load(run
, vcpu
, ax_rd
, addr
, w
, i
);
747 emulated
= EMULATE_FAIL
;
751 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
);
752 bool w
= inst_get_field(inst
, 21, 21) ? true : false;
753 int i
= inst_get_field(inst
, 22, 24);
755 addr
+= kvmppc_get_gpr(vcpu
, ax_rb
);
756 emulated
= kvmppc_emulate_psq_load(run
, vcpu
, ax_rd
, addr
, w
, i
);
758 if (emulated
== EMULATE_DONE
)
759 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
763 vcpu
->arch
.fpr
[ax_rd
] = vcpu
->arch
.fpr
[ax_rb
];
764 vcpu
->arch
.fpr
[ax_rd
] ^= 0x8000000000000000ULL
;
765 vcpu
->arch
.qpr
[ax_rd
] = vcpu
->arch
.qpr
[ax_rb
];
766 vcpu
->arch
.qpr
[ax_rd
] ^= 0x80000000;
770 emulated
= EMULATE_FAIL
;
774 vcpu
->arch
.fpr
[ax_rd
] = vcpu
->arch
.fpr
[ax_rb
];
775 vcpu
->arch
.qpr
[ax_rd
] = vcpu
->arch
.qpr
[ax_rb
];
779 emulated
= EMULATE_FAIL
;
783 vcpu
->arch
.fpr
[ax_rd
] = vcpu
->arch
.fpr
[ax_rb
];
784 vcpu
->arch
.fpr
[ax_rd
] |= 0x8000000000000000ULL
;
785 vcpu
->arch
.qpr
[ax_rd
] = vcpu
->arch
.qpr
[ax_rb
];
786 vcpu
->arch
.qpr
[ax_rd
] |= 0x80000000;
790 vcpu
->arch
.fpr
[ax_rd
] = vcpu
->arch
.fpr
[ax_rb
];
791 vcpu
->arch
.fpr
[ax_rd
] &= ~0x8000000000000000ULL
;
792 vcpu
->arch
.qpr
[ax_rd
] = vcpu
->arch
.qpr
[ax_rb
];
793 vcpu
->arch
.qpr
[ax_rd
] &= ~0x80000000;
795 case OP_4X_PS_MERGE00
:
797 vcpu
->arch
.fpr
[ax_rd
] = vcpu
->arch
.fpr
[ax_ra
];
798 /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
799 kvm_cvt_df(&vcpu
->arch
.fpr
[ax_rb
],
800 &vcpu
->arch
.qpr
[ax_rd
]);
802 case OP_4X_PS_MERGE01
:
804 vcpu
->arch
.fpr
[ax_rd
] = vcpu
->arch
.fpr
[ax_ra
];
805 vcpu
->arch
.qpr
[ax_rd
] = vcpu
->arch
.qpr
[ax_rb
];
807 case OP_4X_PS_MERGE10
:
809 /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
810 kvm_cvt_fd(&vcpu
->arch
.qpr
[ax_ra
],
811 &vcpu
->arch
.fpr
[ax_rd
]);
812 /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
813 kvm_cvt_df(&vcpu
->arch
.fpr
[ax_rb
],
814 &vcpu
->arch
.qpr
[ax_rd
]);
816 case OP_4X_PS_MERGE11
:
818 /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
819 kvm_cvt_fd(&vcpu
->arch
.qpr
[ax_ra
],
820 &vcpu
->arch
.fpr
[ax_rd
]);
821 vcpu
->arch
.qpr
[ax_rd
] = vcpu
->arch
.qpr
[ax_rb
];
825 switch (inst_get_field(inst
, 25, 30)) {
828 ulong addr
= ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0;
829 bool w
= inst_get_field(inst
, 21, 21) ? true : false;
830 int i
= inst_get_field(inst
, 22, 24);
832 addr
+= kvmppc_get_gpr(vcpu
, ax_rb
);
833 emulated
= kvmppc_emulate_psq_store(run
, vcpu
, ax_rd
, addr
, w
, i
);
836 case OP_4XW_PSQ_STUX
:
838 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
);
839 bool w
= inst_get_field(inst
, 21, 21) ? true : false;
840 int i
= inst_get_field(inst
, 22, 24);
842 addr
+= kvmppc_get_gpr(vcpu
, ax_rb
);
843 emulated
= kvmppc_emulate_psq_store(run
, vcpu
, ax_rd
, addr
, w
, i
);
845 if (emulated
== EMULATE_DONE
)
846 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
851 switch (inst_get_field(inst
, 26, 30)) {
853 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
854 ax_rb
, ax_ra
, SCALAR_NO_PS0
| SCALAR_HIGH
, fps_fadds
);
855 vcpu
->arch
.fpr
[ax_rd
] = vcpu
->arch
.fpr
[ax_rc
];
858 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
859 ax_ra
, ax_rb
, SCALAR_NO_PS1
| SCALAR_LOW
, fps_fadds
);
860 vcpu
->arch
.qpr
[ax_rd
] = vcpu
->arch
.qpr
[ax_rc
];
863 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
864 ax_ra
, ax_rc
, SCALAR_HIGH
, fps_fmuls
);
867 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
868 ax_ra
, ax_rc
, SCALAR_LOW
, fps_fmuls
);
870 case OP_4A_PS_MADDS0
:
871 emulated
= kvmppc_ps_three_in(vcpu
, rcomp
, ax_rd
,
872 ax_ra
, ax_rc
, ax_rb
, SCALAR_HIGH
, fps_fmadds
);
874 case OP_4A_PS_MADDS1
:
875 emulated
= kvmppc_ps_three_in(vcpu
, rcomp
, ax_rd
,
876 ax_ra
, ax_rc
, ax_rb
, SCALAR_LOW
, fps_fmadds
);
879 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
880 ax_ra
, ax_rb
, SCALAR_NONE
, fps_fdivs
);
883 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
884 ax_ra
, ax_rb
, SCALAR_NONE
, fps_fsubs
);
887 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
888 ax_ra
, ax_rb
, SCALAR_NONE
, fps_fadds
);
891 emulated
= kvmppc_ps_three_in(vcpu
, rcomp
, ax_rd
,
892 ax_ra
, ax_rc
, ax_rb
, SCALAR_NONE
, fps_fsel
);
895 emulated
= kvmppc_ps_one_in(vcpu
, rcomp
, ax_rd
,
899 emulated
= kvmppc_ps_two_in(vcpu
, rcomp
, ax_rd
,
900 ax_ra
, ax_rc
, SCALAR_NONE
, fps_fmuls
);
902 case OP_4A_PS_RSQRTE
:
903 emulated
= kvmppc_ps_one_in(vcpu
, rcomp
, ax_rd
,
907 emulated
= kvmppc_ps_three_in(vcpu
, rcomp
, ax_rd
,
908 ax_ra
, ax_rc
, ax_rb
, SCALAR_NONE
, fps_fmsubs
);
911 emulated
= kvmppc_ps_three_in(vcpu
, rcomp
, ax_rd
,
912 ax_ra
, ax_rc
, ax_rb
, SCALAR_NONE
, fps_fmadds
);
915 emulated
= kvmppc_ps_three_in(vcpu
, rcomp
, ax_rd
,
916 ax_ra
, ax_rc
, ax_rb
, SCALAR_NONE
, fps_fnmsubs
);
919 emulated
= kvmppc_ps_three_in(vcpu
, rcomp
, ax_rd
,
920 ax_ra
, ax_rc
, ax_rb
, SCALAR_NONE
, fps_fnmadds
);
925 /* Real FPU operations */
929 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) + full_d
;
931 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
, addr
,
937 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) + full_d
;
939 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
, addr
,
942 if (emulated
== EMULATE_DONE
)
943 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
948 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) + full_d
;
950 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
, addr
,
956 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) + full_d
;
958 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
, addr
,
961 if (emulated
== EMULATE_DONE
)
962 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
967 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) + full_d
;
969 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
, addr
,
975 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) + full_d
;
977 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
, addr
,
980 if (emulated
== EMULATE_DONE
)
981 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
986 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) + full_d
;
988 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
, addr
,
994 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) + full_d
;
996 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
, addr
,
999 if (emulated
== EMULATE_DONE
)
1000 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
1004 switch (inst_get_field(inst
, 21, 30)) {
1007 ulong addr
= ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0;
1009 addr
+= kvmppc_get_gpr(vcpu
, ax_rb
);
1010 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
,
1011 addr
, FPU_LS_SINGLE
);
1016 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) +
1017 kvmppc_get_gpr(vcpu
, ax_rb
);
1019 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
,
1020 addr
, FPU_LS_SINGLE
);
1022 if (emulated
== EMULATE_DONE
)
1023 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
1028 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) +
1029 kvmppc_get_gpr(vcpu
, ax_rb
);
1031 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
,
1032 addr
, FPU_LS_DOUBLE
);
1037 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) +
1038 kvmppc_get_gpr(vcpu
, ax_rb
);
1040 emulated
= kvmppc_emulate_fpr_load(run
, vcpu
, ax_rd
,
1041 addr
, FPU_LS_DOUBLE
);
1043 if (emulated
== EMULATE_DONE
)
1044 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
1049 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) +
1050 kvmppc_get_gpr(vcpu
, ax_rb
);
1052 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
,
1053 addr
, FPU_LS_SINGLE
);
1058 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) +
1059 kvmppc_get_gpr(vcpu
, ax_rb
);
1061 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
,
1062 addr
, FPU_LS_SINGLE
);
1064 if (emulated
== EMULATE_DONE
)
1065 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
1070 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) +
1071 kvmppc_get_gpr(vcpu
, ax_rb
);
1073 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
,
1074 addr
, FPU_LS_DOUBLE
);
1079 ulong addr
= kvmppc_get_gpr(vcpu
, ax_ra
) +
1080 kvmppc_get_gpr(vcpu
, ax_rb
);
1082 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
,
1083 addr
, FPU_LS_DOUBLE
);
1085 if (emulated
== EMULATE_DONE
)
1086 kvmppc_set_gpr(vcpu
, ax_ra
, addr
);
1091 ulong addr
= (ax_ra
? kvmppc_get_gpr(vcpu
, ax_ra
) : 0) +
1092 kvmppc_get_gpr(vcpu
, ax_rb
);
1094 emulated
= kvmppc_emulate_fpr_store(run
, vcpu
, ax_rd
,
1103 switch (inst_get_field(inst
, 21, 30)) {
1105 fpd_fadds(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_b
);
1106 kvmppc_sync_qpr(vcpu
, ax_rd
);
1109 fpd_fsubs(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_b
);
1110 kvmppc_sync_qpr(vcpu
, ax_rd
);
1113 fpd_fdivs(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_b
);
1114 kvmppc_sync_qpr(vcpu
, ax_rd
);
1117 fpd_fres(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1118 kvmppc_sync_qpr(vcpu
, ax_rd
);
1120 case OP_59_FRSQRTES
:
1121 fpd_frsqrtes(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1122 kvmppc_sync_qpr(vcpu
, ax_rd
);
1125 switch (inst_get_field(inst
, 26, 30)) {
1127 fpd_fmuls(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
);
1128 kvmppc_sync_qpr(vcpu
, ax_rd
);
1131 fpd_fmsubs(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1132 kvmppc_sync_qpr(vcpu
, ax_rd
);
1135 fpd_fmadds(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1136 kvmppc_sync_qpr(vcpu
, ax_rd
);
1139 fpd_fnmsubs(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1140 kvmppc_sync_qpr(vcpu
, ax_rd
);
1143 fpd_fnmadds(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1144 kvmppc_sync_qpr(vcpu
, ax_rd
);
1149 switch (inst_get_field(inst
, 21, 30)) {
1154 /* XXX need to implement */
1157 /* XXX missing CR */
1158 *fpr_d
= vcpu
->arch
.fpscr
;
1161 /* XXX missing fm bits */
1162 /* XXX missing CR */
1163 vcpu
->arch
.fpscr
= *fpr_b
;
1168 u32 cr0_mask
= 0xf0000000;
1169 u32 cr_shift
= inst_get_field(inst
, 6, 8) * 4;
1171 fpd_fcmpu(&vcpu
->arch
.fpscr
, &tmp_cr
, fpr_a
, fpr_b
);
1172 cr
&= ~(cr0_mask
>> cr_shift
);
1173 cr
|= (cr
& cr0_mask
) >> cr_shift
;
1179 u32 cr0_mask
= 0xf0000000;
1180 u32 cr_shift
= inst_get_field(inst
, 6, 8) * 4;
1182 fpd_fcmpo(&vcpu
->arch
.fpscr
, &tmp_cr
, fpr_a
, fpr_b
);
1183 cr
&= ~(cr0_mask
>> cr_shift
);
1184 cr
|= (cr
& cr0_mask
) >> cr_shift
;
1188 fpd_fneg(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1194 fpd_fabs(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1197 fpd_fcpsgn(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_b
);
1200 fpd_fdiv(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_b
);
1203 fpd_fadd(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_b
);
1206 fpd_fsub(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_b
);
1209 fpd_fctiw(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1212 fpd_fctiwz(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1215 fpd_frsp(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1216 kvmppc_sync_qpr(vcpu
, ax_rd
);
1223 fpd_fsqrt(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_b
);
1224 /* fD = 1.0f / fD */
1225 fpd_fdiv(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, (u64
*)&one
, fpr_d
);
1229 switch (inst_get_field(inst
, 26, 30)) {
1231 fpd_fmul(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
);
1234 fpd_fsel(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1237 fpd_fmsub(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1240 fpd_fmadd(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1243 fpd_fnmsub(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1246 fpd_fnmadd(&vcpu
->arch
.fpscr
, &cr
, fpr_d
, fpr_a
, fpr_c
, fpr_b
);
1253 for (i
= 0; i
< ARRAY_SIZE(vcpu
->arch
.fpr
); i
++) {
1255 kvm_cvt_df(&vcpu
->arch
.fpr
[i
], &f
);
1256 dprintk(KERN_INFO
"FPR[%d] = 0x%x\n", i
, f
);
1261 kvmppc_set_cr(vcpu
, cr
);