// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright Novell Inc 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_fpu.h>
#include <asm/reg.h>
#include <asm/cacheflush.h>
#include <asm/switch_to.h>
#include <linux/vmalloc.h>

/* #define DEBUG */

#ifdef DEBUG
#define dprintk printk
#else
#define dprintk(...) do { } while(0);
#endif

#define OP_LFS			48
#define OP_LFSU			49
#define OP_LFD			50
#define OP_LFDU			51
#define OP_STFS			52
#define OP_STFSU		53
#define OP_STFD			54
#define OP_STFDU		55
#define OP_PSQ_L		56
#define OP_PSQ_LU		57
#define OP_PSQ_ST		60
#define OP_PSQ_STU		61

#define OP_31_LFSX		535
#define OP_31_LFSUX		567
#define OP_31_LFDX		599
#define OP_31_LFDUX		631
#define OP_31_STFSX		663
#define OP_31_STFSUX		695
#define OP_31_STFX		727
#define OP_31_STFUX		759
#define OP_31_LWIZX		887
#define OP_31_STFIWX		983

#define OP_59_FADDS		21
#define OP_59_FSUBS		20
#define OP_59_FSQRTS		22
#define OP_59_FDIVS		18
#define OP_59_FRES		24
#define OP_59_FMULS		25
#define OP_59_FRSQRTES		26
#define OP_59_FMSUBS		28
#define OP_59_FMADDS		29
#define OP_59_FNMSUBS		30
#define OP_59_FNMADDS		31

#define OP_63_FCMPU		0
#define OP_63_FCPSGN		8
#define OP_63_FRSP		12
#define OP_63_FCTIW		14
#define OP_63_FCTIWZ		15
#define OP_63_FDIV		18
#define OP_63_FADD		21
#define OP_63_FSQRT		22
#define OP_63_FSEL		23
#define OP_63_FRE		24
#define OP_63_FMUL		25
#define OP_63_FRSQRTE		26
#define OP_63_FMSUB		28
#define OP_63_FMADD		29
#define OP_63_FNMSUB		30
#define OP_63_FNMADD		31
#define OP_63_FCMPO		32
#define OP_63_MTFSB1		38 // XXX
#define OP_63_FSUB		20
#define OP_63_FNEG		40
#define OP_63_MCRFS		64
#define OP_63_MTFSB0		70
#define OP_63_FMR		72
#define OP_63_MTFSFI		134
#define OP_63_FABS		264
#define OP_63_MFFS		583
#define OP_63_MTFSF		711

#define OP_4X_PS_CMPU0		0
#define OP_4X_PSQ_LX		6
#define OP_4XW_PSQ_STX		7
#define OP_4A_PS_SUM1		10
#define OP_4A_PS_SUM0		11
#define OP_4A_PS_MULS0		12
#define OP_4A_PS_MULS1		13
#define OP_4A_PS_MADDS0		14
#define OP_4A_PS_MADDS1		15
#define OP_4A_PS_DIV		18
#define OP_4A_PS_SUB		20
#define OP_4A_PS_ADD		21
#define OP_4A_PS_SEL		23
#define OP_4A_PS_RES		24
#define OP_4A_PS_MUL		25
#define OP_4A_PS_RSQRTE		26
#define OP_4A_PS_MSUB		28
#define OP_4A_PS_MADD		29
#define OP_4A_PS_NMSUB		30
#define OP_4A_PS_NMADD		31
#define OP_4X_PS_CMPO0		32
#define OP_4X_PSQ_LUX		38
#define OP_4XW_PSQ_STUX		39
#define OP_4X_PS_NEG		40
#define OP_4X_PS_CMPU1		64
#define OP_4X_PS_MR		72
#define OP_4X_PS_CMPO1		96
#define OP_4X_PS_NABS		136
#define OP_4X_PS_ABS		264
#define OP_4X_PS_MERGE00	528
#define OP_4X_PS_MERGE01	560
#define OP_4X_PS_MERGE10	592
#define OP_4X_PS_MERGE11	624

#define SCALAR_NONE		0
#define SCALAR_HIGH		(1 << 0)
#define SCALAR_LOW		(1 << 1)
#define SCALAR_NO_PS0		(1 << 2)
#define SCALAR_NO_PS1		(1 << 3)

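/*
 * Gekko/Broadway graphics quantization registers (GQRs) control how
 * psq_l/psq_st convert between the in-memory representation and a pair
 * of singles: a TYPE field selects float or {u,s}{8,16} storage and a
 * SCALE field a power-of-two scaling factor, with separate fields for
 * the load and the store direction. The masks below extract them.
 */
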
#define GQR_ST_TYPE_MASK	0x00000007
#define GQR_ST_TYPE_SHIFT	0
#define GQR_ST_SCALE_MASK	0x00003f00
#define GQR_ST_SCALE_SHIFT	8
#define GQR_LD_TYPE_MASK	0x00070000
#define GQR_LD_TYPE_SHIFT	16
#define GQR_LD_SCALE_MASK	0x3f000000
#define GQR_LD_SCALE_SHIFT	24

#define GQR_QUANTIZE_FLOAT	0
#define GQR_QUANTIZE_U8		4
#define GQR_QUANTIZE_U16	5
#define GQR_QUANTIZE_S8		6
#define GQR_QUANTIZE_S16	7

#define FPU_LS_SINGLE		0
#define FPU_LS_DOUBLE		1
#define FPU_LS_SINGLE_LOW	2

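/*
 * Paired-single register state as emulated here: VCPU_FPR() holds ps0
 * as a 64-bit double, while vcpu->arch.qpr[] shadows ps1 as a raw
 * 32-bit single. Single-precision results therefore get mirrored into
 * the matching QPR, which is what kvmppc_sync_qpr() below does.
 */
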
static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
{
	kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
}

static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
{
	u32 dsisr;
	u64 msr = kvmppc_get_msr(vcpu);

	msr = kvmppc_set_field(msr, 33, 36, 0);
	msr = kvmppc_set_field(msr, 42, 47, 0);
	kvmppc_set_msr(vcpu, msr);
	kvmppc_set_dar(vcpu, eaddr);
	/* Page Fault */
	dsisr = kvmppc_set_field(0, 33, 33, 1);
	if (is_store)
		dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
	kvmppc_set_dsisr(vcpu, dsisr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}

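/*
 * Note the contract with kvmppc_ld()/kvmppc_st() used below: a negative
 * return means the guest translation failed and we reflect a data
 * storage interrupt via kvmppc_inject_pf(), while EMULATE_DO_MMIO means
 * the access hit emulated MMIO and the generic MMIO load/store path
 * finishes the access.
 */
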
static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   int rs, ulong addr, int ls_type)
{
	int emulated = EMULATE_FAIL;
	int r;
	char tmp[8];
	int len = sizeof(u32);

	if (ls_type == FPU_LS_DOUBLE)
		len = sizeof(u64);

	/* read from memory */
	r = kvmppc_ld(vcpu, &addr, len, tmp, true);
	vcpu->arch.paddr_accessed = addr;

	if (r < 0) {
		kvmppc_inject_pf(vcpu, addr, false);
		goto done_load;
	} else if (r == EMULATE_DO_MMIO) {
		emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
					      len, 1);
		goto done_load;
	}

	emulated = EMULATE_DONE;

	/* put in registers */
	switch (ls_type) {
	case FPU_LS_SINGLE:
		kvm_cvt_fd((u32*)tmp, &VCPU_FPR(vcpu, rs));
		vcpu->arch.qpr[rs] = *((u32*)tmp);
		break;
	case FPU_LS_DOUBLE:
		VCPU_FPR(vcpu, rs) = *((u64*)tmp);
		break;
	}

	dprintk(KERN_INFO "KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64*)tmp,
			  addr, len);

done_load:
	return emulated;
}

static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				    int rs, ulong addr, int ls_type)
{
	int emulated = EMULATE_FAIL;
	int r;
	char tmp[8];
	u64 val;
	int len;

	switch (ls_type) {
	case FPU_LS_SINGLE:
		kvm_cvt_df(&VCPU_FPR(vcpu, rs), (u32*)tmp);
		val = *((u32*)tmp);
		len = sizeof(u32);
		break;
	case FPU_LS_SINGLE_LOW:
		*((u32*)tmp) = VCPU_FPR(vcpu, rs);
		val = VCPU_FPR(vcpu, rs) & 0xffffffff;
		len = sizeof(u32);
		break;
	case FPU_LS_DOUBLE:
		*((u64*)tmp) = VCPU_FPR(vcpu, rs);
		val = VCPU_FPR(vcpu, rs);
		len = sizeof(u64);
		break;
	default:
		val = 0;
		len = sizeof(u32);
	}

	r = kvmppc_st(vcpu, &addr, len, tmp, true);
	vcpu->arch.paddr_accessed = addr;
	if (r < 0) {
		kvmppc_inject_pf(vcpu, addr, true);
	} else if (r == EMULATE_DO_MMIO) {
		emulated = kvmppc_handle_store(run, vcpu, val, len, 1);
	} else {
		emulated = EMULATE_DONE;
	}

	dprintk(KERN_INFO "KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n",
			  val, addr, len);

	return emulated;
}

static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   int rs, ulong addr, bool w, int i)
{
	int emulated = EMULATE_FAIL;
	int r;
	float one = 1.0;
	u32 tmp[2];

	/* read from memory */
	if (w) {
		r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true);
		memcpy(&tmp[1], &one, sizeof(u32));
	} else {
		r = kvmppc_ld(vcpu, &addr, sizeof(u32) * 2, tmp, true);
	}
	vcpu->arch.paddr_accessed = addr;
	if (r < 0) {
		kvmppc_inject_pf(vcpu, addr, false);
		goto done_load;
	} else if ((r == EMULATE_DO_MMIO) && w) {
		emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
					      4, 1);
		vcpu->arch.qpr[rs] = tmp[1];
		goto done_load;
	} else if (r == EMULATE_DO_MMIO) {
		emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs,
					      8, 1);
		goto done_load;
	}

	emulated = EMULATE_DONE;

	/* put in registers */
	kvm_cvt_fd(&tmp[0], &VCPU_FPR(vcpu, rs));
	vcpu->arch.qpr[rs] = tmp[1];

	dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
			  tmp[1], addr, w ? 4 : 8);

done_load:
	return emulated;
}

static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				    int rs, ulong addr, bool w, int i)
{
	int emulated = EMULATE_FAIL;
	int r;
	u32 tmp[2];
	int len = w ? sizeof(u32) : sizeof(u64);

	kvm_cvt_df(&VCPU_FPR(vcpu, rs), &tmp[0]);
	tmp[1] = vcpu->arch.qpr[rs];

	r = kvmppc_st(vcpu, &addr, len, tmp, true);
	vcpu->arch.paddr_accessed = addr;
	if (r < 0) {
		kvmppc_inject_pf(vcpu, addr, true);
	} else if ((r == EMULATE_DO_MMIO) && w) {
		emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1);
	} else if (r == EMULATE_DO_MMIO) {
		u64 val = ((u64)tmp[0] << 32) | tmp[1];
		emulated = kvmppc_handle_store(run, vcpu, val, 8, 1);
	} else {
		emulated = EMULATE_DONE;
	}

	dprintk(KERN_INFO "KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n",
			  tmp[0], tmp[1], addr, len);

	return emulated;
}

/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
 */
static inline u32 inst_get_field(u32 inst, int msb, int lsb)
{
	return kvmppc_get_field(inst, msb + 32, lsb + 32);
}

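/*
 * Example: the X-form extended opcode sits in bits 21-30 (IBM bit
 * numbering, bit 0 leftmost), so inst_get_field(inst, 21, 30) extracts
 * it. The +32 above merely maps 32-bit instruction bit numbers onto
 * the 64-bit numbering that kvmppc_get_field() operates on.
 */
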
static bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
{
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return false;

	switch (get_op(inst)) {
	case OP_PSQ_L:
	case OP_PSQ_LU:
	case OP_PSQ_ST:
	case OP_PSQ_STU:
	case OP_LFS:
	case OP_LFSU:
	case OP_LFD:
	case OP_LFDU:
	case OP_STFS:
	case OP_STFSU:
	case OP_STFD:
	case OP_STFDU:
		return true;
	case 4:
		/* X form */
		switch (inst_get_field(inst, 21, 30)) {
		case OP_4X_PS_CMPU0:
		case OP_4X_PSQ_LX:
		case OP_4X_PS_CMPO0:
		case OP_4X_PSQ_LUX:
		case OP_4X_PS_NEG:
		case OP_4X_PS_CMPU1:
		case OP_4X_PS_MR:
		case OP_4X_PS_CMPO1:
		case OP_4X_PS_NABS:
		case OP_4X_PS_ABS:
		case OP_4X_PS_MERGE00:
		case OP_4X_PS_MERGE01:
		case OP_4X_PS_MERGE10:
		case OP_4X_PS_MERGE11:
			return true;
		}
		/* XW form */
		switch (inst_get_field(inst, 25, 30)) {
		case OP_4XW_PSQ_STX:
		case OP_4XW_PSQ_STUX:
			return true;
		}
		/* A form */
		switch (inst_get_field(inst, 26, 30)) {
		case OP_4A_PS_SUM1:
		case OP_4A_PS_SUM0:
		case OP_4A_PS_MULS0:
		case OP_4A_PS_MULS1:
		case OP_4A_PS_MADDS0:
		case OP_4A_PS_MADDS1:
		case OP_4A_PS_DIV:
		case OP_4A_PS_SUB:
		case OP_4A_PS_ADD:
		case OP_4A_PS_SEL:
		case OP_4A_PS_RES:
		case OP_4A_PS_MUL:
		case OP_4A_PS_RSQRTE:
		case OP_4A_PS_MSUB:
		case OP_4A_PS_MADD:
		case OP_4A_PS_NMSUB:
		case OP_4A_PS_NMADD:
			return true;
		}
		break;
	case 59:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_59_FADDS:
		case OP_59_FSUBS:
		case OP_59_FDIVS:
		case OP_59_FRES:
		case OP_59_FRSQRTES:
			return true;
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_59_FMULS:
		case OP_59_FMSUBS:
		case OP_59_FMADDS:
		case OP_59_FNMSUBS:
		case OP_59_FNMADDS:
			return true;
		}
		break;
	case 63:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_63_MTFSB0:
		case OP_63_MTFSB1:
		case OP_63_MTFSF:
		case OP_63_MTFSFI:
		case OP_63_MCRFS:
		case OP_63_MFFS:
		case OP_63_FCMPU:
		case OP_63_FCMPO:
		case OP_63_FNEG:
		case OP_63_FMR:
		case OP_63_FABS:
		case OP_63_FRSP:
		case OP_63_FDIV:
		case OP_63_FADD:
		case OP_63_FSUB:
		case OP_63_FCTIW:
		case OP_63_FCTIWZ:
		case OP_63_FRSQRTE:
		case OP_63_FCPSGN:
			return true;
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_63_FMUL:
		case OP_63_FSEL:
		case OP_63_FMSUB:
		case OP_63_FMADD:
		case OP_63_FNMSUB:
		case OP_63_FNMADD:
			return true;
		}
		break;
	case 31:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_31_LFSX:
		case OP_31_LFSUX:
		case OP_31_LFDX:
		case OP_31_LFDUX:
		case OP_31_STFSX:
		case OP_31_STFSUX:
		case OP_31_STFX:
		case OP_31_STFUX:
		case OP_31_STFIWX:
			return true;
		}
		break;
	}

	return false;
}

static int get_d_signext(u32 inst)
{
	int d = inst & 0x8ff;

	if (d & 0x800)
		return -(d & 0x7ff);

	return (d & 0x7ff);
}

static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
			      int reg_out, int reg_in1, int reg_in2,
			      int reg_in3, int scalar,
			      void (*func)(u64 *fpscr,
					   u32 *dst, u32 *src1,
					   u32 *src2, u32 *src3))
{
	u32 *qpr = vcpu->arch.qpr;
	u32 ps0_out;
	u32 ps0_in1, ps0_in2, ps0_in3;
	u32 ps1_in1, ps1_in2, ps1_in3;

	/* RC */
	WARN_ON(rc);

	/* PS0 */
	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in3), &ps0_in3);

	if (scalar & SCALAR_LOW)
		ps0_in2 = qpr[reg_in2];

	func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);

	dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
			  ps0_in1, ps0_in2, ps0_in3, ps0_out);

	if (!(scalar & SCALAR_NO_PS0))
		kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));

	/* PS1 */
	ps1_in1 = qpr[reg_in1];
	ps1_in2 = qpr[reg_in2];
	ps1_in3 = qpr[reg_in3];

	if (scalar & SCALAR_HIGH)
		ps1_in2 = ps0_in2;

	if (!(scalar & SCALAR_NO_PS1))
		func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);

	dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
			  ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);

	return EMULATE_DONE;
}

static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
			    int reg_out, int reg_in1, int reg_in2,
			    int scalar,
			    void (*func)(u64 *fpscr,
					 u32 *dst, u32 *src1,
					 u32 *src2))
{
	u32 *qpr = vcpu->arch.qpr;
	u32 ps0_out;
	u32 ps0_in1, ps0_in2;
	u32 ps1_out;
	u32 ps1_in1, ps1_in2;

	/* RC */
	WARN_ON(rc);

	/* PS0 */
	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);

	if (scalar & SCALAR_LOW)
		ps0_in2 = qpr[reg_in2];
	else
		kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);

	func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2);

	if (!(scalar & SCALAR_NO_PS0)) {
		dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
				  ps0_in1, ps0_in2, ps0_out);

		kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
	}

	/* PS1 */
	ps1_in1 = qpr[reg_in1];
	ps1_in2 = qpr[reg_in2];

	if (scalar & SCALAR_HIGH)
		ps1_in2 = ps0_in2;

	func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2);

	if (!(scalar & SCALAR_NO_PS1)) {
		qpr[reg_out] = ps1_out;

		dprintk(KERN_INFO "PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n",
				  ps1_in1, ps1_in2, qpr[reg_out]);
	}

	return EMULATE_DONE;
}

static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
			    int reg_out, int reg_in,
			    void (*func)(u64 *fpscr,
					 u32 *dst, u32 *src1))
{
	u32 *qpr = vcpu->arch.qpr;
	u32 ps0_out, ps0_in;
	u32 ps1_in;

	/* RC */
	WARN_ON(rc);

	/* PS0 */
	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in), &ps0_in);
	func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in);

	dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
			  ps0_in, ps0_out);

	kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));

	/* PS1 */
	ps1_in = qpr[reg_in];
	func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in);

	dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
			  ps1_in, qpr[reg_out]);

	return EMULATE_DONE;
}

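/*
 * Main entry point: fetch and decode the last guest instruction, bail
 * out for anything this emulator does not handle, reflect an
 * FP-unavailable interrupt if the guest has MSR_FP clear, then grab
 * the host FPU and dispatch on the primary/extended opcode.
 */
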
int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst;
	enum emulation_result emulated = EMULATE_DONE;
	int ax_rd, ax_ra, ax_rb, ax_rc;
	short full_d;
	u64 *fpr_d, *fpr_a, *fpr_b, *fpr_c;

	bool rcomp;
	u32 cr;
#ifdef DEBUG
	int i;
#endif

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ax_rd = inst_get_field(inst, 6, 10);
	ax_ra = inst_get_field(inst, 11, 15);
	ax_rb = inst_get_field(inst, 16, 20);
	ax_rc = inst_get_field(inst, 21, 25);
	full_d = inst_get_field(inst, 16, 31);

	fpr_d = &VCPU_FPR(vcpu, ax_rd);
	fpr_a = &VCPU_FPR(vcpu, ax_ra);
	fpr_b = &VCPU_FPR(vcpu, ax_rb);
	fpr_c = &VCPU_FPR(vcpu, ax_rc);

	rcomp = (inst & 1) ? true : false;
	cr = kvmppc_get_cr(vcpu);

	if (!kvmppc_inst_is_paired_single(vcpu, inst))
		return EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
		return EMULATE_AGAIN;
	}

	kvmppc_giveup_ext(vcpu, MSR_FP);
	preempt_disable();
	enable_kernel_fp();
	/* Do we need to clear FE0 / FE1 here? Don't think so. */

#ifdef DEBUG
	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
		u32 f;
		kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
		dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
			i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]);
	}
#endif

	switch (get_op(inst)) {
	case OP_PSQ_L:
	{
		ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
		break;
	}
	case OP_PSQ_LU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_PSQ_ST:
	{
		ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
		break;
	}
	case OP_PSQ_STU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case 4:
		/* X form */
		switch (inst_get_field(inst, 21, 30)) {
		case OP_4X_PS_CMPU0:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PSQ_LX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
			break;
		}
		case OP_4X_PS_CMPO0:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PSQ_LUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_4X_PS_NEG:
			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
			VCPU_FPR(vcpu, ax_rd) ^= 0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] ^= 0x80000000;
			break;
		case OP_4X_PS_CMPU1:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PS_MR:
			WARN_ON(rcomp);
			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		case OP_4X_PS_CMPO1:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PS_NABS:
			WARN_ON(rcomp);
			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
			VCPU_FPR(vcpu, ax_rd) |= 0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] |= 0x80000000;
			break;
		case OP_4X_PS_ABS:
			WARN_ON(rcomp);
			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
			VCPU_FPR(vcpu, ax_rd) &= ~0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] &= ~0x80000000;
			break;
		case OP_4X_PS_MERGE00:
			WARN_ON(rcomp);
			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
			/* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
			kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
				   &vcpu->arch.qpr[ax_rd]);
			break;
		case OP_4X_PS_MERGE01:
			WARN_ON(rcomp);
			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		case OP_4X_PS_MERGE10:
			WARN_ON(rcomp);
			/* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
				   &VCPU_FPR(vcpu, ax_rd));
			/* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
			kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
				   &vcpu->arch.qpr[ax_rd]);
			break;
		case OP_4X_PS_MERGE11:
			WARN_ON(rcomp);
			/* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
				   &VCPU_FPR(vcpu, ax_rd));
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		}
		/* XW form */
		switch (inst_get_field(inst, 25, 30)) {
		case OP_4XW_PSQ_STX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
			break;
		}
		case OP_4XW_PSQ_STUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		}
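		/*
		 * A form: arithmetic over both halves of the pair,
		 * implemented with the single-precision fps_* helpers
		 * via the kvmppc_ps_*_in() wrappers above.
		 */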
		switch (inst_get_field(inst, 26, 30)) {
		case OP_4A_PS_SUM1:
			/* ps0 = frC.ps0, ps1 = frA.ps0 + frB.ps1 */
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rc);
			break;
		case OP_4A_PS_SUM0:
			/* ps0 = frA.ps0 + frB.ps1, ps1 = frC.ps1 */
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds);
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc];
			break;
		case OP_4A_PS_MULS0:
			/* both halves multiplied by frC.ps0 */
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls);
			break;
		case OP_4A_PS_MULS1:
			/* both halves multiplied by frC.ps1 */
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_LOW, fps_fmuls);
			break;
		case OP_4A_PS_MADDS0:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds);
			break;
		case OP_4A_PS_MADDS1:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds);
			break;
		case OP_4A_PS_DIV:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fdivs);
			break;
		case OP_4A_PS_SUB:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fsubs);
			break;
		case OP_4A_PS_ADD:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fadds);
			break;
		case OP_4A_PS_SEL:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel);
			break;
		case OP_4A_PS_RES:
			emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
					ax_rb, fps_fres);
			break;
		case OP_4A_PS_MUL:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_NONE, fps_fmuls);
			break;
		case OP_4A_PS_RSQRTE:
			emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
					ax_rb, fps_frsqrte);
			break;
		case OP_4A_PS_MSUB:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs);
			break;
		case OP_4A_PS_MADD:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds);
			break;
		case OP_4A_PS_NMSUB:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs);
			break;
		case OP_4A_PS_NMADD:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds);
			break;
		}
		break;

	/* Real FPU operations */

	case OP_LFS:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_SINGLE);
		break;
	}
	case OP_LFSU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_SINGLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_LFD:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_DOUBLE);
		break;
	}
	case OP_LFDU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_DOUBLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_STFS:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_SINGLE);
		break;
	}
	case OP_STFSU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_SINGLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_STFD:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_DOUBLE);
		break;
	}
	case OP_STFDU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_DOUBLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case 31:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_31_LFSX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_SINGLE);
			break;
		}
		case OP_31_LFSUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_SINGLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_LFDX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_DOUBLE);
			break;
		}
		case OP_31_LFDUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_DOUBLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFSX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_SINGLE);
			break;
		}
		case OP_31_STFSUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_SINGLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_DOUBLE);
			break;
		}
		case OP_31_STFUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_DOUBLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFIWX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr,
							    FPU_LS_SINGLE_LOW);
			break;
		}
		}
		break;
	case 59:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_59_FADDS:
			fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FSUBS:
			fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FDIVS:
			fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FRES:
			fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FRSQRTES:
			fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_59_FMULS:
			fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FMSUBS:
			fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FMADDS:
			fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FNMSUBS:
			fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FNMADDS:
			fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		}
		break;
	case 63:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_63_MTFSB0:
		case OP_63_MTFSB1:
		case OP_63_MCRFS:
		case OP_63_MTFSFI:
			/* XXX need to implement */
			break;
		case OP_63_MFFS:
			/* XXX missing CR */
			*fpr_d = vcpu->arch.fp.fpscr;
			break;
		case OP_63_MTFSF:
			/* XXX missing fm bits */
			/* XXX missing CR */
			vcpu->arch.fp.fpscr = *fpr_b;
			break;
		case OP_63_FCMPU:
		{
			u32 tmp_cr;
			u32 cr0_mask = 0xf0000000;
			u32 cr_shift = inst_get_field(inst, 6, 8) * 4;

			fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
			cr &= ~(cr0_mask >> cr_shift);
			cr |= (tmp_cr & cr0_mask) >> cr_shift;
			break;
		}
		case OP_63_FCMPO:
		{
			u32 tmp_cr;
			u32 cr0_mask = 0xf0000000;
			u32 cr_shift = inst_get_field(inst, 6, 8) * 4;

			fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
			cr &= ~(cr0_mask >> cr_shift);
			cr |= (tmp_cr & cr0_mask) >> cr_shift;
			break;
		}
		case OP_63_FNEG:
			fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FMR:
			*fpr_d = *fpr_b;
			break;
		case OP_63_FABS:
			fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FCPSGN:
			fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FDIV:
			fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FADD:
			fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FSUB:
			fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FCTIW:
			fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FCTIWZ:
			fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FRSP:
			fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_63_FRSQRTE:
		{
			double one = 1.0f;

			/* fD = sqrt(fB) */
			fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
			/* fD = 1.0f / fD */
			fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
			break;
		}
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_63_FMUL:
			fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
			break;
		case OP_63_FSEL:
			fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FMSUB:
			fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FMADD:
			fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FNMSUB:
			fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FNMADD:
			fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		}
		break;
	}

#ifdef DEBUG
	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
		u32 f;
		kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
		dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
	}
#endif

	if (rcomp)
		kvmppc_set_cr(vcpu, cr);

	disable_kernel_fp();
	preempt_enable();

	return emulated;
}