arch/powerpc/kvm/book3s_paired_singles.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright Novell Inc 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_fpu.h>
#include <asm/reg.h>
#include <asm/cacheflush.h>
#include <asm/switch_to.h>
#include <linux/vmalloc.h>

/* #define DEBUG */

#ifdef DEBUG
#define dprintk printk
#else
#define dprintk(...) do { } while (0)
#endif

#define OP_LFS 48
#define OP_LFSU 49
#define OP_LFD 50
#define OP_LFDU 51
#define OP_STFS 52
#define OP_STFSU 53
#define OP_STFD 54
#define OP_STFDU 55
#define OP_PSQ_L 56
#define OP_PSQ_LU 57
#define OP_PSQ_ST 60
#define OP_PSQ_STU 61

#define OP_31_LFSX 535
#define OP_31_LFSUX 567
#define OP_31_LFDX 599
#define OP_31_LFDUX 631
#define OP_31_STFSX 663
#define OP_31_STFSUX 695
#define OP_31_STFX 727
#define OP_31_STFUX 759
#define OP_31_LWIZX 887
#define OP_31_STFIWX 983

#define OP_59_FADDS 21
#define OP_59_FSUBS 20
#define OP_59_FSQRTS 22
#define OP_59_FDIVS 18
#define OP_59_FRES 24
#define OP_59_FMULS 25
#define OP_59_FRSQRTES 26
#define OP_59_FMSUBS 28
#define OP_59_FMADDS 29
#define OP_59_FNMSUBS 30
#define OP_59_FNMADDS 31

#define OP_63_FCMPU 0
#define OP_63_FCPSGN 8
#define OP_63_FRSP 12
#define OP_63_FCTIW 14
#define OP_63_FCTIWZ 15
#define OP_63_FDIV 18
#define OP_63_FADD 21
#define OP_63_FSQRT 22
#define OP_63_FSEL 23
#define OP_63_FRE 24
#define OP_63_FMUL 25
#define OP_63_FRSQRTE 26
#define OP_63_FMSUB 28
#define OP_63_FMADD 29
#define OP_63_FNMSUB 30
#define OP_63_FNMADD 31
#define OP_63_FCMPO 32
#define OP_63_MTFSB1 38 // XXX
#define OP_63_FSUB 20
#define OP_63_FNEG 40
#define OP_63_MCRFS 64
#define OP_63_MTFSB0 70
#define OP_63_FMR 72
#define OP_63_MTFSFI 134
#define OP_63_FABS 264
#define OP_63_MFFS 583
#define OP_63_MTFSF 711

#define OP_4X_PS_CMPU0 0
#define OP_4X_PSQ_LX 6
#define OP_4XW_PSQ_STX 7
#define OP_4A_PS_SUM0 10
#define OP_4A_PS_SUM1 11
#define OP_4A_PS_MULS0 12
#define OP_4A_PS_MULS1 13
#define OP_4A_PS_MADDS0 14
#define OP_4A_PS_MADDS1 15
#define OP_4A_PS_DIV 18
#define OP_4A_PS_SUB 20
#define OP_4A_PS_ADD 21
#define OP_4A_PS_SEL 23
#define OP_4A_PS_RES 24
#define OP_4A_PS_MUL 25
#define OP_4A_PS_RSQRTE 26
#define OP_4A_PS_MSUB 28
#define OP_4A_PS_MADD 29
#define OP_4A_PS_NMSUB 30
#define OP_4A_PS_NMADD 31
#define OP_4X_PS_CMPO0 32
#define OP_4X_PSQ_LUX 38
#define OP_4XW_PSQ_STUX 39
#define OP_4X_PS_NEG 40
#define OP_4X_PS_CMPU1 64
#define OP_4X_PS_MR 72
#define OP_4X_PS_CMPO1 96
#define OP_4X_PS_NABS 136
#define OP_4X_PS_ABS 264
#define OP_4X_PS_MERGE00 528
#define OP_4X_PS_MERGE01 560
#define OP_4X_PS_MERGE10 592
#define OP_4X_PS_MERGE11 624

#define SCALAR_NONE 0
#define SCALAR_HIGH (1 << 0)
#define SCALAR_LOW (1 << 1)
#define SCALAR_NO_PS0 (1 << 2)
#define SCALAR_NO_PS1 (1 << 3)

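/*
 * How the SCALAR_* flags drive the kvmppc_ps_{two,three}_in() helpers
 * below: SCALAR_HIGH feeds the second input's ps0 into the ps1 lane
 * (ps_muls0/ps_madds0), SCALAR_LOW feeds its ps1 into the ps0 lane
 * (ps_muls1/ps_madds1), and SCALAR_NO_PS0/SCALAR_NO_PS1 suppress the
 * write-back of the corresponding half of the result (ps_sum1/ps_sum0).
 */
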
#define GQR_ST_TYPE_MASK 0x00000007
#define GQR_ST_TYPE_SHIFT 0
#define GQR_ST_SCALE_MASK 0x00003f00
#define GQR_ST_SCALE_SHIFT 8
#define GQR_LD_TYPE_MASK 0x00070000
#define GQR_LD_TYPE_SHIFT 16
#define GQR_LD_SCALE_MASK 0x3f000000
#define GQR_LD_SCALE_SHIFT 24

#define GQR_QUANTIZE_FLOAT 0
#define GQR_QUANTIZE_U8 4
#define GQR_QUANTIZE_U16 5
#define GQR_QUANTIZE_S8 6
#define GQR_QUANTIZE_S16 7

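/*
 * Illustrative decode of a GQR value, e.g. gqr = 0x00070000 selects
 * GQR_QUANTIZE_S16 loads at scale 0:
 *
 *     ld_type  = (gqr & GQR_LD_TYPE_MASK)  >> GQR_LD_TYPE_SHIFT;   // 7
 *     ld_scale = (gqr & GQR_LD_SCALE_MASK) >> GQR_LD_SCALE_SHIFT;  // 0
 *
 * (The psq load/store emulation in this file handles only the raw
 * float format and does not dequantize.)
 */
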
#define FPU_LS_SINGLE 0
#define FPU_LS_DOUBLE 1
#define FPU_LS_SINGLE_LOW 2

static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
{
    kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
}

static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
{
    u32 dsisr;
    u64 msr = kvmppc_get_msr(vcpu);

    msr = kvmppc_set_field(msr, 33, 36, 0);
    msr = kvmppc_set_field(msr, 42, 47, 0);
    kvmppc_set_msr(vcpu, msr);
    kvmppc_set_dar(vcpu, eaddr);
    /* Page Fault */
    dsisr = kvmppc_set_field(0, 33, 33, 1);
    if (is_store)
        dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
    kvmppc_set_dsisr(vcpu, dsisr);
    kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}

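/*
 * With the ISA's big-endian bit numbering used by kvmppc_set_field(),
 * the DSISR built above for a faulting store comes out as 0x42000000:
 * bit 33 (0x40000000, no translation found) plus bit 38 (0x02000000,
 * access was a store).
 */
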
static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   int rs, ulong addr, int ls_type)
{
    int emulated = EMULATE_FAIL;
    int r;
    char tmp[8];
    int len = sizeof(u32);

    if (ls_type == FPU_LS_DOUBLE)
        len = sizeof(u64);

    /* read from memory */
    r = kvmppc_ld(vcpu, &addr, len, tmp, true);
    vcpu->arch.paddr_accessed = addr;

    if (r < 0) {
        kvmppc_inject_pf(vcpu, addr, false);
        goto done_load;
    } else if (r == EMULATE_DO_MMIO) {
        emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
                                      len, 1);
        goto done_load;
    }

    emulated = EMULATE_DONE;

    /* put in registers */
    switch (ls_type) {
    case FPU_LS_SINGLE:
        kvm_cvt_fd((u32*)tmp, &VCPU_FPR(vcpu, rs));
        vcpu->arch.qpr[rs] = *((u32*)tmp);
        break;
    case FPU_LS_DOUBLE:
        VCPU_FPR(vcpu, rs) = *((u64*)tmp);
        break;
    }

    dprintk(KERN_INFO "KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64*)tmp,
                      addr, len);

done_load:
    return emulated;
}

static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                    int rs, ulong addr, int ls_type)
{
    int emulated = EMULATE_FAIL;
    int r;
    char tmp[8];
    u64 val;
    int len;

    switch (ls_type) {
    case FPU_LS_SINGLE:
        kvm_cvt_df(&VCPU_FPR(vcpu, rs), (u32*)tmp);
        val = *((u32*)tmp);
        len = sizeof(u32);
        break;
    case FPU_LS_SINGLE_LOW:
        *((u32*)tmp) = VCPU_FPR(vcpu, rs);
        val = VCPU_FPR(vcpu, rs) & 0xffffffff;
        len = sizeof(u32);
        break;
    case FPU_LS_DOUBLE:
        *((u64*)tmp) = VCPU_FPR(vcpu, rs);
        val = VCPU_FPR(vcpu, rs);
        len = sizeof(u64);
        break;
    default:
        val = 0;
        len = 0;
    }

    r = kvmppc_st(vcpu, &addr, len, tmp, true);
    vcpu->arch.paddr_accessed = addr;
    if (r < 0) {
        kvmppc_inject_pf(vcpu, addr, true);
    } else if (r == EMULATE_DO_MMIO) {
        emulated = kvmppc_handle_store(run, vcpu, val, len, 1);
    } else {
        emulated = EMULATE_DONE;
    }

    dprintk(KERN_INFO "KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n",
                      val, addr, len);

    return emulated;
}

static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   int rs, ulong addr, bool w, int i)
{
    int emulated = EMULATE_FAIL;
    int r;
    float one = 1.0;
    u32 tmp[2];

    /* read from memory */
    if (w) {
        r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true);
        memcpy(&tmp[1], &one, sizeof(u32));
    } else {
        r = kvmppc_ld(vcpu, &addr, sizeof(u32) * 2, tmp, true);
    }
    vcpu->arch.paddr_accessed = addr;
    if (r < 0) {
        kvmppc_inject_pf(vcpu, addr, false);
        goto done_load;
    } else if ((r == EMULATE_DO_MMIO) && w) {
        emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
                                      4, 1);
        vcpu->arch.qpr[rs] = tmp[1];
        goto done_load;
    } else if (r == EMULATE_DO_MMIO) {
        emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs,
                                      8, 1);
        goto done_load;
    }

    emulated = EMULATE_DONE;

    /* put in registers */
    kvm_cvt_fd(&tmp[0], &VCPU_FPR(vcpu, rs));
    vcpu->arch.qpr[rs] = tmp[1];

    dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
                      tmp[1], addr, w ? 4 : 8);

done_load:
    return emulated;
}

static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                    int rs, ulong addr, bool w, int i)
{
    int emulated = EMULATE_FAIL;
    int r;
    u32 tmp[2];
    int len = w ? sizeof(u32) : sizeof(u64);

    kvm_cvt_df(&VCPU_FPR(vcpu, rs), &tmp[0]);
    tmp[1] = vcpu->arch.qpr[rs];

    r = kvmppc_st(vcpu, &addr, len, tmp, true);
    vcpu->arch.paddr_accessed = addr;
    if (r < 0) {
        kvmppc_inject_pf(vcpu, addr, true);
    } else if ((r == EMULATE_DO_MMIO) && w) {
        emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1);
    } else if (r == EMULATE_DO_MMIO) {
        u64 val = ((u64)tmp[0] << 32) | tmp[1];
        emulated = kvmppc_handle_store(run, vcpu, val, 8, 1);
    } else {
        emulated = EMULATE_DONE;
    }

    dprintk(KERN_INFO "KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n",
                      tmp[0], tmp[1], addr, len);

    return emulated;
}

/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
 */
static inline u32 inst_get_field(u32 inst, int msb, int lsb)
{
    return kvmppc_get_field(inst, msb + 32, lsb + 32);
}

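/*
 * Example: the ISA numbers bit 0 as the MSB of the 32-bit instruction,
 * so inst_get_field(inst, 21, 30) yields the 10-bit extended opcode of
 * an X-form instruction, i.e. (inst >> 1) & 0x3ff.
 */
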
static bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
{
    if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
        return false;

    switch (get_op(inst)) {
    case OP_PSQ_L:
    case OP_PSQ_LU:
    case OP_PSQ_ST:
    case OP_PSQ_STU:
    case OP_LFS:
    case OP_LFSU:
    case OP_LFD:
    case OP_LFDU:
    case OP_STFS:
    case OP_STFSU:
    case OP_STFD:
    case OP_STFDU:
        return true;
    case 4:
        /* X form */
        switch (inst_get_field(inst, 21, 30)) {
        case OP_4X_PS_CMPU0:
        case OP_4X_PSQ_LX:
        case OP_4X_PS_CMPO0:
        case OP_4X_PSQ_LUX:
        case OP_4X_PS_NEG:
        case OP_4X_PS_CMPU1:
        case OP_4X_PS_MR:
        case OP_4X_PS_CMPO1:
        case OP_4X_PS_NABS:
        case OP_4X_PS_ABS:
        case OP_4X_PS_MERGE00:
        case OP_4X_PS_MERGE01:
        case OP_4X_PS_MERGE10:
        case OP_4X_PS_MERGE11:
            return true;
        }
        /* XW form */
        switch (inst_get_field(inst, 25, 30)) {
        case OP_4XW_PSQ_STX:
        case OP_4XW_PSQ_STUX:
            return true;
        }
        /* A form */
        switch (inst_get_field(inst, 26, 30)) {
        case OP_4A_PS_SUM1:
        case OP_4A_PS_SUM0:
        case OP_4A_PS_MULS0:
        case OP_4A_PS_MULS1:
        case OP_4A_PS_MADDS0:
        case OP_4A_PS_MADDS1:
        case OP_4A_PS_DIV:
        case OP_4A_PS_SUB:
        case OP_4A_PS_ADD:
        case OP_4A_PS_SEL:
        case OP_4A_PS_RES:
        case OP_4A_PS_MUL:
        case OP_4A_PS_RSQRTE:
        case OP_4A_PS_MSUB:
        case OP_4A_PS_MADD:
        case OP_4A_PS_NMSUB:
        case OP_4A_PS_NMADD:
            return true;
        }
        break;
    case 59:
        switch (inst_get_field(inst, 21, 30)) {
        case OP_59_FADDS:
        case OP_59_FSUBS:
        case OP_59_FDIVS:
        case OP_59_FRES:
        case OP_59_FRSQRTES:
            return true;
        }
        switch (inst_get_field(inst, 26, 30)) {
        case OP_59_FMULS:
        case OP_59_FMSUBS:
        case OP_59_FMADDS:
        case OP_59_FNMSUBS:
        case OP_59_FNMADDS:
            return true;
        }
        break;
    case 63:
        switch (inst_get_field(inst, 21, 30)) {
        case OP_63_MTFSB0:
        case OP_63_MTFSB1:
        case OP_63_MTFSF:
        case OP_63_MTFSFI:
        case OP_63_MCRFS:
        case OP_63_MFFS:
        case OP_63_FCMPU:
        case OP_63_FCMPO:
        case OP_63_FNEG:
        case OP_63_FMR:
        case OP_63_FABS:
        case OP_63_FRSP:
        case OP_63_FDIV:
        case OP_63_FADD:
        case OP_63_FSUB:
        case OP_63_FCTIW:
        case OP_63_FCTIWZ:
        case OP_63_FRSQRTE:
        case OP_63_FCPSGN:
            return true;
        }
        switch (inst_get_field(inst, 26, 30)) {
        case OP_63_FMUL:
        case OP_63_FSEL:
        case OP_63_FMSUB:
        case OP_63_FMADD:
        case OP_63_FNMSUB:
        case OP_63_FNMADD:
            return true;
        }
        break;
    case 31:
        switch (inst_get_field(inst, 21, 30)) {
        case OP_31_LFSX:
        case OP_31_LFSUX:
        case OP_31_LFDX:
        case OP_31_LFDUX:
        case OP_31_STFSX:
        case OP_31_STFSUX:
        case OP_31_STFX:
        case OP_31_STFUX:
        case OP_31_STFIWX:
            return true;
        }
        break;
    }

    return false;
}

static int get_d_signext(u32 inst)
{
    int d = inst & 0x8ff;

    if (d & 0x800)
        return -(d & 0x7ff);

    return (d & 0x7ff);
}

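/*
 * The psq_l/psq_st D field sits in the low 12 bits of the instruction,
 * with 0x800 acting as the sign bit; callers add the result to (rA|0)
 * to form the effective address.
 */
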
static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
                              int reg_out, int reg_in1, int reg_in2,
                              int reg_in3, int scalar,
                              void (*func)(u64 *fpscr,
                                           u32 *dst, u32 *src1,
                                           u32 *src2, u32 *src3))
{
    u32 *qpr = vcpu->arch.qpr;
    u32 ps0_out;
    u32 ps0_in1, ps0_in2, ps0_in3;
    u32 ps1_in1, ps1_in2, ps1_in3;

    /* RC */
    WARN_ON(rc);

    /* PS0 */
    kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
    kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
    kvm_cvt_df(&VCPU_FPR(vcpu, reg_in3), &ps0_in3);

    if (scalar & SCALAR_LOW)
        ps0_in2 = qpr[reg_in2];

    func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);

    dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
                      ps0_in1, ps0_in2, ps0_in3, ps0_out);

    if (!(scalar & SCALAR_NO_PS0))
        kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));

    /* PS1 */
    ps1_in1 = qpr[reg_in1];
    ps1_in2 = qpr[reg_in2];
    ps1_in3 = qpr[reg_in3];

    if (scalar & SCALAR_HIGH)
        ps1_in2 = ps0_in2;

    if (!(scalar & SCALAR_NO_PS1))
        func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);

    dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
                      ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);

    return EMULATE_DONE;
}

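/*
 * Rough sketch of how the A-form decode below uses this helper,
 * assuming fps_fmadds(fpscr, dst, a, c, b) computes a * c + b in
 * single precision: ps_madds0 becomes
 *
 *     kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb,
 *                        SCALAR_HIGH, fps_fmadds);
 *
 * i.e. ps0 = A.ps0 * C.ps0 + B.ps0 and, because SCALAR_HIGH reuses
 * C's ps0 in the ps1 lane, ps1 = A.ps1 * C.ps0 + B.ps1.
 */
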
static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
                            int reg_out, int reg_in1, int reg_in2,
                            int scalar,
                            void (*func)(u64 *fpscr,
                                         u32 *dst, u32 *src1,
                                         u32 *src2))
{
    u32 *qpr = vcpu->arch.qpr;
    u32 ps0_out;
    u32 ps0_in1, ps0_in2;
    u32 ps1_out;
    u32 ps1_in1, ps1_in2;

    /* RC */
    WARN_ON(rc);

    /* PS0 */
    kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);

    if (scalar & SCALAR_LOW)
        ps0_in2 = qpr[reg_in2];
    else
        kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);

    func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2);

    if (!(scalar & SCALAR_NO_PS0)) {
        dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
                          ps0_in1, ps0_in2, ps0_out);

        kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
    }

    /* PS1 */
    ps1_in1 = qpr[reg_in1];
    ps1_in2 = qpr[reg_in2];

    if (scalar & SCALAR_HIGH)
        ps1_in2 = ps0_in2;

    func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2);

    if (!(scalar & SCALAR_NO_PS1)) {
        qpr[reg_out] = ps1_out;

        dprintk(KERN_INFO "PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n",
                          ps1_in1, ps1_in2, qpr[reg_out]);
    }

    return EMULATE_DONE;
}

static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
                            int reg_out, int reg_in,
                            void (*func)(u64 *t,
                                         u32 *dst, u32 *src1))
{
    u32 *qpr = vcpu->arch.qpr;
    u32 ps0_out, ps0_in;
    u32 ps1_in;

    /* RC */
    WARN_ON(rc);

    /* PS0 */
    kvm_cvt_df(&VCPU_FPR(vcpu, reg_in), &ps0_in);
    func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in);

    dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
                      ps0_in, ps0_out);

    kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));

    /* PS1 */
    ps1_in = qpr[reg_in];
    func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in);

    dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
                      ps1_in, qpr[reg_out]);

    return EMULATE_DONE;
}

int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    u32 inst;
    enum emulation_result emulated = EMULATE_DONE;
    int ax_rd, ax_ra, ax_rb, ax_rc;
    short full_d;
    u64 *fpr_d, *fpr_a, *fpr_b, *fpr_c;

    bool rcomp;
    u32 cr;
#ifdef DEBUG
    int i;
#endif

    emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
    if (emulated != EMULATE_DONE)
        return emulated;

    ax_rd = inst_get_field(inst, 6, 10);
    ax_ra = inst_get_field(inst, 11, 15);
    ax_rb = inst_get_field(inst, 16, 20);
    ax_rc = inst_get_field(inst, 21, 25);
    full_d = inst_get_field(inst, 16, 31);

    fpr_d = &VCPU_FPR(vcpu, ax_rd);
    fpr_a = &VCPU_FPR(vcpu, ax_ra);
    fpr_b = &VCPU_FPR(vcpu, ax_rb);
    fpr_c = &VCPU_FPR(vcpu, ax_rc);

    rcomp = (inst & 1) ? true : false;
    cr = kvmppc_get_cr(vcpu);

    if (!kvmppc_inst_is_paired_single(vcpu, inst))
        return EMULATE_FAIL;

    if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
        return EMULATE_AGAIN;
    }

    kvmppc_giveup_ext(vcpu, MSR_FP);
    preempt_disable();
    enable_kernel_fp();
    /* Do we need to clear FE0 / FE1 here? Don't think so. */

#ifdef DEBUG
    for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
        u32 f;
        kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
        dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
                i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]);
    }
#endif

    switch (get_op(inst)) {
    case OP_PSQ_L:
    {
        ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
        bool w = inst_get_field(inst, 16, 16) ? true : false;
        int i = inst_get_field(inst, 17, 19);

        addr += get_d_signext(inst);
        emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
        break;
    }
    case OP_PSQ_LU:
    {
        ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
        bool w = inst_get_field(inst, 16, 16) ? true : false;
        int i = inst_get_field(inst, 17, 19);

        addr += get_d_signext(inst);
        emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);

        if (emulated == EMULATE_DONE)
            kvmppc_set_gpr(vcpu, ax_ra, addr);
        break;
    }
    case OP_PSQ_ST:
    {
        ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
        bool w = inst_get_field(inst, 16, 16) ? true : false;
        int i = inst_get_field(inst, 17, 19);

        addr += get_d_signext(inst);
        emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
        break;
    }
    case OP_PSQ_STU:
    {
        ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
        bool w = inst_get_field(inst, 16, 16) ? true : false;
        int i = inst_get_field(inst, 17, 19);

        addr += get_d_signext(inst);
        emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);

        if (emulated == EMULATE_DONE)
            kvmppc_set_gpr(vcpu, ax_ra, addr);
        break;
    }
    case 4:
        /* X form */
        switch (inst_get_field(inst, 21, 30)) {
        case OP_4X_PS_CMPU0:
            /* XXX */
            emulated = EMULATE_FAIL;
            break;
        case OP_4X_PSQ_LX:
        {
            ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
            bool w = inst_get_field(inst, 21, 21) ? true : false;
            int i = inst_get_field(inst, 22, 24);

            addr += kvmppc_get_gpr(vcpu, ax_rb);
            emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
            break;
        }
        case OP_4X_PS_CMPO0:
            /* XXX */
            emulated = EMULATE_FAIL;
            break;
        case OP_4X_PSQ_LUX:
        {
            ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
            bool w = inst_get_field(inst, 21, 21) ? true : false;
            int i = inst_get_field(inst, 22, 24);

            addr += kvmppc_get_gpr(vcpu, ax_rb);
            emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);

            if (emulated == EMULATE_DONE)
                kvmppc_set_gpr(vcpu, ax_ra, addr);
            break;
        }
        case OP_4X_PS_NEG:
            VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
            VCPU_FPR(vcpu, ax_rd) ^= 0x8000000000000000ULL;
            vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
            vcpu->arch.qpr[ax_rd] ^= 0x80000000;
            break;
        case OP_4X_PS_CMPU1:
            /* XXX */
            emulated = EMULATE_FAIL;
            break;
        case OP_4X_PS_MR:
            WARN_ON(rcomp);
            VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
            vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
            break;
        case OP_4X_PS_CMPO1:
            /* XXX */
            emulated = EMULATE_FAIL;
            break;
        case OP_4X_PS_NABS:
            WARN_ON(rcomp);
            VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
            VCPU_FPR(vcpu, ax_rd) |= 0x8000000000000000ULL;
            vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
            vcpu->arch.qpr[ax_rd] |= 0x80000000;
            break;
        case OP_4X_PS_ABS:
            WARN_ON(rcomp);
            VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
            VCPU_FPR(vcpu, ax_rd) &= ~0x8000000000000000ULL;
            vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
            vcpu->arch.qpr[ax_rd] &= ~0x80000000;
            break;
        case OP_4X_PS_MERGE00:
            WARN_ON(rcomp);
            VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
            /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
            kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
                       &vcpu->arch.qpr[ax_rd]);
            break;
        case OP_4X_PS_MERGE01:
            WARN_ON(rcomp);
            VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
            vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
            break;
        case OP_4X_PS_MERGE10:
            WARN_ON(rcomp);
            /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
            kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
                       &VCPU_FPR(vcpu, ax_rd));
            /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
            kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
                       &vcpu->arch.qpr[ax_rd]);
            break;
        case OP_4X_PS_MERGE11:
            WARN_ON(rcomp);
            /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
            kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
                       &VCPU_FPR(vcpu, ax_rd));
            vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
            break;
        }
        /* XW form */
        switch (inst_get_field(inst, 25, 30)) {
        case OP_4XW_PSQ_STX:
        {
            ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
            bool w = inst_get_field(inst, 21, 21) ? true : false;
            int i = inst_get_field(inst, 22, 24);

            addr += kvmppc_get_gpr(vcpu, ax_rb);
            emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
            break;
        }
        case OP_4XW_PSQ_STUX:
        {
            ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
            bool w = inst_get_field(inst, 21, 21) ? true : false;
            int i = inst_get_field(inst, 22, 24);

            addr += kvmppc_get_gpr(vcpu, ax_rb);
            emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);

            if (emulated == EMULATE_DONE)
                kvmppc_set_gpr(vcpu, ax_ra, addr);
            break;
        }
        }
        /* A form */
        switch (inst_get_field(inst, 26, 30)) {
        case OP_4A_PS_SUM1:
            emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                    ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
            VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rc);
            break;
        case OP_4A_PS_SUM0:
            emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                    ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds);
            vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc];
            break;
        case OP_4A_PS_MULS0:
            emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                    ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls);
            break;
        case OP_4A_PS_MULS1:
            emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                    ax_ra, ax_rc, SCALAR_LOW, fps_fmuls);
            break;
        case OP_4A_PS_MADDS0:
            emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
                    ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds);
            break;
        case OP_4A_PS_MADDS1:
            emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
                    ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds);
            break;
        case OP_4A_PS_DIV:
            emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                    ax_ra, ax_rb, SCALAR_NONE, fps_fdivs);
            break;
        case OP_4A_PS_SUB:
            emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                    ax_ra, ax_rb, SCALAR_NONE, fps_fsubs);
            break;
        case OP_4A_PS_ADD:
            emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                    ax_ra, ax_rb, SCALAR_NONE, fps_fadds);
            break;
        case OP_4A_PS_SEL:
            emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
                    ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel);
            break;
        case OP_4A_PS_RES:
            emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
                    ax_rb, fps_fres);
            break;
        case OP_4A_PS_MUL:
            emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
                    ax_ra, ax_rc, SCALAR_NONE, fps_fmuls);
            break;
        case OP_4A_PS_RSQRTE:
            emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
                    ax_rb, fps_frsqrte);
            break;
        case OP_4A_PS_MSUB:
            emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
                    ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs);
            break;
        case OP_4A_PS_MADD:
            emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
                    ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds);
            break;
        case OP_4A_PS_NMSUB:
            emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
                    ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs);
            break;
        case OP_4A_PS_NMADD:
            emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
                    ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds);
            break;
        }
        break;
    /* Real FPU operations */

    case OP_LFS:
    {
        ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

        emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
                                           FPU_LS_SINGLE);
        break;
    }
    case OP_LFSU:
    {
        ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

        emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
                                           FPU_LS_SINGLE);

        if (emulated == EMULATE_DONE)
            kvmppc_set_gpr(vcpu, ax_ra, addr);
        break;
    }
    case OP_LFD:
    {
        ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

        emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
                                           FPU_LS_DOUBLE);
        break;
    }
    case OP_LFDU:
    {
        ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

        emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
                                           FPU_LS_DOUBLE);

        if (emulated == EMULATE_DONE)
            kvmppc_set_gpr(vcpu, ax_ra, addr);
        break;
    }
    case OP_STFS:
    {
        ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

        emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
                                            FPU_LS_SINGLE);
        break;
    }
    case OP_STFSU:
    {
        ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

        emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
                                            FPU_LS_SINGLE);

        if (emulated == EMULATE_DONE)
            kvmppc_set_gpr(vcpu, ax_ra, addr);
        break;
    }
    case OP_STFD:
    {
        ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

        emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
                                            FPU_LS_DOUBLE);
        break;
    }
    case OP_STFDU:
    {
        ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

        emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
                                            FPU_LS_DOUBLE);

        if (emulated == EMULATE_DONE)
            kvmppc_set_gpr(vcpu, ax_ra, addr);
        break;
    }
    case 31:
        switch (inst_get_field(inst, 21, 30)) {
        case OP_31_LFSX:
        {
            ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;

            addr += kvmppc_get_gpr(vcpu, ax_rb);
            emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
                                               addr, FPU_LS_SINGLE);
            break;
        }
        case OP_31_LFSUX:
        {
            ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
                         kvmppc_get_gpr(vcpu, ax_rb);

            emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
                                               addr, FPU_LS_SINGLE);

            if (emulated == EMULATE_DONE)
                kvmppc_set_gpr(vcpu, ax_ra, addr);
            break;
        }
        case OP_31_LFDX:
        {
            ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
                         kvmppc_get_gpr(vcpu, ax_rb);

            emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
                                               addr, FPU_LS_DOUBLE);
            break;
        }
        case OP_31_LFDUX:
        {
            ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
                         kvmppc_get_gpr(vcpu, ax_rb);

            emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
                                               addr, FPU_LS_DOUBLE);

            if (emulated == EMULATE_DONE)
                kvmppc_set_gpr(vcpu, ax_ra, addr);
            break;
        }
        case OP_31_STFSX:
        {
            ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
                         kvmppc_get_gpr(vcpu, ax_rb);

            emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
                                                addr, FPU_LS_SINGLE);
            break;
        }
        case OP_31_STFSUX:
        {
            ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
                         kvmppc_get_gpr(vcpu, ax_rb);

            emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
                                                addr, FPU_LS_SINGLE);

            if (emulated == EMULATE_DONE)
                kvmppc_set_gpr(vcpu, ax_ra, addr);
            break;
        }
        case OP_31_STFX:
        {
            ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
                         kvmppc_get_gpr(vcpu, ax_rb);

            emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
                                                addr, FPU_LS_DOUBLE);
            break;
        }
        case OP_31_STFUX:
        {
            ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
                         kvmppc_get_gpr(vcpu, ax_rb);

            emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
                                                addr, FPU_LS_DOUBLE);

            if (emulated == EMULATE_DONE)
                kvmppc_set_gpr(vcpu, ax_ra, addr);
            break;
        }
        case OP_31_STFIWX:
        {
            ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
                         kvmppc_get_gpr(vcpu, ax_rb);

            emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
                                                addr,
                                                FPU_LS_SINGLE_LOW);
            break;
        }
            break;
        }
        break;
    case 59:
        switch (inst_get_field(inst, 21, 30)) {
        case OP_59_FADDS:
            fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
            kvmppc_sync_qpr(vcpu, ax_rd);
            break;
        case OP_59_FSUBS:
            fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
            kvmppc_sync_qpr(vcpu, ax_rd);
            break;
        case OP_59_FDIVS:
            fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
            kvmppc_sync_qpr(vcpu, ax_rd);
            break;
        case OP_59_FRES:
            fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
            kvmppc_sync_qpr(vcpu, ax_rd);
            break;
        case OP_59_FRSQRTES:
            fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
            kvmppc_sync_qpr(vcpu, ax_rd);
            break;
        }
        switch (inst_get_field(inst, 26, 30)) {
        case OP_59_FMULS:
            fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
            kvmppc_sync_qpr(vcpu, ax_rd);
            break;
        case OP_59_FMSUBS:
            fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
            kvmppc_sync_qpr(vcpu, ax_rd);
            break;
        case OP_59_FMADDS:
            fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
            kvmppc_sync_qpr(vcpu, ax_rd);
            break;
        case OP_59_FNMSUBS:
            fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
            kvmppc_sync_qpr(vcpu, ax_rd);
            break;
        case OP_59_FNMADDS:
            fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
            kvmppc_sync_qpr(vcpu, ax_rd);
            break;
        }
        break;
    case 63:
        switch (inst_get_field(inst, 21, 30)) {
        case OP_63_MTFSB0:
        case OP_63_MTFSB1:
        case OP_63_MCRFS:
        case OP_63_MTFSFI:
            /* XXX need to implement */
            break;
        case OP_63_MFFS:
            /* XXX missing CR */
            *fpr_d = vcpu->arch.fp.fpscr;
            break;
        case OP_63_MTFSF:
            /* XXX missing fm bits */
            /* XXX missing CR */
            vcpu->arch.fp.fpscr = *fpr_b;
            break;
        case OP_63_FCMPU:
        {
            u32 tmp_cr;
            u32 cr0_mask = 0xf0000000;
            u32 cr_shift = inst_get_field(inst, 6, 8) * 4;

            fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
            cr &= ~(cr0_mask >> cr_shift);
            cr |= (tmp_cr & cr0_mask) >> cr_shift;
            break;
        }
        case OP_63_FCMPO:
        {
            u32 tmp_cr;
            u32 cr0_mask = 0xf0000000;
            u32 cr_shift = inst_get_field(inst, 6, 8) * 4;

            fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
            cr &= ~(cr0_mask >> cr_shift);
            cr |= (tmp_cr & cr0_mask) >> cr_shift;
            break;
        }
        case OP_63_FNEG:
            fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
            break;
        case OP_63_FMR:
            *fpr_d = *fpr_b;
            break;
        case OP_63_FABS:
            fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
            break;
        case OP_63_FCPSGN:
            fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
            break;
        case OP_63_FDIV:
            fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
            break;
        case OP_63_FADD:
            fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
            break;
        case OP_63_FSUB:
            fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
            break;
        case OP_63_FCTIW:
            fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
            break;
        case OP_63_FCTIWZ:
            fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
            break;
        case OP_63_FRSP:
            fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
            kvmppc_sync_qpr(vcpu, ax_rd);
            break;
        case OP_63_FRSQRTE:
        {
            double one = 1.0f;

            /* fD = sqrt(fB) */
            fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
            /* fD = 1.0f / fD */
            fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
            break;
        }
        }
        switch (inst_get_field(inst, 26, 30)) {
        case OP_63_FMUL:
            fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
            break;
        case OP_63_FSEL:
            fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
            break;
        case OP_63_FMSUB:
            fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
            break;
        case OP_63_FMADD:
            fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
            break;
        case OP_63_FNMSUB:
            fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
            break;
        case OP_63_FNMADD:
            fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
            break;
        }
        break;
    }

#ifdef DEBUG
    for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
        u32 f;
        kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
        dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
    }
#endif

    if (rcomp)
        kvmppc_set_cr(vcpu, cr);

    disable_kernel_fp();
    preempt_enable();

    return emulated;
}