/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

/* Extended opcodes for instructions in primary opcode group 31. */
#define OP_31_XOP_LWZX      23
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

/* Primary opcodes of the D-form loads and stores handled below. */
#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_STH  44
#define OP_STHU 45

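/*
 * Decode helpers such as get_op() (the primary opcode, instruction bits 0:5)
 * and get_xop() (the extended opcode field used by primary opcode 31) come
 * from asm/disassemble.h, along with the get_rt/get_rs/get_ra/get_rb and
 * get_sprn field extractors used below.
 */
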
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.tcr & TCR_DIE) {
		/* The decrementer ticks at the same rate as the timebase, so
		 * that's how we convert the guest DEC value to the number of
		 * host jiffies. */
		unsigned long nr_jiffies;

		nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
		mod_timer(&vcpu->arch.dec_timer,
		          get_jiffies_64() + nr_jiffies);
	} else {
		del_timer(&vcpu->arch.dec_timer);
	}
}

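/*
 * Worked example with made-up numbers: if the timebase runs at 50 MHz and
 * HZ=250, tb_ticks_per_jiffy is 200000, so a guest writing 10000000 to DEC
 * arms dec_timer to fire 50 jiffies (200 ms) from now.
 */
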
/* XXX is_bigendian should depend on MMU mapping or MSR[LE] */

/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
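/* The final argument to kvmppc_handle_load()/kvmppc_handle_store() below is
 * is_bigendian: the byte-reversed forms (lwbrx, stwbrx, lhbrx, sthbrx) pass
 * 0, everything else passes 1, which is what the XXX above is about. */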
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = vcpu->arch.last_inst;
	u32 ea;
	int ra;
	int rb;
	int rs;
	int rt;
	int sprn;
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	switch (get_op(inst)) {
	case 3:                                         /* trap */
		vcpu->arch.esr |= ESR_PTR;
		kvmppc_core_queue_program(vcpu);
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_LWZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_STWX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               4, 1);
			break;

		case OP_31_XOP_STBX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               1, 1);
			break;

		case OP_31_XOP_STBUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               1, 1);
			/* Update form: the EA is written back to rA, not rS. */
			vcpu->arch.gpr[ra] = ea;
			break;

		case OP_31_XOP_LHZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			vcpu->arch.gpr[ra] = ea;
			break;

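		/* The update forms here (stbux, lhzux, and sthux below)
		 * write the computed effective address back to rA, per the
		 * ISA's load/store-with-update semantics. */
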
		case OP_31_XOP_MFSPR:
			sprn = get_sprn(inst);
			rt = get_rt(inst);

			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
			case SPRN_SRR1:
				vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
			case SPRN_PVR:
				vcpu->arch.gpr[rt] = mfspr(SPRN_PVR); break;
			case SPRN_PIR:
				vcpu->arch.gpr[rt] = mfspr(SPRN_PIR); break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				vcpu->arch.gpr[rt] = mftbl(); break;
			case SPRN_TBWU:
				vcpu->arch.gpr[rt] = mftbu(); break;

			case SPRN_SPRG0:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
			case SPRN_SPRG1:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
			case SPRN_SPRG2:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
			case SPRN_SPRG3:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
				if (emulated == EMULATE_FAIL) {
					printk("mfspr: unknown spr %x\n", sprn);
					vcpu->arch.gpr[rt] = 0;
				}
				break;
			}
			break;

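		/* SPRs not matched above are deferred to the core-specific
		 * mfspr hook; if that fails too, the guest just reads 0. */
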
		case OP_31_XOP_STHX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               2, 1);
			break;

		case OP_31_XOP_STHUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               2, 1);
			vcpu->arch.gpr[ra] = ea;
			break;

		case OP_31_XOP_MTSPR:
			sprn = get_sprn(inst);
			rs = get_rs(inst);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
			case SPRN_SRR1:
				vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_DEC:
				vcpu->arch.dec = vcpu->arch.gpr[rs];
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG1:
				vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG2:
				vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG3:
				vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
				if (emulated == EMULATE_FAIL)
					printk("mtspr: unknown spr %x\n", sprn);
				break;
			}
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               4, 0);
			break;

		case OP_31_XOP_LHBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

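	/* Opcode-31 forms that reached the default above left emulated as
	 * EMULATE_FAIL; they get a second chance in kvmppc_core_emulate_op()
	 * after this switch. */
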
	case OP_LWZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_LBZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STW:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               4, 1);
		break;

	case OP_STWU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               4, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STB:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               1, 1);
		break;

	case OP_STBU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               1, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_LHZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STH:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               2, 1);
		break;

	case OP_STHU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               2, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
		}
	}

	trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);

	if (advance)
		vcpu->arch.pc += 4; /* Advance past emulated instruction. */

	return emulated;
}