/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

#define OP_TRAP 3
#define OP_TRAP_64 2
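
/*
 * Instructions with primary opcode 31 all share that opcode and are
 * distinguished by a 10-bit extended opcode (xop) field, so they get
 * their own table of defines below.
 */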
#define OP_31_XOP_LWZX      23
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_STH  44
#define OP_STHU 45
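
/*
 * On Book E the guest gates its decrementer interrupt with the DIE bit
 * in TCR; 64-bit server processors have no such gate, so there the
 * decrementer is treated as always enabled.
 */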
#ifdef CONFIG_PPC64
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
	return 1;
}
#else
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.tcr & TCR_DIE;
}
#endif
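
/*
 * Called when the guest writes DEC: re-arm the host hrtimer that stands
 * in for the guest decrementer, so it expires after the programmed
 * number of timebase ticks.
 */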
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
#ifdef CONFIG_PPC64
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);

	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif
	if (kvmppc_dec_enabled(vcpu)) {
		/* The decrementer ticks at the same rate as the timebase, so
		 * that's how we convert the guest DEC value to the number of
		 * host ticks. */
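
		/*
		 * tb_ticks_per_usec is timebase ticks per microsecond, so
		 * ns = ticks * 1000 / tb_ticks_per_usec. For example, with
		 * a 512 MHz timebase (tb_ticks_per_usec == 512), a DEC of
		 * 512000 ticks becomes 512000 * 1000 / 512 = 1000000 ns,
		 * i.e. 1 ms. Multiplying before dividing keeps the
		 * sub-microsecond precision.
		 */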
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		dec_nsec = vcpu->arch.dec;
		dec_nsec *= 1000;
		dec_nsec /= tb_ticks_per_usec;
		hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
			      HRTIMER_MODE_REL);
		vcpu->arch.dec_jiffies = get_tb();
	} else {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
	}
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
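/*
 * Emulate the instruction that last trapped into the hypervisor.
 * Returns EMULATE_DONE when the instruction was handled here,
 * EMULATE_DO_MMIO when userspace must complete an I/O access (as set
 * by the load/store helpers), or EMULATE_FAIL when nobody could
 * decode it.
 */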
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = vcpu->arch.last_inst;
	u32 ea;
	int ra;
	int rb;
	int rs;
	int rt;
	int sprn;
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	/* The fetch itself failed; leave the PC untouched so the guest
	 * simply tries the instruction again next time. */
	if (inst == KVM_INST_FETCH_FAILED)
		return EMULATE_DONE;
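
	/* Dispatch on the 6-bit primary opcode; opcode 31 is decoded
	 * further by its extended opcode below. */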
	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC64
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_LWZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_STWX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 1);
			break;

		case OP_31_XOP_STBX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			break;
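
		/*
		 * Update-form (u/ux) accesses compute
		 * EA = (ra ? GPR[ra] : 0) + GPR[rb] and write the EA back
		 * into ra after the access.
		 */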
		case OP_31_XOP_STBUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			/* stbux updates ra, not rs, with the EA */
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_LHZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_MFSPR:
			sprn = get_sprn(inst);
			rt = get_rt(inst);

			switch (sprn) {
			case SPRN_SRR0:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
			case SPRN_SRR1:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
			case SPRN_PVR:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
			case SPRN_PIR:
				kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
			case SPRN_MSSSR0:
				kvmppc_set_gpr(vcpu, rt, 0); break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
			case SPRN_TBWU:
				kvmppc_set_gpr(vcpu, rt, get_tb()); break;

			case SPRN_SPRG0:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break;
			case SPRN_SPRG1:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break;
			case SPRN_SPRG2:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break;
			case SPRN_SPRG3:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */
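
			/* DEC counts down at the timebase rate, so the
			 * current value is whatever was last written minus
			 * the TB ticks elapsed since then (despite its name,
			 * dec_jiffies holds a timebase snapshot). */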
			case SPRN_DEC:
			{
				u64 jd = get_tb() - vcpu->arch.dec_jiffies;
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
				pr_debug("mfDEC: %x - %llx = %lx\n",
					 vcpu->arch.dec, jd,
					 kvmppc_get_gpr(vcpu, rt));
				break;
			}
			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
				if (emulated == EMULATE_FAIL) {
					printk("mfspr: unknown spr %x\n", sprn);
					kvmppc_set_gpr(vcpu, rt, 0);
				}
				break;
			}
			break;

		case OP_31_XOP_STHX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			break;

		case OP_31_XOP_STHUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_MTSPR:
			sprn = get_sprn(inst);
			rs = get_rs(inst);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
			case SPRN_SRR1:
				vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_MSSSR0: break;

			case SPRN_DEC:
				vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break;
			case SPRN_SPRG1:
				vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break;
			case SPRN_SPRG2:
				vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break;
			case SPRN_SPRG3:
				vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
				if (emulated == EMULATE_FAIL)
					printk("mtspr: unknown spr %x\n", sprn);
				break;
			}
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;
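
		/*
		 * The brx forms transfer data byte-reversed, which is why
		 * the helpers are called with is_bigendian == 0 below.
		 */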
		case OP_31_XOP_LWBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 0);
			break;

		case OP_31_XOP_LHBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;
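
	/*
	 * D-form loads and stores: the faulting address was already
	 * resolved by the exit handler; the update (u) forms write the
	 * accessed address (vcpu->arch.paddr_accessed) back into ra.
	 */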
	case OP_LWZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LBZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STW:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		break;

	case OP_STWU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STB:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		break;

	case OP_STBU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LHZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STH:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		break;

	case OP_STHU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);

	if (advance)
		vcpu->arch.pc += 4; /* Advance past emulated instruction. */

	return emulated;
}