/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/asm-offsets.h>

#define MIPSX(name)	mips32_ ## name
#define CALLFRAME_SIZ	32
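
/*
 * Stack space reserved around the call into C (kvm_mips_handle_exit):
 * enough for the ABI argument save area plus alignment padding.
 */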
/*
 * exception vector entrypoint
 */
#define VECTOR(x, regmask)	\
	.ent	_C_LABEL(x), 0;	\
	EXPORT(x);

#define VECTOR_END(x)	\
	EXPORT(x);
/* Overload, Danger Will Robinson!! */
#define PT_HOST_ASID		PT_BVADDR
#define PT_HOST_USERLOCAL	PT_EPC
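
/*
 * While the guest is running, the host pt_regs frame does not need its
 * BVADDR/EPC slots, so they are reused to stash the host ASID and the host
 * DDATA_LO (UserLocal) value saved on entry.
 */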
#define CP0_DDATA_LO	$28,3
#define CP0_CONFIG3	$16,3
#define CP0_CONFIG5	$16,5
#define CP0_EBASE	$15,1

#define CP0_INTCTL	$12,1
#define CP0_SRSCTL	$12,2
#define CP0_SRSMAP	$12,3
#define CP0_HWRENA	$7,0

#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_HOST		RESUME_FLAG_HOST
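
/*
 * kvm_mips_handle_exit() hands its result back through these codes:
 * RESUME_GUEST re-enters the guest, while RESUME_HOST (RESUME_FLAG_HOST set)
 * drops back to the host to complete the KVM_RUN ioctl (I/O emulation etc.).
 */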
/*
 * __kvm_mips_vcpu_run: entry point to the guest
 *  a0: run  (vcpu->run)
 *  a1: vcpu
 */
FEXPORT(__kvm_mips_vcpu_run)
	/* k0/k1 not being used in host kernel context */
	INT_ADDIU k1, sp, -PT_SIZE
	LONG_S	$10, PT_R10(k1)
	LONG_S	$11, PT_R11(k1)
	LONG_S	$12, PT_R12(k1)
	LONG_S	$13, PT_R13(k1)
	LONG_S	$14, PT_R14(k1)
	LONG_S	$15, PT_R15(k1)
	LONG_S	$16, PT_R16(k1)
	LONG_S	$17, PT_R17(k1)

	LONG_S	$18, PT_R18(k1)
	LONG_S	$19, PT_R19(k1)
	LONG_S	$20, PT_R20(k1)
	LONG_S	$21, PT_R21(k1)
	LONG_S	$22, PT_R22(k1)
	LONG_S	$23, PT_R23(k1)
	LONG_S	$24, PT_R24(k1)
	LONG_S	$25, PT_R25(k1)
	/*
	 * XXXKYMA k0/k1 not saved, not being used if we got here through
	 * an ioctl()
	 */
	LONG_S	$28, PT_R28(k1)
	LONG_S	$29, PT_R29(k1)
	LONG_S	$30, PT_R30(k1)
	LONG_S	$31, PT_R31(k1)

	/* Save host status */
	mfc0	v0, CP0_STATUS
	LONG_S	v0, PT_STATUS(k1)

	/* Save host ASID, shove it into the BVADDR location */
	mfc0	v1, CP0_ENTRYHI
	andi	v1, 0xff
	LONG_S	v1, PT_HOST_ASID(k1)
	/* Save DDATA_LO, will be used to store pointer to vcpu */
	mfc0	v1, CP0_DDATA_LO
	LONG_S	v1, PT_HOST_USERLOCAL(k1)

	/* DDATA_LO has pointer to vcpu */
	mtc0	a1, CP0_DDATA_LO
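
	/*
	 * Keeping the vcpu pointer in DDATA_LO means the exception vector can
	 * recover it later using nothing but k0/k1 (see MIPSX(GuestException)).
	 */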
	/* Offset into vcpu->arch */
	INT_ADDIU k1, a1, VCPU_HOST_ARCH

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	LONG_S	sp, VCPU_HOST_STACK(k1)

	/* Save the kernel gp as well */
	LONG_S	gp, VCPU_HOST_GP(k1)
	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)

	/* load up the new EBASE */
	LONG_L	k0, VCPU_GUEST_EBASE(k1)

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	li	k0, (ST0_EXL | KSU_USER | ST0_IE)
	LONG_L	t0, VCPU_PC(k1)

FEXPORT(__kvm_mips_load_asid)
	/* Set the ASID for the Guest Kernel */
	INT_SLL	t0, t0, 1			/* with kseg0 @ 0x40000000, kernel */
						/* addresses shift to 0x80000000 */
	bltz	t0, 1f				/* If kernel */
	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID	/* (BD) */
	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID		/* else user */
1:
	/* t1: contains the base of the ASID array, need to get the cpu id */
	LONG_L	t2, TI_CPU($28)			/* smp_processor_id */
	INT_SLL	t2, t2, 2			/* x4 */
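
	/*
	 * t1 + (cpu * 4) indexes this CPU's slot in the guest ASID array;
	 * guest kernel mode was detected above from the sign bit of the PC
	 * shifted left once, since guest kseg0 is mapped at 0x40000000 here.
	 */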
	/* Disable RDHWR access */
	mtc0	zero, CP0_HWRENA
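
	/*
	 * With HWRENA cleared, any rdhwr the guest executes will trap so that
	 * KVM can emulate it.
	 */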
	/* Now load up the Guest Context from VCPU */
	LONG_L	$1, VCPU_R1(k1)
	LONG_L	$2, VCPU_R2(k1)
	LONG_L	$3, VCPU_R3(k1)

	LONG_L	$4, VCPU_R4(k1)
	LONG_L	$5, VCPU_R5(k1)
	LONG_L	$6, VCPU_R6(k1)
	LONG_L	$7, VCPU_R7(k1)

	LONG_L	$8, VCPU_R8(k1)
	LONG_L	$9, VCPU_R9(k1)
	LONG_L	$10, VCPU_R10(k1)
	LONG_L	$11, VCPU_R11(k1)
	LONG_L	$12, VCPU_R12(k1)
	LONG_L	$13, VCPU_R13(k1)
	LONG_L	$14, VCPU_R14(k1)
	LONG_L	$15, VCPU_R15(k1)
	LONG_L	$16, VCPU_R16(k1)
	LONG_L	$17, VCPU_R17(k1)
	LONG_L	$18, VCPU_R18(k1)
	LONG_L	$19, VCPU_R19(k1)
	LONG_L	$20, VCPU_R20(k1)
	LONG_L	$21, VCPU_R21(k1)
	LONG_L	$22, VCPU_R22(k1)
	LONG_L	$23, VCPU_R23(k1)
	LONG_L	$24, VCPU_R24(k1)
	LONG_L	$25, VCPU_R25(k1)
	/* k0/k1 loaded up later */

	LONG_L	$28, VCPU_R28(k1)
	LONG_L	$29, VCPU_R29(k1)
	LONG_L	$30, VCPU_R30(k1)
	LONG_L	$31, VCPU_R31(k1)

	/* Restore hi/lo */
	LONG_L	k0, VCPU_LO(k1)
	mtlo	k0

	LONG_L	k0, VCPU_HI(k1)
	mthi	k0

FEXPORT(__kvm_mips_load_k0k1)
	/* Restore the guest's k0/k1 registers */
	LONG_L	k0, VCPU_R26(k1)
	LONG_L	k1, VCPU_R27(k1)

	eret
VECTOR(MIPSX(exception), unknown)
/* Find out what mode we came from and jump to the proper handler. */
	mtc0	k0, CP0_ERROREPC	#01: Save guest k0

	mfc0	k0, CP0_EBASE		#02: Get EBASE
	INT_SRL	k0, k0, 10		#03: Get rid of CPUNum
	INT_SLL	k0, k0, 10		#04
	LONG_S	k1, 0x3000(k0)		#05: Save k1 @ offset 0x3000
	INT_ADDIU k0, k0, 0x2000	#06: Exception handler is
					#    installed @ offset 0x2000
	j	k0			#07: jump to the function
	 nop				#08: branch delay slot
VECTOR_END(MIPSX(exceptionEnd))
.end MIPSX(exception)
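
/*
 * The stub above only locates the guest EBASE area (by stripping CPUNum from
 * EBASE), stashes guest k1 at offset 0x3000 within it, and jumps to offset
 * 0x2000, where the full MIPSX(GuestException) handler is expected to have
 * been installed by the KVM setup code.
 */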
/*
 * Generic Guest exception handler. We end up here when the guest
 * does something that causes a trap to kernel mode.
 */
NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
	/* Get the VCPU pointer from DDATA_LO */
	mfc0	k1, CP0_DDATA_LO
	INT_ADDIU k1, k1, VCPU_HOST_ARCH
	/* Start saving Guest context to VCPU */
	LONG_S	$0, VCPU_R0(k1)
	LONG_S	$1, VCPU_R1(k1)
	LONG_S	$2, VCPU_R2(k1)
	LONG_S	$3, VCPU_R3(k1)
	LONG_S	$4, VCPU_R4(k1)
	LONG_S	$5, VCPU_R5(k1)
	LONG_S	$6, VCPU_R6(k1)
	LONG_S	$7, VCPU_R7(k1)
	LONG_S	$8, VCPU_R8(k1)
	LONG_S	$9, VCPU_R9(k1)
	LONG_S	$10, VCPU_R10(k1)
	LONG_S	$11, VCPU_R11(k1)
	LONG_S	$12, VCPU_R12(k1)
	LONG_S	$13, VCPU_R13(k1)
	LONG_S	$14, VCPU_R14(k1)
	LONG_S	$15, VCPU_R15(k1)
	LONG_S	$16, VCPU_R16(k1)
	LONG_S	$17, VCPU_R17(k1)
	LONG_S	$18, VCPU_R18(k1)
	LONG_S	$19, VCPU_R19(k1)
	LONG_S	$20, VCPU_R20(k1)
	LONG_S	$21, VCPU_R21(k1)
	LONG_S	$22, VCPU_R22(k1)
	LONG_S	$23, VCPU_R23(k1)
	LONG_S	$24, VCPU_R24(k1)
	LONG_S	$25, VCPU_R25(k1)
	/* Guest k0/k1 saved later */

	LONG_S	$28, VCPU_R28(k1)
	LONG_S	$29, VCPU_R29(k1)
	LONG_S	$30, VCPU_R30(k1)
	LONG_S	$31, VCPU_R31(k1)

	/* We need to save hi/lo and restore them on the way out */
	mfhi	t0
	LONG_S	t0, VCPU_HI(k1)

	mflo	t0
	LONG_S	t0, VCPU_LO(k1)

	/* Finally save guest k0/k1 to VCPU */
	mfc0	t0, CP0_ERROREPC
	LONG_S	t0, VCPU_R26(k1)

	/* Get GUEST k1 and save it in VCPU */
	LONG_L	t0, 0x3000(t0)
	LONG_S	t0, VCPU_R27(k1)
	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	mfc0	a1, CP0_DDATA_LO
	move	s1, a1

	/* Restore run (vcpu->run) */
	LONG_L	a0, VCPU_RUN(a1)
	/* Save pointer to run in s0, will be saved by the compiler */
	move	s0, a0
	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
	 * process the exception
	 */
	mfc0	k0, CP0_EPC
	LONG_S	k0, VCPU_PC(k1)

	mfc0	k0, CP0_BADVADDR
	LONG_S	k0, VCPU_HOST_CP0_BADVADDR(k1)

	mfc0	k0, CP0_CAUSE
	LONG_S	k0, VCPU_HOST_CP0_CAUSE(k1)

	mfc0	k0, CP0_ENTRYHI
	LONG_S	k0, VCPU_HOST_ENTRYHI(k1)
	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	LONG_L	k0, VCPU_HOST_EBASE(k1)
	mtc0	k0, CP0_EBASE
	/*
	 * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
	 * trigger FPE for pending exceptions.
	 */
	sw	t0, VCPU_FCR31(k1)
#ifdef CONFIG_CPU_HAS_MSA
	/*
	 * If MSA is enabled, save MSACSR and clear it so that later
	 * instructions don't trigger MSAFPE for pending exceptions.
	 */
	ext	t0, t0, 28, 1		/* MIPS_CONF3_MSAP */
	ext	t0, t0, 27, 1		/* MIPS_CONF5_MSAEN */
	sw	t0, VCPU_MSA_CSR(k1)
	_ctcmsa	MSA_CSR, zero
	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)

	/* Load up host GP */
	LONG_L	gp, VCPU_HOST_GP(k1)

	/* Need a stack before we can jump to "C" */
	LONG_L	sp, VCPU_HOST_STACK(k1)
	/* Saved host state */
	INT_ADDIU sp, sp, -PT_SIZE
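
	/*
	 * sp now points at the pt_regs frame that __kvm_mips_vcpu_run built on
	 * entry, so the PT_HOST_* slots saved there are reachable again.
	 */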
	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host DDATA_LO */
	LONG_L	k0, PT_HOST_USERLOCAL(sp)
	mtc0	k0, CP0_DDATA_LO
	/* Restore RDHWR access */
	PTR_LI	k0, 0x2000000F
	mtc0	k0, CP0_HWRENA
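
	/*
	 * 0x2000000F = HWRENA bits 3..0 (CPUNum, SYNCI_Step, CC, CCRes) plus
	 * bit 29 (UserLocal), the registers the kernel normally exposes to
	 * user-mode rdhwr.
	 */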
	/* Jump to handler */
FEXPORT(__kvm_mips_jump_to_handler)
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	PTR_LA	t9, kvm_mips_handle_exit
	jalr.hb	t9
	 INT_ADDIU sp, sp, -CALLFRAME_SIZ	/* BD Slot */
	/* Return from handler; make sure interrupts are disabled */

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */
	move	k1, s1
	INT_ADDIU k1, k1, VCPU_HOST_ARCH

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc.) or resuming the guest
	 */
	andi	t0, v0, RESUME_HOST
	bnez	t0, __kvm_mips_return_to_host
	 nop
__kvm_mips_return_to_guest:
	/* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
	mtc0	s1, CP0_DDATA_LO
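
	/*
	 * s1 is callee-saved, so it still holds the vcpu pointer across the
	 * call into C; putting it back in DDATA_LO mirrors the entry path so
	 * the next exit can find the vcpu with only k0/k1 usable.
	 */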
	/* Load up the Guest EBASE to minimize the window where BEV is set */
	LONG_L	t0, VCPU_GUEST_EBASE(k1)

	/* Switch EBASE back to the one used by KVM */

	/* Setup status register for running guest in UM */
	or	v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
	and	v1, v1, ~(ST0_CU0 | ST0_MX)

	LONG_L	t0, VCPU_PC(k1)
	/* Set the ASID for the Guest Kernel */
	INT_SLL	t0, t0, 1			/* with kseg0 @ 0x40000000, kernel */
						/* addresses shift to 0x80000000 */
	bltz	t0, 1f				/* If kernel */
	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID	/* (BD) */
	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID		/* else user */
1:
	/* t1: contains the base of the ASID array, need to get the cpu id */
	LONG_L	t2, TI_CPU($28)			/* smp_processor_id */
	INT_SLL	t2, t2, 2			/* x4 */
	/* Disable RDHWR access */
	mtc0	zero, CP0_HWRENA
	/* load the guest context from VCPU and return */
	LONG_L	$0, VCPU_R0(k1)
	LONG_L	$1, VCPU_R1(k1)
	LONG_L	$2, VCPU_R2(k1)
	LONG_L	$3, VCPU_R3(k1)
	LONG_L	$4, VCPU_R4(k1)
	LONG_L	$5, VCPU_R5(k1)
	LONG_L	$6, VCPU_R6(k1)
	LONG_L	$7, VCPU_R7(k1)
	LONG_L	$8, VCPU_R8(k1)
	LONG_L	$9, VCPU_R9(k1)
	LONG_L	$10, VCPU_R10(k1)
	LONG_L	$11, VCPU_R11(k1)
	LONG_L	$12, VCPU_R12(k1)
	LONG_L	$13, VCPU_R13(k1)
	LONG_L	$14, VCPU_R14(k1)
	LONG_L	$15, VCPU_R15(k1)
	LONG_L	$16, VCPU_R16(k1)
	LONG_L	$17, VCPU_R17(k1)
	LONG_L	$18, VCPU_R18(k1)
	LONG_L	$19, VCPU_R19(k1)
	LONG_L	$20, VCPU_R20(k1)
	LONG_L	$21, VCPU_R21(k1)
	LONG_L	$22, VCPU_R22(k1)
	LONG_L	$23, VCPU_R23(k1)
	LONG_L	$24, VCPU_R24(k1)
	LONG_L	$25, VCPU_R25(k1)
	/* k0/k1 loaded later */
	LONG_L	$28, VCPU_R28(k1)
	LONG_L	$29, VCPU_R29(k1)
	LONG_L	$30, VCPU_R30(k1)
	LONG_L	$31, VCPU_R31(k1)

FEXPORT(__kvm_mips_skip_guest_restore)
	LONG_L	k0, VCPU_HI(k1)
	mthi	k0

	LONG_L	k0, VCPU_LO(k1)
	mtlo	k0

	LONG_L	k0, VCPU_R26(k1)
	LONG_L	k1, VCPU_R27(k1)

	eret
__kvm_mips_return_to_host:
	/* EBASE is already pointing to Linux */
	LONG_L	k1, VCPU_HOST_STACK(k1)
	INT_ADDIU k1, k1, -PT_SIZE

	/* Restore host DDATA_LO */
	LONG_L	k0, PT_HOST_USERLOCAL(k1)
	mtc0	k0, CP0_DDATA_LO
	/* Restore host ASID (saved in the pt_regs frame addressed by k1) */
	LONG_L	k0, PT_HOST_ASID(k1)
	/* Load context saved on the host stack */

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
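
	/*
	 * The low two bits of v0 carry the RESUME_* flags checked earlier; the
	 * remaining bits are the value ultimately returned to the caller of
	 * __kvm_mips_vcpu_run.
	 */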
	LONG_L	$10, PT_R10(k1)
	LONG_L	$11, PT_R11(k1)
	LONG_L	$12, PT_R12(k1)
	LONG_L	$13, PT_R13(k1)
	LONG_L	$14, PT_R14(k1)
	LONG_L	$15, PT_R15(k1)
	LONG_L	$16, PT_R16(k1)
	LONG_L	$17, PT_R17(k1)
	LONG_L	$18, PT_R18(k1)
	LONG_L	$19, PT_R19(k1)
	LONG_L	$20, PT_R20(k1)
	LONG_L	$21, PT_R21(k1)
	LONG_L	$22, PT_R22(k1)
	LONG_L	$23, PT_R23(k1)
	LONG_L	$24, PT_R24(k1)
	LONG_L	$25, PT_R25(k1)

	/* Host k0/k1 were not saved */

	LONG_L	$28, PT_R28(k1)
	LONG_L	$29, PT_R29(k1)
	LONG_L	$30, PT_R30(k1)
	/* Restore RDHWR access */
	PTR_LI	k0, 0x2000000F
	mtc0	k0, CP0_HWRENA

	/* Restore RA, which is the address we will return to */
	LONG_L	ra, PT_R31(k1)
	j	ra
	 nop

VECTOR_END(MIPSX(GuestExceptionEnd))
.end MIPSX(GuestException)

	##### The exception handlers.
	.word _C_LABEL(MIPSX(GuestException))	# 0
	.word _C_LABEL(MIPSX(GuestException))	# 1
	.word _C_LABEL(MIPSX(GuestException))	# 2
	.word _C_LABEL(MIPSX(GuestException))	# 3
	.word _C_LABEL(MIPSX(GuestException))	# 4
	.word _C_LABEL(MIPSX(GuestException))	# 5
	.word _C_LABEL(MIPSX(GuestException))	# 6
	.word _C_LABEL(MIPSX(GuestException))	# 7
	.word _C_LABEL(MIPSX(GuestException))	# 8
	.word _C_LABEL(MIPSX(GuestException))	# 9
	.word _C_LABEL(MIPSX(GuestException))	# 10
	.word _C_LABEL(MIPSX(GuestException))	# 11
	.word _C_LABEL(MIPSX(GuestException))	# 12
	.word _C_LABEL(MIPSX(GuestException))	# 13
	.word _C_LABEL(MIPSX(GuestException))	# 14
	.word _C_LABEL(MIPSX(GuestException))	# 15
	.word _C_LABEL(MIPSX(GuestException))	# 16
	.word _C_LABEL(MIPSX(GuestException))	# 17
	.word _C_LABEL(MIPSX(GuestException))	# 18
	.word _C_LABEL(MIPSX(GuestException))	# 19
	.word _C_LABEL(MIPSX(GuestException))	# 20
	.word _C_LABEL(MIPSX(GuestException))	# 21
	.word _C_LABEL(MIPSX(GuestException))	# 22
	.word _C_LABEL(MIPSX(GuestException))	# 23
	.word _C_LABEL(MIPSX(GuestException))	# 24
	.word _C_LABEL(MIPSX(GuestException))	# 25
	.word _C_LABEL(MIPSX(GuestException))	# 26
	.word _C_LABEL(MIPSX(GuestException))	# 27
	.word _C_LABEL(MIPSX(GuestException))	# 28
	.word _C_LABEL(MIPSX(GuestException))	# 29
	.word _C_LABEL(MIPSX(GuestException))	# 30
	.word _C_LABEL(MIPSX(GuestException))	# 31
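
/*
 * All 32 cause codes point at the same low-level handler; the per-cause
 * dispatch happens later in C, in kvm_mips_handle_exit().
 */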