/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>

#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/uasm.h>

/* Temporary register numbers differ between the o32 and n32/n64 ABIs */
#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0	8
#define T1	9
#define T2	10
#define T3	11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0	12
#define T1	13
#define T2	14
#define T3	15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
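
/*
 * The generated code below also refers to the following registers by name.
 * The values are the standard MIPS GPR numbers; this list is a reconstruction
 * covering only the registers this file actually uses.
 */
#define ZERO	0
#define AT	1
#define V0	2
#define V1	3
#define A0	4
#define A1	5
#define S0	16
#define S1	17
#define T9	25
#define K0	26
#define K1	27
#define GP	28
#define SP	29
#define RA	31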

/* Some CP0 registers */
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_STATUS	12, 0
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
#define C0_CONFIG5	16, 5
#define C0_DDATA_LO	28, 3
#define C0_ERROREPC	30, 0

#define CALLFRAME_SIZ	32

#ifdef CONFIG_64BIT
#define ST0_KX_IF_64	ST0_KX
#else
#define ST0_KX_IF_64	0
#endif
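
/*
 * Each scratch register is stored as a { CP0 register number, select } pair.
 * The defaults below (DDATA_LO and ErrorEPC) are only used if no KScratch
 * registers are available; kvm_mips_entry_setup() overrides them with
 * KScratch registers when the core advertises them. For example, if the
 * masked kscratch_mask comes out as 0xfc (KScratch selects 2-7 usable),
 * scratch_vcpu ends up as { 31, 2 } and scratch_tmp as { 31, 3 }.
 */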
static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };
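
/*
 * Label ids for the uasm labels and relocations used below. The two FPU/MSA
 * labels and their UASM_L_LA() helpers are reconstructed here from their uses
 * in kvm_mips_build_exit(); the remaining helpers follow.
 */
enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
	label_exit_common,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)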
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
UASM_L_LA(_exit_common)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);
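
/*
 * How these builders fit together (an illustrative sketch only, not the exact
 * call sites): the MIPS KVM vcpu creation code allocates a page for the guest
 * exception base, writes a small vector at each exception entry point with
 * kvm_mips_build_exception(), points those vectors at a common handler
 * emitted by kvm_mips_build_exit(), and then emits the vcpu_run entry
 * function with kvm_mips_build_vcpu_run(). Roughly:
 *
 *	kvm_mips_build_exception(gebase + 0x180, handler);
 *	p = kvm_mips_build_exit(handler);
 *	vcpu->arch.vcpu_run = p;
 *	p = kvm_mips_build_vcpu_run(p);
 *
 * where gebase and handler are hypothetical locals naming the guest exception
 * base and the common handler address; see the vcpu creation code in
 * arch/mips/kvm for the real sequence.
 */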

/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns:	0 on success.
 */
int kvm_mips_entry_setup(void)
{
	/*
	 * We prefer to use KScratchN registers if they are available over the
	 * defaults above, which may not work on all cores.
	 */
	unsigned int kscratch_mask = cpu_data[0].kscratch_mask & 0xfc;

	/* Pick a scratch register for storing VCPU */
	if (kscratch_mask) {
		scratch_vcpu[0] = 31;
		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_vcpu[1]);
	}

	/* Pick a scratch register to use as a temp for saving state */
	if (kscratch_mask) {
		scratch_tmp[0] = 31;
		scratch_tmp[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_tmp[1]);
	}

	return 0;
}

static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
					unsigned int frame)
{
	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

	/* Save the temp scratch register value in cp0_cause of stack frame */
	if (scratch_tmp[0] == 31) {
		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
	}
}

static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
					   unsigned int frame)
{
	/*
	 * Restore host scratch register values saved by
	 * kvm_mips_build_save_scratch().
	 */
	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

	if (scratch_tmp[0] == 31) {
		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
	}
}

/**
 * build_set_exc_base() - Assemble code to write exception base address.
 * @p:		Code buffer pointer.
 * @reg:	Source register (generated code may set WG bit in @reg).
 *
 * Assemble code to modify the exception base address in the EBase register,
 * using the appropriately sized access and setting the WG bit if necessary.
 */
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
	if (cpu_has_ebase_wg) {
		/* Set WG so that all the bits get written */
		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
		UASM_i_MTC0(p, reg, C0_EBASE);
	} else {
		uasm_i_mtc0(p, reg, C0_EBASE);
	}
}

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * A0: run
	 * A1: vcpu
	 */

	/* k0/k1 not being used in host kernel context */
	UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
	for (i = 16; i < 32; ++i) {
		/* Skip t8..k1 ($24-$27) */
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Save host status */
	uasm_i_mfc0(&p, V0, C0_STATUS);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

	/* Save scratch registers, will be used to store pointer to vcpu etc */
	kvm_mips_build_save_scratch(&p, V1, K1);

	/* VCPU scratch register has pointer to vcpu */
	UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Offset into vcpu->arch */
	UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	/* load up the new EBASE */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
	build_set_exc_base(&p, K0);

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
	uasm_i_andi(&p, V0, V0, ST0_IM);
	uasm_i_or(&p, K0, K0, V0);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	UASM_i_MTC0(&p, T0, C0_EPC);

	/* Set the ASID for the Guest Kernel */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
		  T0);
	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
	uasm_i_xori(&p, T0, T0, KSU_USER);
	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
	UASM_i_ADDIU(&p, T1, K1,
		     offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
	/* else user */
	UASM_i_ADDIU(&p, T1, K1,
		     offsetof(struct kvm_vcpu_arch, guest_user_asid));
	uasm_l_kernel_asid(&l, p);

	/* t1: contains the base of the ASID array, need to get the cpu id */
	/* smp_processor_id */
	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* x 4 */
	uasm_i_sll(&p, T2, T2, 2);
	UASM_i_ADDU(&p, T3, T1, T2);
	uasm_i_lw(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/* x sizeof(struct cpuinfo_mips)/4 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
	uasm_i_mul(&p, T2, T2, T3);

	/* Load cpu_data[smp_processor_id()].asid_mask and mask the ASID */
	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
	uasm_i_ehb(&p);

	/* Disable RDHWR access */
	uasm_i_mtc0(&p, ZERO, C0_HWRENA);

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* Restore hi/lo */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
	uasm_i_mtlo(&p, K0);
#endif

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * branch to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Branch to the common handler */
	uasm_il_b(&p, &r, label_exit_common);
	uasm_i_nop(&p);

	uasm_l_exit_common(&l, handler);
	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
	uasm_i_move(&p, S1, A1);

	/* Restore run (vcpu->run) */
	UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
	/* Save pointer to run in s0, will be saved by the compiler */
	uasm_i_move(&p, S0, A0);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	UASM_i_MFC0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	UASM_i_MFC0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	build_set_exc_base(&p, K0);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, K0, SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	/* Allocate the C call frame in the branch delay slot */
	UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler, make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	uasm_i_move(&p, K1, S1);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc) or resuming the guest
	 */
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	uasm_i_nop(&p);

	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s1) back into the scratch register */
	UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, V1, C0_STATUS);
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V1, AT);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
	build_set_exc_base(&p, T0);

	/* Setup status register for running guest in UM */
	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
	uasm_i_and(&p, V1, V1, AT);
	uasm_i_mtc0(&p, V1, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		/* Skip t8..k1 ($24-$27) */
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	uasm_i_nop(&p);

	return p;
}