1 /* SPDX-License-Identifier: GPL-2.0 */
5 #include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */
8 * Exception entry code. This code runs with address translation
9 * turned off, i.e. using physical addresses.
10 * We assume sprg3 has the physical address of the current
11 * task's thread_struct.
14 .macro EXCEPTION_PROLOG
15 mtspr SPRN_SPRG_SCRATCH0
,r10
16 mtspr SPRN_SPRG_SCRATCH1
,r11
22 .macro EXCEPTION_PROLOG_1
23 mfspr r11
,SPRN_SRR1
/* check whether user or kernel */
25 tophys(r11
,r1
) /* use tophys(r1) if kernel */
27 mfspr r11
,SPRN_SPRG_THREAD
28 lwz r11
,TASK_STACK
-THREAD(r11
)
29 addi r11
,r11
,THREAD_SIZE
31 1: subi r11
,r11
,INT_FRAME_SIZE
/* alloc exc. frame */
34 .macro EXCEPTION_PROLOG_2
35 stw r10
,_CCR(r11
) /* save registers */
38 mfspr r10
,SPRN_SPRG_SCRATCH0
40 mfspr r12
,SPRN_SPRG_SCRATCH1
48 tovirt(r1
,r11
) /* set new kernel sp */
50 rlwinm r9
,r9
,0,14,12 /* clear MSR_WE (necessary?) */
52 li r10
,MSR_KERNEL
& ~(MSR_IR
|MSR_DR
) /* can take exceptions */
53 MTMSRD(r10
) /* (except for mach check in rtas) */
56 lis r10
,STACK_FRAME_REGS_MARKER@ha
/* exception frame marker */
57 addi r10
,r10
,STACK_FRAME_REGS_MARKER@l
63 .macro SYSCALL_ENTRY trapno
64 mfspr r12
,SPRN_SPRG_THREAD
66 lwz r11
,TASK_STACK
-THREAD(r12
)
68 addi r11
,r11
,THREAD_SIZE
- INT_FRAME_SIZE
69 rlwinm r10
,r10
,0,4,2 /* Clear SO bit in CR */
71 stw r10
,_CCR(r11
) /* save registers */
77 tovirt(r1
,r11
) /* set new kernel sp */
80 rlwinm r9
,r9
,0,14,12 /* clear MSR_WE (necessary?) */
82 LOAD_REG_IMMEDIATE(r10
, MSR_KERNEL
& ~(MSR_IR
|MSR_DR
)) /* can take exceptions */
83 MTMSRD(r10
) /* (except for mach check in rtas) */
85 lis r10
,STACK_FRAME_REGS_MARKER@ha
/* exception frame marker */
87 addi r10
,r10
,STACK_FRAME_REGS_MARKER@l
95 addi r11
,r1
,STACK_FRAME_OVERHEAD
98 #if defined(CONFIG_40x)
99 /* Check to see if the dbcr0 register is set up to debug. Use the
100 internal debug mode bit to do this. */
101 lwz r12
,THREAD_DBCR0(r12
)
102 andis
. r12
,r12
,DBCR0_IDM@h
104 ACCOUNT_CPU_USER_ENTRY(r2
, r11
, r12
)
105 #if defined(CONFIG_40x)
107 /* From user and task is ptraced - load up global dbcr0 */
108 li r12
,-1 /* clear all pending debug events */
110 lis r11
,global_dbcr0@ha
112 addi r11
,r11
,global_dbcr0@l
121 tovirt(r2
, r2
) /* set r2 to current */
122 lis r11
, transfer_to_syscall@h
123 ori r11
, r11
, transfer_to_syscall@l
124 #ifdef CONFIG_TRACE_IRQFLAGS
126 * If MSR is changing we need to keep interrupts disabled at this point
127 * otherwise we might risk taking an interrupt before we tell lockdep
130 LOAD_REG_IMMEDIATE(r10
, MSR_KERNEL
)
131 rlwimi r10
, r9
, 0, MSR_EE
133 LOAD_REG_IMMEDIATE(r10
, MSR_KERNEL
| MSR_EE
)
135 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
141 RFI
/* jump to handler, enable MMU */
145 * Note: code which follows this uses cr0.eq (set if from kernel),
146 * r11, r12 (SRR0), and r9 (SRR1).
148 * Note2: once we have set r1 we are in a position to take exceptions
149 * again, and we could thus set MSR:RI at that point.
155 #ifdef CONFIG_PPC_BOOK3S
156 #define START_EXCEPTION(n, label) \
162 #define START_EXCEPTION(n, label) \
168 #define EXCEPTION(n, label, hdlr, xfer) \
169 START_EXCEPTION(n, label) \
171 addi r3,r1,STACK_FRAME_OVERHEAD; \
174 #define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret) \
176 stw r10,_TRAP(r11); \
177 LOAD_REG_IMMEDIATE(r10, msr); \
/*
 * EXC_XFER_STD(n, hdlr): standard exception transfer. Expands
 * EXC_XFER_TEMPLATE with trap number n, MSR_KERNEL, and the "full"
 * transfer/return pair (transfer_to_handler_full /
 * ret_from_except_full), i.e. the path that saves and restores the
 * complete register frame. NOTE(review): EXC_XFER_TEMPLATE's full body
 * is defined above — confirm argument order (hdlr, trap, msr, tfer,
 * ret) against it.
 */
182 #define EXC_XFER_STD(n, hdlr) \
183 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \
184 ret_from_except_full)
186 #define EXC_XFER_LITE(n, hdlr) \
187 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
190 #endif /* __HEAD_32_H__ */