arch/powerpc/kernel/head_32.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __HEAD_32_H__
#define __HEAD_32_H__

#include <asm/ptrace.h>	/* for STACK_FRAME_REGS_MARKER */

/*
 * Exception entry code.  This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume sprg3 has the physical address of the current
 * task's thread_struct.
 */

.macro EXCEPTION_PROLOG
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfcr	r10
	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
.endm
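
/*
 * Summary of the contract above (an inferred note, not an upstream
 * comment): on exit from EXCEPTION_PROLOG, r11 holds the physical
 * address of a freshly allocated exception frame, the incoming r10/r11
 * are parked in SPRN_SPRG_SCRATCH0/1, r10 holds the saved CR, and
 * cr0.eq is set when the exception came from the kernel.
 */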

.macro EXCEPTION_PROLOG_1
	mfspr	r11,SPRN_SRR1		/* check whether user or kernel */
	andi.	r11,r11,MSR_PR
	tophys(r11,r1)			/* use tophys(r1) if kernel */
	beq	1f
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,TASK_STACK-THREAD(r11)
	addi	r11,r11,THREAD_SIZE
	tophys(r11,r11)
1:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */
.endm
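
/*
 * Inferred note (not an upstream comment): if the exception came from
 * the kernel (MSR_PR clear, hence cr0.eq set), the frame is carved out
 * of the current stack via tophys(r1); if it came from user space, the
 * top of the task's kernel stack is used instead.
 */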

.macro EXCEPTION_PROLOG_2
	stw	r10,_CCR(r11)		/* save registers */
	stw	r12,GPR12(r11)
	stw	r9,GPR9(r11)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	stw	r10,GPR10(r11)
	mfspr	r12,SPRN_SPRG_SCRATCH1
	stw	r12,GPR11(r11)
	mflr	r10
	stw	r10,_LINK(r11)
	mfspr	r12,SPRN_SRR0
	mfspr	r9,SPRN_SRR1
	stw	r1,GPR1(r11)
	stw	r1,0(r11)
	tovirt(r1,r11)			/* set new kernel sp */
#ifdef CONFIG_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#else
	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* can take exceptions */
	MTMSRD(r10)			/* (except for mach check in rtas) */
#endif
	stw	r0,GPR0(r11)
	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	addi	r10,r10,STACK_FRAME_REGS_MARKER@l
	stw	r10,8(r11)
	SAVE_4GPRS(3, r11)
	SAVE_2GPRS(7, r11)
.endm
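
/*
 * Inferred note (not an upstream comment): the STACK_FRAME_REGS_MARKER
 * word at offset 8 lets stack walkers recognise this as an exception
 * frame, and SAVE_4GPRS/SAVE_2GPRS capture r3-r6 and r7-r8, so every
 * volatile GPR except r2 is in the frame when the macro ends.
 */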

.macro SYSCALL_ENTRY trapno
	mfspr	r12,SPRN_SPRG_THREAD
	mfcr	r10
	lwz	r11,TASK_STACK-THREAD(r12)
	mflr	r9
	addi	r11,r11,THREAD_SIZE - INT_FRAME_SIZE
	rlwinm	r10,r10,0,4,2		/* Clear SO bit in CR */
	tophys(r11,r11)
	stw	r10,_CCR(r11)		/* save registers */
	mfspr	r10,SPRN_SRR0
	stw	r9,_LINK(r11)
	mfspr	r9,SPRN_SRR1
	stw	r1,GPR1(r11)
	stw	r1,0(r11)
	tovirt(r1,r11)			/* set new kernel sp */
	stw	r10,_NIP(r11)
#ifdef CONFIG_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#else
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
	MTMSRD(r10)			/* (except for mach check in rtas) */
#endif
	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	stw	r2,GPR2(r11)
	addi	r10,r10,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r11)
	li	r2, \trapno + 1
	stw	r10,8(r11)
	stw	r2,_TRAP(r11)
	SAVE_GPR(0, r11)
	SAVE_4GPRS(3, r11)
	SAVE_2GPRS(7, r11)
	addi	r11,r1,STACK_FRAME_OVERHEAD
	addi	r2,r12,-THREAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
#endif
	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
#if defined(CONFIG_40x)
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif

3:
	tovirt(r2, r2)			/* set r2 to current */
	lis	r11, transfer_to_syscall@h
	ori	r11, r11, transfer_to_syscall@l
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * If MSR is changing we need to keep interrupts disabled at this point
	 * otherwise we might risk taking an interrupt before we tell lockdep
	 * they are enabled.
	 */
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL)
	rlwimi	r10, r9, 0, MSR_EE
#else
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
#endif
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	SYNC
	RFI				/* jump to handler, enable MMU */
.endm
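
/*
 * Usage sketch (illustrative, not part of this header): the system call
 * vector in head_32.S invokes this macro with its trap number, roughly:
 *
 *	. = 0xc00
 *	DO_KVM	0xc00
 * SystemCall:
 *	SYSCALL_ENTRY	0xc00
 *
 * The RFI above then lands in transfer_to_syscall with the MMU enabled.
 */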

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */

/*
 * Exception vectors.
 */
#ifdef CONFIG_PPC_BOOK3S
#define START_EXCEPTION(n, label)	\
	. = n;				\
	DO_KVM n;			\
label:

#else
#define START_EXCEPTION(n, label)	\
	. = n;				\
label:

#endif

#define EXCEPTION(n, label, hdlr, xfer)		\
	START_EXCEPTION(n, label)		\
	EXCEPTION_PROLOG;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)
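
/*
 * Usage sketch (illustrative): a vector that needs no hand-written
 * prolog work can be wired straight to its C handler, e.g. in the style
 * of head_32.S:
 *
 *	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
 */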

#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret)	\
	li	r10,trap;				\
	stw	r10,_TRAP(r11);				\
	LOAD_REG_IMMEDIATE(r10, msr);			\
	bl	tfer;					\
	.long	hdlr;					\
	.long	ret

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full,	\
			  ret_from_except_full)
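
/*
 * For reference, expanding the template mechanically, EXC_XFER_STD(n, hdlr)
 * becomes:
 *
 *	li	r10,n
 *	stw	r10,_TRAP(r11)
 *	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL)
 *	bl	transfer_to_handler_full
 *	.long	hdlr
 *	.long	ret_from_except_full
 */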

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
			  ret_from_except)
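
/*
 * Inferred note (not an upstream comment): the "+1" sets the low bit of
 * the trap number, which FULL_REGS() in asm/ptrace.h treats as "not a
 * full register save"; the lite transfer path skips saving the
 * non-volatile GPRs.
 */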

#endif /* __HEAD_32_H__ */