/*	$NetBSD: locore.h,v 1.26 2015/06/09 08:13:17 skrll Exp $	*/

/*
 * Copyright (c) 1994-1996 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpu.h
 *
 * CPU specific symbols
 *
 * Created      : 18/09/94
 *
 * Based on kate/katelib/arm6.h
 */
#ifndef _ARM_LOCORE_H_
#define _ARM_LOCORE_H_

#ifdef _KERNEL_OPT
#include "opt_cpuoptions.h"
#include "opt_cputypes.h"
#include "opt_arm_debug.h"
#endif

#include <sys/pcu.h>

#include <arm/cpuconf.h>
#include <arm/armreg.h>

#include <machine/frame.h>

#ifdef _LOCORE
#if defined(_ARM_ARCH_6)
#define IRQdisable	cpsid	i
#define IRQenable	cpsie	i
#elif defined(__PROG32)
#define IRQdisable \
	stmfd	sp!, {r0} ; \
	mrs	r0, cpsr ; \
	orr	r0, r0, #(I32_bit) ; \
	msr	cpsr_c, r0 ; \
	ldmfd	sp!, {r0}

#define IRQenable \
	stmfd	sp!, {r0} ; \
	mrs	r0, cpsr ; \
	bic	r0, r0, #(I32_bit) ; \
	msr	cpsr_c, r0 ; \
	ldmfd	sp!, {r0}
#else
/* Not yet used in 26-bit code */
#endif
#if defined(TPIDRPRW_IS_CURCPU)
#define GET_CURCPU(rX)	mrc	p15, 0, rX, c13, c0, 4
#define GET_CURLWP(rX)	GET_CURCPU(rX); ldr rX, [rX, #CI_CURLWP]
#elif defined(TPIDRPRW_IS_CURLWP)
#define GET_CURLWP(rX)	mrc	p15, 0, rX, c13, c0, 4
#if defined(MULTIPROCESSOR)
#define GET_CURCPU(rX)	GET_CURLWP(rX); ldr rX, [rX, #L_CPU]
#elif defined(_ARM_ARCH_7)
#define GET_CURCPU(rX)	movw rX, #:lower16:cpu_info_store; movt rX, #:upper16:cpu_info_store
#else
#define GET_CURCPU(rX)	ldr rX, =_C_LABEL(cpu_info_store)
#endif
#elif !defined(MULTIPROCESSOR)
#define GET_CURCPU(rX)	ldr rX, =_C_LABEL(cpu_info_store)
#define GET_CURLWP(rX)	GET_CURCPU(rX); ldr rX, [rX, #CI_CURLWP]
#endif
#define GET_CURPCB(rX)	GET_CURLWP(rX); ldr rX, [rX, #L_PCB]
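
/*
 * Illustrative sketch, not part of the original header: an assembly
 * routine would typically use these accessors roughly as below, assuming
 * r4/r5 are free and the usual assym.h offsets (L_CPU, L_PCB) are in scope:
 *
 *	GET_CURLWP(r4)			@ r4 = curlwp
 *	ldr	r5, [r4, #L_CPU]	@ r5 = curlwp->l_cpu
 */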
#else /* !_LOCORE */

#include <arm/cpufunc.h>

#ifdef __PROG32
#define IRQdisable __set_cpsr_c(I32_bit, I32_bit);
#define IRQenable __set_cpsr_c(I32_bit, 0);
#else
#define IRQdisable set_r15(R15_IRQ_DISABLE, R15_IRQ_DISABLE);
#define IRQenable set_r15(R15_IRQ_DISABLE, 0);
#endif
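
/*
 * Illustrative sketch, not part of the original header: a short critical
 * section bracketed by the C-level macros above.  Note that IRQenable
 * unconditionally clears I32_bit, so this pattern assumes interrupts were
 * enabled on entry:
 *
 *	IRQdisable
 *	... touch state that an interrupt handler also uses ...
 *	IRQenable
 */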
/*
 * Validate a PC or PSR for a user process.  Used by various system calls
 * that take a context passed by the user and restore it.
 */

#ifdef __PROG32
#ifdef __NO_FIQ
#define VALID_R15_PSR(r15,psr)						\
	(((psr) & PSR_MODE) == PSR_USR32_MODE && ((psr) & I32_bit) == 0)
#else
#define VALID_R15_PSR(r15,psr)						\
	(((psr) & PSR_MODE) == PSR_USR32_MODE && ((psr) & IF32_bits) == 0)
#endif
#else
#define VALID_R15_PSR(r15,psr)						\
	(((r15) & R15_MODE) == R15_MODE_USR &&				\
	 ((r15) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)) == 0)
#endif
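
/*
 * Illustrative sketch, not part of the original header: a context-restoring
 * path (e.g. a setcontext-style system call) would reject user-supplied
 * state along these lines, with "tf" and the error handling assumed:
 *
 *	if (!VALID_R15_PSR(tf->tf_pc, tf->tf_spsr))
 *		return EINVAL;
 */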
/*
 * Translation Table Base Register Share/Cache settings
 */
#define	TTBR_UPATTR	(TTBR_S | TTBR_RGN_WBNWA | TTBR_C)
#define	TTBR_MPATTR	(TTBR_S | TTBR_RGN_WBNWA /* | TTBR_NOS */ | TTBR_IRGN_WBNWA)

/* The address of the vector page. */
extern vaddr_t vector_page;
#ifdef __PROG32
void	arm32_vector_init(vaddr_t, int);

#define	ARM_VEC_RESET			(1 << 0)
#define	ARM_VEC_UNDEFINED		(1 << 1)
#define	ARM_VEC_SWI			(1 << 2)
#define	ARM_VEC_PREFETCH_ABORT		(1 << 3)
#define	ARM_VEC_DATA_ABORT		(1 << 4)
#define	ARM_VEC_ADDRESS_EXCEPTION	(1 << 5)
#define	ARM_VEC_IRQ			(1 << 6)
#define	ARM_VEC_FIQ			(1 << 7)

#define	ARM_NVEC			8
#define	ARM_VEC_ALL			0xffffffff
#endif	/* __PROG32 */
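
/*
 * Illustrative sketch, not part of the original header: board start-up code
 * typically installs all exception vectors at once, e.g. (assuming a vector
 * base symbol such as ARM_VECTORS_HIGH from the arm32 headers):
 *
 *	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 */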
#ifndef acorn26
/*
 * cpu device glue (belongs in cpuvar.h)
 */
void	cpu_attach(device_t, cpuid_t);
#endif
/* 1 == use cpu_sleep(), 0 == don't */
extern int cpu_do_powersave;
extern int cpu_printfataltraps;
extern int cpu_fpu_present;
extern int cpu_hwdiv_present;
extern int cpu_neon_present;
extern int cpu_simd_present;
extern int cpu_simdex_present;
extern int cpu_umull_present;
extern int cpu_synchprim_present;

extern int cpu_instruction_set_attributes[6];
extern int cpu_memory_model_features[4];
extern int cpu_processor_features[2];
extern int cpu_media_and_vfp_features[2];

extern bool arm_has_tlbiasid_p;
#ifdef MULTIPROCESSOR
extern u_int arm_cpu_max;
extern volatile u_int arm_cpu_hatched;
#endif
#if !defined(CPU_ARMV7)
#define CPU_IS_ARMV7_P()	false
#elif defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
extern bool cpu_armv7_p;
#define	CPU_IS_ARMV7_P()	(cpu_armv7_p)
#else
#define CPU_IS_ARMV7_P()	true
#endif
#if !defined(CPU_ARMV6)
#define CPU_IS_ARMV6_P()	false
#elif defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
extern bool cpu_armv6_p;
#define	CPU_IS_ARMV6_P()	(cpu_armv6_p)
#else
#define CPU_IS_ARMV6_P()	true
#endif
/*
 * Used by the fault code to read the current instruction.
 */
static inline uint32_t
read_insn(vaddr_t va, bool user_p)
{
	uint32_t insn;
	if (user_p) {
		__asm __volatile("ldrt %0, [%1]" : "=&r"(insn) : "r"(va));
	} else {
		insn = *(const uint32_t *)va;
	}
#if defined(__ARMEB__) && defined(_ARM_ARCH_7)
	insn = bswap32(insn);
#endif
	return insn;
}
/*
 * Used by the fault code to read the current thumb instruction.
 */
static inline uint32_t
read_thumb_insn(vaddr_t va, bool user_p)
{
	va &= ~1;
	uint32_t insn;
	if (user_p) {
#if defined(__thumb__) && defined(_ARM_ARCH_T2)
		__asm __volatile("ldrht %0, [%1, #0]" : "=&r"(insn) : "r"(va));
#elif defined(_ARM_ARCH_7)
		__asm __volatile("ldrht %0, [%1], #0" : "=&r"(insn) : "r"(va));
#else
		__asm __volatile("ldrt %0, [%1]" : "=&r"(insn) : "r"(va & ~3));
#ifdef __ARMEB__
		insn = (uint16_t) (insn >> (((va ^ 2) & 2) << 3));
#else
		insn = (uint16_t) (insn >> ((va & 2) << 3));
#endif
#endif
	} else {
		insn = *(const uint16_t *)va;
	}
#if defined(__ARMEB__) && defined(_ARM_ARCH_7)
	insn = bswap16(insn);
#endif
	return insn;
}
#ifndef _RUMPKERNEL
static inline void
arm_dmb(void)
{
	if (CPU_IS_ARMV6_P())
		armreg_dmb_write(0);
	else if (CPU_IS_ARMV7_P())
		__asm __volatile("dmb" ::: "memory");
}

static inline void
arm_dsb(void)
{
	if (CPU_IS_ARMV6_P())
		armreg_dsb_write(0);
	else if (CPU_IS_ARMV7_P())
		__asm __volatile("dsb" ::: "memory");
}

static inline void
arm_isb(void)
{
	if (CPU_IS_ARMV6_P())
		armreg_isb_write(0);
	else if (CPU_IS_ARMV7_P())
		__asm __volatile("isb" ::: "memory");
}
#endif
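
/*
 * Illustrative sketch, not part of the original header: a producer
 * publishing data to another CPU would order its stores with arm_dmb(),
 * with "shared_data" and "shared_ready" being hypothetical variables:
 *
 *	shared_data = value;
 *	arm_dmb();		// make the data visible before the flag
 *	shared_ready = 1;
 */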
/*
 * Random cruft
 */

struct lwp;

/* cpu.c */
void	identify_arm_cpu(device_t, struct cpu_info *);

/* cpuswitch.S */
struct pcb;
void	savectx(struct pcb *);

/* ast.c */
void	userret(struct lwp *);

/* *_machdep.c */
void	bootsync(void);

/* fault.c */
int	badaddr_read(void *, size_t, void *);

/* syscall.c */
void	swi_handler(trapframe_t *);

/* arm_machdep.c */
void	ucas_ras_check(trapframe_t *);

/* vfp_init.c */
void	vfp_attach(struct cpu_info *);
void	vfp_discardcontext(bool);
void	vfp_savecontext(void);
void	vfp_kernel_acquire(void);
void	vfp_kernel_release(void);
bool	vfp_used_p(void);
extern const pcu_ops_t arm_vfp_ops;
#endif /* !_LOCORE */

#endif /* !_ARM_LOCORE_H_ */