/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 Waldorf GMBH
 * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle
 * Copyright (C) 1996 Paul M. Antoine
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/sizes.h>
#include <linux/threads.h>

#include <asm/cachectl.h>
#include <asm/cpu-info.h>
#include <asm/dsemul.h>
#include <asm/mipsregs.h>
#include <asm/prefetch.h>
/*
 * System setup and hardware flags..
 */
extern unsigned int vced_count, vcei_count;
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
/* User space process size is limited to 1GB in KVM Guest Mode */
#define TASK_SIZE	0x3fff8000UL
#else
/*
 * User space process size: 2GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 */
#define TASK_SIZE	0x80000000UL
#endif

#define STACK_TOP_MAX	TASK_SIZE

#define TASK_IS_32BIT_ADDR 1

#endif /* CONFIG_32BIT */
#ifdef CONFIG_64BIT
/*
 * User space process size: 1TB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing. TASK_SIZE
 * is limited to 1TB by the R4000 architecture; R10000 and better can
 * support 16TB; the architectural reserve for future expansion is
 * 8192EB ...
 */
#define TASK_SIZE32	0x7fff8000UL
#ifdef CONFIG_MIPS_VA_BITS_48
#define TASK_SIZE64	(0x1UL << ((cpu_data[0].vmbits>48)?48:cpu_data[0].vmbits))
#else
#define TASK_SIZE64	0x10000000000UL
#endif
#define TASK_SIZE	(test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
#define STACK_TOP_MAX	TASK_SIZE64

#define TASK_SIZE_OF(tsk)						\
	(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)

#define TASK_IS_32BIT_ADDR	test_thread_flag(TIF_32BIT_ADDR)

#endif /* CONFIG_64BIT */
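/*
 * Illustrative sketch only (not part of this header): because TASK_SIZE and
 * TASK_SIZE_OF() evaluate per-thread, a bounds check written against them
 * automatically shrinks for tasks running with TIF_32BIT_ADDR set, e.g.:
 *
 *	if (len > TASK_SIZE_OF(tsk) || addr > TASK_SIZE_OF(tsk) - len)
 *		return -ENOMEM;
 *
 * (addr, len and tsk are hypothetical locals here.)
 */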
#define VDSO_RANDOMIZE_SIZE	(TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)

extern unsigned long mips_stack_top(void);
#define STACK_TOP		mips_stack_top()
/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)
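/*
 * Worked example (illustrative only): with the 32-bit TASK_SIZE of
 * 0x80000000 and 4 KiB pages, TASK_UNMAPPED_BASE is
 * PAGE_ALIGN(0x80000000 / 3) = PAGE_ALIGN(0x2aaaaaaa) = 0x2aaab000,
 * i.e. bottom-up mmap() searches start roughly a third of the way into
 * the user address space.
 */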
#define NUM_FPU_REGS	32

#ifdef CONFIG_CPU_HAS_MSA
# define FPU_REG_WIDTH	128
#else
# define FPU_REG_WIDTH	64
#endif
union fpureg {
	__u32	val32[FPU_REG_WIDTH / 32];
	__u64	val64[FPU_REG_WIDTH / 64];
};
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# define FPR_IDX(width, idx)	(idx)
#else
# define FPR_IDX(width, idx)	((idx) ^ ((64 / (width)) - 1))
#endif
#define BUILD_FPR_ACCESS(width) \
static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx)	\
{									\
	return fpr->val##width[FPR_IDX(width, idx)];			\
}									\
									\
static inline void set_fpr##width(union fpureg *fpr, unsigned idx,	\
				  u##width val)				\
{									\
	fpr->val##width[FPR_IDX(width, idx)] = val;			\
}

BUILD_FPR_ACCESS(32)
BUILD_FPR_ACCESS(64)
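/*
 * Illustrative sketch only (not part of this header): the generated
 * accessors hide the endian-dependent element order selected by FPR_IDX(),
 * so callers can address FP register contents uniformly. The fpu/fpr path
 * below assumes the CONFIG_MIPS_FP_SUPPORT thread_struct layout defined
 * later in this file, and n is a hypothetical register index:
 *
 *	union fpureg *reg = &current->thread.fpu.fpr[n];
 *	u64 lo = get_fpr64(reg, 0);
 *	set_fpr64(reg, 0, lo | 1);
 */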
/*
 * It would be nice to add some more fields for emulator statistics,
 * but the additional information is private to the FPU emulator for now.
 * See arch/mips/include/asm/fpu_emulator.h.
 */

struct mips_fpu_struct {
	union fpureg	fpr[NUM_FPU_REGS];
	unsigned int	fcr31;
	unsigned int	msacsr;
};
#define NUM_DSP_REGS	6

typedef unsigned long dspreg_t;

struct mips_dsp_state {
	dspreg_t	dspr[NUM_DSP_REGS];
	unsigned int	dspcontrol;
};
#define INIT_CPUMASK { \
	{0,} \
}
struct mips3264_watch_reg_state {
	/* The width of watchlo is 32 in a 32 bit kernel and 64 in a
	   64 bit kernel.  We use unsigned long as it has the same
	   property. */
	unsigned long watchlo[NUM_WATCH_REGS];
	/* Only the mask and IRW bits from watchhi. */
	u16 watchhi[NUM_WATCH_REGS];
};
union mips_watch_reg_state {
	struct mips3264_watch_reg_state mips3264;
};
#if defined(CONFIG_CPU_CAVIUM_OCTEON)

struct octeon_cop2_state {
	/* DMFC2 rt, 0x0201 */
	unsigned long	cop2_crc_iv;
	/* DMFC2 rt, 0x0202 (Set with DMTC2 rt, 0x1202) */
	unsigned long	cop2_crc_length;
	/* DMFC2 rt, 0x0200 (set with DMTC2 rt, 0x4200) */
	unsigned long	cop2_crc_poly;
	/* DMFC2 rt, 0x0402; DMFC2 rt, 0x040A */
	unsigned long	cop2_llm_dat[2];
	/* DMFC2 rt, 0x0084 */
	unsigned long	cop2_3des_iv;
	/* DMFC2 rt, 0x0080; DMFC2 rt, 0x0081; DMFC2 rt, 0x0082 */
	unsigned long	cop2_3des_key[3];
	/* DMFC2 rt, 0x0088 (Set with DMTC2 rt, 0x0098) */
	unsigned long	cop2_3des_result;
	/* DMFC2 rt, 0x0111 (FIXME: Read Pass1 Errata) */
	unsigned long	cop2_aes_inp0;
	/* DMFC2 rt, 0x0102; DMFC2 rt, 0x0103 */
	unsigned long	cop2_aes_iv[2];
	/* DMFC2 rt, 0x0104; DMFC2 rt, 0x0105; DMFC2 rt, 0x0106; DMFC2
	 * rt, 0x0107 */
	unsigned long	cop2_aes_key[4];
	/* DMFC2 rt, 0x0110 */
	unsigned long	cop2_aes_keylen;
	/* DMFC2 rt, 0x0100; DMFC2 rt, 0x0101 */
	unsigned long	cop2_aes_result[2];
	/* DMFC2 rt, 0x0240; DMFC2 rt, 0x0241; DMFC2 rt, 0x0242; DMFC2
	 * rt, 0x0243; DMFC2 rt, 0x0244; DMFC2 rt, 0x0245; DMFC2 rt,
	 * 0x0246; DMFC2 rt, 0x0247; DMFC2 rt, 0x0248; DMFC2 rt,
	 * 0x0249; DMFC2 rt, 0x024A; DMFC2 rt, 0x024B; DMFC2 rt,
	 * 0x024C; DMFC2 rt, 0x024D; DMFC2 rt, 0x024E - Pass2 */
	unsigned long	cop2_hsh_datw[15];
	/* DMFC2 rt, 0x0250; DMFC2 rt, 0x0251; DMFC2 rt, 0x0252; DMFC2
	 * rt, 0x0253; DMFC2 rt, 0x0254; DMFC2 rt, 0x0255; DMFC2 rt,
	 * 0x0256; DMFC2 rt, 0x0257 - Pass2 */
	unsigned long	cop2_hsh_ivw[8];
	/* DMFC2 rt, 0x0258; DMFC2 rt, 0x0259 - Pass2 */
	unsigned long	cop2_gfm_mult[2];
	/* DMFC2 rt, 0x025E - Pass2 */
	unsigned long	cop2_gfm_poly;
	/* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
	unsigned long	cop2_gfm_result[2];
	/* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */
	unsigned long	cop2_sha3[2];
};
#define COP2_INIT						\
	.cp2			= {0,},
struct octeon_cvmseg_state {
	unsigned long cvmseg[CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE]
			    [cpu_dcache_line_size() / sizeof(unsigned long)];
};
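/*
 * Worked example (illustrative only): CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE is
 * expressed in cache lines, and each row above holds one cache line's worth
 * of longs. With Octeon's 128-byte dcache lines and 8-byte longs that is
 * 128 / 8 = 16 entries per row, so a CVMSEG size of 2 lines yields a
 * cvmseg[2][16] array (256 bytes) saved per thread.
 */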
#elif defined(CONFIG_CPU_XLP)
struct nlm_cop2_state {
	u64	rx[4];
	u64	tx[4];
	u32	tx_msg_status;
	u32	rx_msg_status;
};

#define COP2_INIT						\
	.cp2		= {{0}, {0}, 0, 0},
#else
#define COP2_INIT
#endif
#ifdef CONFIG_CPU_HAS_MSA
# define ARCH_MIN_TASKALIGN	16
# define FPU_ALIGN		__aligned(16)
#else
# define ARCH_MIN_TASKALIGN	8
# define FPU_ALIGN
#endif

struct mips_abi;
/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
	/* Saved main processor registers. */
	unsigned long reg16;
	unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23;
	unsigned long reg29, reg30, reg31;

	/* Saved cp0 stuff. */
	unsigned long cp0_status;

#ifdef CONFIG_MIPS_FP_SUPPORT
	/* Saved fpu/fpu emulator stuff. */
	struct mips_fpu_struct fpu FPU_ALIGN;
	/* Assigned branch delay slot 'emulation' frame */
	atomic_t bd_emu_frame;
	/* PC of the branch from a branch delay slot 'emulation' */
	unsigned long bd_emu_branch_pc;
	/* PC to continue from following a branch delay slot 'emulation' */
	unsigned long bd_emu_cont_pc;
#ifdef CONFIG_MIPS_MT_FPAFF
	/* Emulated instruction count */
	unsigned long emulated_fp;
	/* Saved per-thread scheduler affinity mask */
	cpumask_t user_cpus_allowed;
#endif /* CONFIG_MIPS_MT_FPAFF */
#endif /* CONFIG_MIPS_FP_SUPPORT */

	/* Saved state of the DSP ASE, if available. */
	struct mips_dsp_state dsp;

	/* Saved watch register state, if available. */
	union mips_watch_reg_state watch;

	/* Other stuff associated with the thread. */
	unsigned long cp0_badvaddr;	/* Last user fault */
	unsigned long cp0_baduaddr;	/* Last kernel fault accessing USEG */
	unsigned long error_code;
	unsigned long trap_nr;
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128)));
	struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128)));
#endif
#ifdef CONFIG_CPU_XLP
	struct nlm_cop2_state cp2;
#endif
	struct mips_abi *abi;
};
#ifdef CONFIG_MIPS_MT_FPAFF
#define FPAFF_INIT						\
	.emulated_fp		= 0,				\
	.user_cpus_allowed	= INIT_CPUMASK,
#else
#define FPAFF_INIT
#endif /* CONFIG_MIPS_MT_FPAFF */

#ifdef CONFIG_MIPS_FP_SUPPORT
# define FPU_INIT						\
	.fpu			= {				\
		.fpr		= {{{0,},},},			\
		.fcr31		= 0,				\
		.msacsr		= 0,				\
	},							\
	/* Delay slot emulation */				\
	.bd_emu_frame = ATOMIC_INIT(BD_EMUFRAME_NONE),		\
	.bd_emu_branch_pc = 0,					\
	.bd_emu_cont_pc = 0,
#else
# define FPU_INIT
#endif

#define INIT_THREAD  {						\
	/*							\
	 * Saved main processor registers			\
	 */							\
	.reg16 = 0, .reg17 = 0, .reg18 = 0, .reg19 = 0,		\
	.reg20 = 0, .reg21 = 0, .reg22 = 0, .reg23 = 0,		\
	.reg29 = 0, .reg30 = 0, .reg31 = 0,			\
	/*							\
	 * Saved cp0 stuff					\
	 */							\
	.cp0_status = 0,					\
	/*							\
	 * Saved FPU/FPU emulator stuff				\
	 */							\
	FPU_INIT						\
	/*							\
	 * FPU affinity state (null if not FPAFF)		\
	 */							\
	FPAFF_INIT						\
	/*							\
	 * Saved DSP stuff					\
	 */							\
	.dsp = { .dspr = {0, }, .dspcontrol = 0, },		\
	/*							\
	 * saved watch register stuff				\
	 */							\
	.watch = {{{0,},},},					\
	/*							\
	 * Other stuff associated with the process		\
	 */							\
	.cp0_badvaddr = 0,					\
	.cp0_baduaddr = 0,					\
	.error_code = 0,					\
	/*							\
	 * Platform specific cop2 registers (null if no COP2)	\
	 */							\
	COP2_INIT						\
}

struct task_struct;
/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)
371 * Do necessary setup to start up a newly executed thread.
373 extern void start_thread(struct pt_regs
* regs
, unsigned long pc
, unsigned long sp
);
static inline void flush_thread(void)
{
}
unsigned long get_wchan(struct task_struct *p);
#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
			 THREAD_SIZE - 32 - sizeof(struct pt_regs))
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
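/*
 * Illustrative sketch only (not part of this header): task_pt_regs() points
 * at the user register frame saved at the top of the kernel stack, 32 bytes
 * below THREAD_SIZE, so a debugger-style helper could report another task's
 * user PC and stack pointer as:
 *
 *	unsigned long pc = KSTK_EIP(tsk);
 *	unsigned long usp = KSTK_ESP(tsk);
 *
 * (tsk is a hypothetical struct task_struct pointer.)
 */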
#ifdef CONFIG_CPU_LOONGSON64
/*
 * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
 * tight read loop is executed, because reads take priority over writes & the
 * hardware (incorrectly) doesn't ensure that writes will eventually occur.
 *
 * Since spin loops of any kind should have a cpu_relax() in them, force an SFB
 * flush from cpu_relax() such that any pending writes will become visible as
 * expected.
 */
#define cpu_relax()	smp_mb()
#else
#define cpu_relax()	barrier()
#endif
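/*
 * Illustrative sketch only (not part of this header): the kind of polling
 * loop the comment above has in mind. On Loongson-3 the cpu_relax() doubles
 * as an SFB flush, so a value written by this CPU is not left sitting in the
 * store-fill-buffer while it spins waiting for the other side:
 *
 *	WRITE_ONCE(*request, 1);
 *	while (!READ_ONCE(*ack))
 *		cpu_relax();
 *
 * (request and ack are hypothetical shared int pointers.)
 */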
/*
 * Return_address is a replacement for __builtin_return_address(count)
 * which on certain architectures cannot reasonably be implemented in GCC
 * (MIPS, Alpha) or is unusable with -fomit-frame-pointer (i386).
 * Note that __builtin_return_address(x>=1) is forbidden because GCC
 * aborts compilation on some CPUs.  It's simply not possible to unwind
 * some CPU's stackframes.
 *
 * __builtin_return_address works only for non-leaf functions.  We avoid the
 * overhead of a function call by forcing the compiler to save the return
 * address register on the stack.
 */
#define return_address() ({__asm__ __volatile__("":::"$31");__builtin_return_address(0);})
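/*
 * Illustrative sketch only (not part of this header): the empty asm clobbers
 * $31 (ra), forcing the compiler to save the return address register so that
 * __builtin_return_address(0) stays valid, e.g. for ad-hoc tracing:
 *
 *	pr_debug("called from %pS\n", (void *)return_address());
 */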
#ifdef CONFIG_CPU_HAS_PREFETCH

#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch((x), 0, 1)

#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)

#endif
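/*
 * Illustrative sketch only (not part of this header): prefetch() issues a
 * read prefetch and prefetchw() a write prefetch, both with low temporal
 * locality, e.g. hinting the next element while the current one is summed:
 *
 *	for (i = 0; i + 1 < n; i++) {
 *		prefetch(&buf[i + 1]);
 *		sum += buf[i];
 *	}
 *
 * (buf, n, i and sum are hypothetical locals.)
 */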
/*
 * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options
 * to the prctl syscall.
 */
extern int mips_get_process_fp_mode(struct task_struct *task);
extern int mips_set_process_fp_mode(struct task_struct *task,
				    unsigned int value);

#define GET_FP_MODE(task)		mips_get_process_fp_mode(task)
#define SET_FP_MODE(task,value)		mips_set_process_fp_mode(task, value)
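/*
 * Illustrative sketch only (not part of this header): these are the kernel
 * hooks behind the generic prctl plumbing; from user space the same state is
 * reached with e.g.:
 *
 *	prctl(PR_SET_FP_MODE, PR_FP_MODE_FR);
 *	int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);
 */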
#endif /* _ASM_PROCESSOR_H */