/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 Waldorf GMBH
 * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle
 * Copyright (C) 1996 Paul M. Antoine
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#include <linux/cpumask.h>
#include <linux/threads.h>

#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/mipsregs.h>
#include <asm/prefetch.h>
#include <asm/system.h>

/*
 * Return the current instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})

/*
 * System setup and hardware flags.
 */
extern void (*cpu_wait)(void);

extern unsigned int vced_count, vcei_count;

#ifdef CONFIG_32BIT
/*
 * User space process size: 2GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 */
#define TASK_SIZE 0x7fff8000UL
#define STACK_TOP TASK_SIZE

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
#endif

#ifdef CONFIG_64BIT
/*
 * User space process size: 1TB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing. TASK_SIZE
 * is limited to 1TB by the R4000 architecture; R10000 and better can
 * support 16TB; the architectural reserve for future expansion is
 * 8192EB ...
 */
#define TASK_SIZE32 0x7fff8000UL
#define TASK_SIZE 0x10000000000UL
#define STACK_TOP \
        (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE \
        (test_thread_flag(TIF_32BIT_ADDR) ? \
                PAGE_ALIGN(TASK_SIZE32 / 3) : PAGE_ALIGN(TASK_SIZE / 3))
#define TASK_SIZE_OF(tsk) \
        (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE)
#endif
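
/*
 * Illustrative usage sketch, not part of the original header: TASK_SIZE_OF()
 * lets code that inspects another task apply the limit matching that task's
 * address-space mode (32-bit compat vs. full 64-bit).  A hypothetical range
 * check might read:
 *
 *	if (addr > TASK_SIZE_OF(tsk) - len)
 *		return -EINVAL;
 */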

#ifdef __KERNEL__
#define STACK_TOP_MAX TASK_SIZE
#endif

#define NUM_FPU_REGS 32

typedef __u64 fpureg_t;

/*
 * It would be nice to add some more fields for emulator statistics, but there
 * are a number of fixed offsets in offset.h and elsewhere that would have to
 * be recalculated by hand. So the additional information will be private to
 * the FPU emulator for now. See asm-mips/fpu_emulator.h.
 */

struct mips_fpu_struct {
        fpureg_t fpr[NUM_FPU_REGS];
        unsigned int fcr31;
};

#define NUM_DSP_REGS 6

typedef __u32 dspreg_t;

struct mips_dsp_state {
        dspreg_t dspr[NUM_DSP_REGS];
        unsigned int dspcontrol;
};

#define INIT_CPUMASK { \
        {0,} \
}

typedef struct {
        unsigned long seg;
} mm_segment_t;

#define ARCH_MIN_TASKALIGN 8

struct mips_abi;

/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
        /* Saved main processor registers. */
        unsigned long reg16;
        unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23;
        unsigned long reg29, reg30, reg31;

        /* Saved cp0 stuff. */
        unsigned long cp0_status;

        /* Saved fpu/fpu emulator stuff. */
        struct mips_fpu_struct fpu;
#ifdef CONFIG_MIPS_MT_FPAFF
        /* Emulated instruction count */
        unsigned long emulated_fp;
        /* Saved per-thread scheduler affinity mask */
        cpumask_t user_cpus_allowed;
#endif /* CONFIG_MIPS_MT_FPAFF */

        /* Saved state of the DSP ASE, if available. */
        struct mips_dsp_state dsp;

        /* Other stuff associated with the thread. */
        unsigned long cp0_badvaddr;     /* Last user fault */
        unsigned long cp0_baduaddr;     /* Last kernel fault accessing USEG */
        unsigned long error_code;
        unsigned long trap_no;
        unsigned long irix_trampoline;  /* Wheee... */
        unsigned long irix_oldctx;
        struct mips_abi *abi;
};

#ifdef CONFIG_MIPS_MT_FPAFF
#define FPAFF_INIT \
        .emulated_fp = 0, \
        .user_cpus_allowed = INIT_CPUMASK,
#else
#define FPAFF_INIT
#endif /* CONFIG_MIPS_MT_FPAFF */

#define INIT_THREAD { \
        /* \
         * Saved main processor registers \
         */ \
        .reg16 = 0, \
        .reg17 = 0, \
        .reg18 = 0, \
        .reg19 = 0, \
        .reg20 = 0, \
        .reg21 = 0, \
        .reg22 = 0, \
        .reg23 = 0, \
        .reg29 = 0, \
        .reg30 = 0, \
        .reg31 = 0, \
        /* \
         * Saved cp0 stuff \
         */ \
        .cp0_status = 0, \
        /* \
         * Saved FPU/FPU emulator stuff \
         */ \
        .fpu = { \
                .fpr = {0,}, \
                .fcr31 = 0, \
        }, \
        /* \
         * FPU affinity state (null if not FPAFF) \
         */ \
        FPAFF_INIT \
        /* \
         * Saved DSP stuff \
         */ \
        .dsp = { \
                .dspr = {0, }, \
                .dspcontrol = 0, \
        }, \
        /* \
         * Other stuff associated with the process \
         */ \
        .cp0_badvaddr = 0, \
        .cp0_baduaddr = 0, \
        .error_code = 0, \
        .trap_no = 0, \
        .irix_trampoline = 0, \
        .irix_oldctx = 0, \
}

struct task_struct;

/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)

extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);

/*
 * Do necessary setup to start up a newly executed thread.
 */
extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);
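
/*
 * Illustrative usage sketch, not part of the original header: binary format
 * loaders call start_thread() once the new image has been set up, handing
 * over the entry point and the initial user stack pointer, along the lines
 * of the generic ELF loader:
 *
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * (elf_entry and bprm are identifiers from fs/binfmt_elf.c, shown here only
 * for illustration.)
 */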

unsigned long get_wchan(struct task_struct *p);

#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32)
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk) - 1)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
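
/*
 * Illustrative usage sketch, not part of the original header: the KSTK_*
 * accessors read a task's saved user-mode context from the pt_regs frame at
 * the top of its kernel stack, roughly as the /proc code does when reporting
 * a process's program counter and stack pointer:
 *
 *	unsigned long pc = KSTK_EIP(tsk);	(user PC, i.e. cp0_epc)
 *	unsigned long sp = KSTK_ESP(tsk);	(user stack pointer, $29)
 */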

#define cpu_relax() barrier()

/*
 * Return_address is a replacement for __builtin_return_address(count)
 * which on certain architectures cannot reasonably be implemented in GCC
 * (MIPS, Alpha) or is unusable with -fomit-frame-pointer (i386).
 * Note that __builtin_return_address(x>=1) is forbidden because GCC
 * aborts compilation on some CPUs. It's simply not possible to unwind
 * some CPU's stackframes.
 *
 * __builtin_return_address works only for non-leaf functions. We avoid the
 * overhead of a function call by forcing the compiler to save the return
 * address register on the stack.
 */
#define return_address() ({__asm__ __volatile__("":::"$31");__builtin_return_address(0);})
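
/*
 * Illustrative usage sketch, not part of the original header: a hypothetical
 * debug helper that tags a message with its caller's address.  The clobber
 * on $31 in return_address() forces the return address register to be saved,
 * per the comment above, so __builtin_return_address(0) stays reliable here.
 *
 *	static void dump_caller(void)
 *	{
 *		printk(KERN_DEBUG "called from %p\n", return_address());
 *	}
 */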

#ifdef CONFIG_CPU_HAS_PREFETCH

#define ARCH_HAS_PREFETCH

static inline void prefetch(const void *addr)
{
        __asm__ __volatile__(
        "       .set    mips4           \n"
        "       pref    %0, (%1)        \n"
        "       .set    mips0           \n"
        :
        : "i" (Pref_Load), "r" (addr));
}

#endif
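
/*
 * Illustrative usage sketch, not part of the original header: prefetch() is
 * typically issued for the next element of a data structure while the
 * current one is still being processed, much like the generic list
 * iterators do:
 *
 *	for (p = head; p != NULL; p = p->next) {
 *		prefetch(p->next);
 *		do_work(p);
 *	}
 *
 * (do_work() and the list layout are hypothetical.)
 */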

#endif /* _ASM_PROCESSOR_H */