include/asm-x86/processor.h
#ifndef __ASM_X86_PROCESSOR_H
#define __ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* migration helpers, for KVM - will be removed in 2.6.25: */
#include <asm/vm86.h>
#define Xgt_desc_struct desc_ptr

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/init.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
        void *pc;
        asm volatile("mov $1f,%0\n1:":"=r" (pc));
        return pc;
}
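/*
 * The "mov $1f,%0" above loads the address of the local label "1:"
 * that immediately follows it, i.e. (once inlined) the caller's
 * current instruction pointer.
 */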
#ifdef CONFIG_X86_VSMP
#define ARCH_MIN_TASKALIGN      (1 << INTERNODE_CACHE_SHIFT)
#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
#else
#define ARCH_MIN_TASKALIGN      16
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */
struct cpuinfo_x86 {
        __u8 x86;               /* CPU family */
        __u8 x86_vendor;        /* CPU vendor */
        __u8 x86_model;
        __u8 x86_mask;
#ifdef CONFIG_X86_32
        char wp_works_ok;       /* It doesn't on 386's */
        char hlt_works_ok;      /* Problems on some 486Dx4's and old 386's */
        char hard_math;
        char rfu;
        char fdiv_bug;
        char f00f_bug;
        char coma_bug;
        char pad0;
#else
        /* Number of 4K pages in DTLB/ITLB combined */
        int x86_tlbsize;
        __u8 x86_virt_bits, x86_phys_bits;
        /* cpuid returned core id bits */
        __u8 x86_coreid_bits;
        /* Max extended CPUID function supported */
        __u32 extended_cpuid_level;
#endif
        int cpuid_level;        /* Maximum supported CPUID level, -1=no CPUID */
        __u32 x86_capability[NCAPINTS];
        char x86_vendor_id[16];
        char x86_model_id[64];
        int x86_cache_size;     /* in KB - valid for CPUs which support this call */
        int x86_cache_alignment;        /* In bytes */
        int x86_power;
        unsigned long loops_per_jiffy;
#ifdef CONFIG_SMP
        cpumask_t llc_shared_map;       /* cpus sharing the last level cache */
#endif
        u16 x86_max_cores;      /* cpuid returned max cores value */
        u16 apicid;
        u16 x86_clflush_size;
#ifdef CONFIG_SMP
        u16 booted_cores;       /* number of cores as seen by OS */
        u16 phys_proc_id;       /* Physical processor id. */
        u16 cpu_core_id;        /* Core id */
        u16 cpu_index;          /* index into per_cpu list */
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL        0
#define X86_VENDOR_CYRIX        1
#define X86_VENDOR_AMD          2
#define X86_VENDOR_UMC          3
#define X86_VENDOR_NEXGEN       4
#define X86_VENDOR_CENTAUR      5
#define X86_VENDOR_TRANSMETA    7
#define X86_VENDOR_NSC          8
#define X86_VENDOR_NUM          9
#define X86_VENDOR_UNKNOWN      0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;
extern __u32 cleared_cpu_caps[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)           per_cpu(cpu_info, cpu)
#define current_cpu_data        cpu_data(smp_processor_id())
#else
#define cpu_data(cpu)           boot_cpu_data
#define current_cpu_data        boot_cpu_data
#endif

void cpu_detect(struct cpuinfo_x86 *c);

extern void identify_cpu(struct cpuinfo_x86 *);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

#if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64)
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
                                unsigned int *ecx, unsigned int *edx)
{
        /* ecx is often an input as well as an output. */
        __asm__("cpuid"
                : "=a" (*eax),
                  "=b" (*ebx),
                  "=c" (*ecx),
                  "=d" (*edx)
                : "0" (*eax), "2" (*ecx));
}

static inline void load_cr3(pgd_t *pgdir)
{
        write_cr3(__pa(pgdir));
}
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
        unsigned short back_link, __blh;
        unsigned long sp0;
        unsigned short ss0, __ss0h;
        unsigned long sp1;
        unsigned short ss1, __ss1h;     /* ss1 caches MSR_IA32_SYSENTER_CS */
        unsigned long sp2;
        unsigned short ss2, __ss2h;
        unsigned long __cr3;
        unsigned long ip;
        unsigned long flags;
        unsigned long ax, cx, dx, bx;
        unsigned long sp, bp, si, di;
        unsigned short es, __esh;
        unsigned short cs, __csh;
        unsigned short ss, __ssh;
        unsigned short ds, __dsh;
        unsigned short fs, __fsh;
        unsigned short gs, __gsh;
        unsigned short ldt, __ldth;
        unsigned short trace, io_bitmap_base;
} __attribute__((packed));
#else
struct x86_hw_tss {
        u32 reserved1;
        u64 sp0;
        u64 sp1;
        u64 sp2;
        u64 reserved2;
        u64 ist[7];
        u32 reserved3;
        u32 reserved4;
        u16 reserved5;
        u16 io_bitmap_base;
} __attribute__((packed)) ____cacheline_aligned;
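/*
 * ist[] above holds the seven Interrupt Stack Table pointers: a
 * 64-bit IDT gate may select one of them to force a switch to a
 * known-good stack for critical exceptions (NMI, double fault, ...).
 */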
#endif

/*
 * Size of io_bitmap.
 */
#define IO_BITMAP_BITS          65536
#define IO_BITMAP_BYTES         (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS         (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET        offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET        0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY   0x9000

struct tss_struct {
        struct x86_hw_tss x86_tss;

        /*
         * The extra 1 is there because the CPU will access an
         * additional byte beyond the end of the IO permission
         * bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
        /*
         * Cache the current maximum and the last task that used the bitmap:
         */
        unsigned long io_bitmap_max;
        struct thread_struct *io_bitmap_owner;
        /*
         * pads the TSS to be cacheline-aligned (size is 0x100)
         */
        unsigned long __cacheline_filler[35];
        /*
         * .. and then another 0x100 bytes for emergency kernel stack
         */
        unsigned long stack[64];
} __attribute__((packed));
DECLARE_PER_CPU(struct tss_struct, init_tss);

/* Save the original ist values for checking stack pointers during debugging */
struct orig_ist {
        unsigned long ist[7];
};
#define MXCSR_DEFAULT           0x1f80

struct i387_fsave_struct {
        u32 cwd;
        u32 swd;
        u32 twd;
        u32 fip;
        u32 fcs;
        u32 foo;
        u32 fos;
        u32 st_space[20];       /* 8*10 bytes for each FP-reg = 80 bytes */
        u32 status;             /* software status information */
};

struct i387_fxsave_struct {
        u16 cwd;
        u16 swd;
        u16 twd;
        u16 fop;
        union {
                struct {
                        u64 rip;
                        u64 rdp;
                };
                struct {
                        u32 fip;
                        u32 fcs;
                        u32 foo;
                        u32 fos;
                };
        };
        u32 mxcsr;
        u32 mxcsr_mask;
        u32 st_space[32];       /* 8*16 bytes for each FP-reg = 128 bytes */
        u32 xmm_space[64];      /* 16*16 bytes for each XMM-reg = 256 bytes */
        u32 padding[24];
} __attribute__((aligned(16)));
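/*
 * This struct mirrors the 512-byte, 16-byte-aligned memory image
 * used by the FXSAVE/FXRSTOR instructions; the anonymous union
 * overlays the 64-bit (rip/rdp) and 32-bit (fip/fcs/foo/fos) views
 * of the FPU instruction and operand pointers.
 */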
struct i387_soft_struct {
        u32 cwd;
        u32 swd;
        u32 twd;
        u32 fip;
        u32 fcs;
        u32 foo;
        u32 fos;
        u32 st_space[20];       /* 8*10 bytes for each FP-reg = 80 bytes */
        u8 ftop, changed, lookahead, no_update, rm, alimit;
        struct info *info;
        u32 entry_eip;
};

union i387_union {
        struct i387_fsave_struct fsave;
        struct i387_fxsave_struct fxsave;
        struct i387_soft_struct soft;
};
#ifdef CONFIG_X86_32
/*
 * the following now lives in the per cpu area:
 * extern int cpu_llc_id[NR_CPUS];
 */
DECLARE_PER_CPU(u8, cpu_llc_id);
#else
DECLARE_PER_CPU(struct orig_ist, orig_ist);
#endif

extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
struct thread_struct {
        /* cached TLS descriptors. */
        struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
        unsigned long sp0;
        unsigned long sp;
#ifdef CONFIG_X86_32
        unsigned long sysenter_cs;
#else
        unsigned long usersp;   /* Copy from PDA */
        unsigned short es, ds, fsindex, gsindex;
#endif
        unsigned long ip;
        unsigned long fs;
        unsigned long gs;
        /* Hardware debugging registers */
        unsigned long debugreg0;
        unsigned long debugreg1;
        unsigned long debugreg2;
        unsigned long debugreg3;
        unsigned long debugreg6;
        unsigned long debugreg7;
        /* fault info */
        unsigned long cr2, trap_no, error_code;
        /* floating point info */
        union i387_union i387 __attribute__((aligned(16)));
#ifdef CONFIG_X86_32
        /* virtual 86 mode info */
        struct vm86_struct __user *vm86_info;
        unsigned long screen_bitmap;
        unsigned long v86flags, v86mask, saved_sp0;
        unsigned int saved_fs, saved_gs;
#endif
        /* IO permissions */
        unsigned long *io_bitmap_ptr;
        unsigned long iopl;
        /* max allowed port in the bitmap, in bytes: */
        unsigned io_bitmap_max;
        /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
        unsigned long debugctlmsr;
        /*
         * Debug Store - if not 0 points to a DS Save Area configuration;
         * goes into MSR_IA32_DS_AREA
         */
        unsigned long ds_area_msr;
};
static inline unsigned long native_get_debugreg(int regno)
{
        unsigned long val = 0; /* Damn you, gcc! */

        switch (regno) {
        case 0:
                asm("mov %%db0, %0" :"=r" (val)); break;
        case 1:
                asm("mov %%db1, %0" :"=r" (val)); break;
        case 2:
                asm("mov %%db2, %0" :"=r" (val)); break;
        case 3:
                asm("mov %%db3, %0" :"=r" (val)); break;
        case 6:
                asm("mov %%db6, %0" :"=r" (val)); break;
        case 7:
                asm("mov %%db7, %0" :"=r" (val)); break;
        default:
                BUG();
        }
        return val;
}
static inline void native_set_debugreg(int regno, unsigned long value)
{
        switch (regno) {
        case 0:
                asm("mov %0,%%db0" : /* no output */ :"r" (value));
                break;
        case 1:
                asm("mov %0,%%db1" : /* no output */ :"r" (value));
                break;
        case 2:
                asm("mov %0,%%db2" : /* no output */ :"r" (value));
                break;
        case 3:
                asm("mov %0,%%db3" : /* no output */ :"r" (value));
                break;
        case 6:
                asm("mov %0,%%db6" : /* no output */ :"r" (value));
                break;
        case 7:
                asm("mov %0,%%db7" : /* no output */ :"r" (value));
                break;
        default:
                BUG();
        }
}
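/*
 * %db4 and %db5 are intentionally unhandled in both helpers: the
 * hardware aliases them to %db6/%db7 (and faults on access when
 * CR4.DE is set), so the kernel never touches them directly.
 */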
/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
        unsigned int reg;
        __asm__ __volatile__ ("pushfl;"
                              "popl %0;"
                              "andl %1, %0;"
                              "orl %2, %0;"
                              "pushl %0;"
                              "popfl"
                              : "=&r" (reg)
                              : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
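/*
 * On 64-bit this is intentionally a no-op here: the 64-bit iopl()
 * path updates the IOPL field in the saved EFLAGS image (pt_regs)
 * instead, which takes effect on the return to user space.
 */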
static inline void native_load_sp0(struct tss_struct *tss,
                                   struct thread_struct *thread)
{
        tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
        /* Only happens when SEP is enabled, no need to test "SEP"arately */
        if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
                tss->x86_tss.ss1 = thread->sysenter_cs;
                wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
        }
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
        asm volatile("swapgs" ::: "memory");
#endif
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid native_cpuid
#define paravirt_enabled() 0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)     \
        (var) = native_get_debugreg(register)
#define set_debugreg(value, register)   \
        native_set_debugreg(register, value)

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#define SWAPGS  swapgs
#endif /* CONFIG_PARAVIRT */
/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPU's that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
        unsigned cr4;
        mmu_cr4_features |= mask;
        cr4 = read_cr4();
        cr4 |= mask;
        write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
        unsigned cr4;
        mmu_cr4_features &= ~mask;
        cr4 = read_cr4();
        cr4 &= ~mask;
        write_cr4(cr4);
}
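/*
 * Illustrative (hypothetical) call site: FPU setup code could enable
 * FXSAVE/FXRSTOR support with set_in_cr4(X86_CR4_OSFXSR); secondary
 * CPUs then inherit the bit from mmu_cr4_features when they boot.
 */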
struct microcode_header {
        unsigned int hdrver;
        unsigned int rev;
        unsigned int date;
        unsigned int sig;
        unsigned int cksum;
        unsigned int ldrver;
        unsigned int pf;
        unsigned int datasize;
        unsigned int totalsize;
        unsigned int reserved[3];
};

struct microcode {
        struct microcode_header hdr;
        unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from prescott processors */
struct extended_signature {
        unsigned int sig;
        unsigned int pf;
        unsigned int cksum;
};

struct extended_sigtable {
        unsigned int count;
        unsigned int cksum;
        unsigned int reserved[3];
        struct extended_signature sigs[0];
};

typedef struct {
        unsigned long seg;
} mm_segment_t;
/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);
/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
                         unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = 0;
        __cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
                               unsigned int *eax, unsigned int *ebx,
                               unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = count;
        __cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);
        return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);
        return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);
        return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
        unsigned int eax, ebx, ecx, edx;

        cpuid(op, &eax, &ebx, &ecx, &edx);
        return edx;
}
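/*
 * Illustrative uses: cpuid_eax(0x80000000) yields the highest
 * supported extended CPUID leaf; cpuid_edx(1) yields the leaf-1
 * feature flags (TSC, MSR, SSE2, ...).
 */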
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
        __asm__ __volatile__("rep;nop": : :"memory");
}

/* Stop speculative execution */
static inline void sync_core(void)
{
        int tmp;
        asm volatile("cpuid" : "=a" (tmp) : "0" (1)
                     : "ebx", "ecx", "edx", "memory");
}

#define cpu_relax()     rep_nop()
static inline void __monitor(const void *eax, unsigned long ecx,
                             unsigned long edx)
{
        /* "monitor %eax,%ecx,%edx;" */
        asm volatile(".byte 0x0f,0x01,0xc8;"
                     : :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
        /* "mwait %eax,%ecx;" */
        asm volatile(".byte 0x0f,0x01,0xc9;"
                     : :"a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
        /* "mwait %eax,%ecx;" */
        asm volatile("sti; .byte 0x0f,0x01,0xc9;"
                     : :"a" (eax), "c" (ecx));
}
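/*
 * The .byte sequences are the raw opcodes of MONITOR (0f 01 c8) and
 * MWAIT (0f 01 c9), spelled out so the file assembles even with
 * binutils too old to know the mnemonics. __sti_mwait() executes STI
 * first so that a pending interrupt can terminate the wait.
 */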
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern int force_mwait;

extern void select_idle_routine(const struct cpuinfo_x86 *c);

extern unsigned long boot_option_idle_override;

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void cpu_init(void);
extern void init_gdt(int cpu);

/*
 * from system description table in BIOS. Mostly for MCA use, but
 * others may find it useful.
 */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;

/* Boot loader type from the setup header */
extern int bootloader_type;

extern char ignore_fpu_irq;

#define cache_line_size()       (boot_cpu_data.x86_cache_alignment)

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
#define BASE_PREFETCH   ASM_NOP4
#define ARCH_HAS_PREFETCH
#else
#define BASE_PREFETCH   "prefetcht0 (%1)"
#endif
/* Prefetch instructions for Pentium III and AMD Athlon */
/*
 * It's not worth caring about 3dnow! prefetches for the K6 because
 * they are microcoded there and very slow. However, we currently
 * don't do prefetches for pre-XP Athlons either; that should be
 * fixed.
 */
static inline void prefetch(const void *x)
{
        alternative_input(BASE_PREFETCH,
                          "prefetchnta (%1)",
                          X86_FEATURE_XMM,
                          "r" (x));
}

/*
 * 3dnow! prefetch to get an exclusive cache line. Useful for spinlocks,
 * to avoid one state transition in the cache coherency protocol.
 */
static inline void prefetchw(const void *x)
{
        alternative_input(BASE_PREFETCH,
                          "prefetchw (%1)",
                          X86_FEATURE_3DNOW,
                          "r" (x));
}
#define spin_lock_prefetch(x)   prefetchw(x)
#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE       (PAGE_OFFSET)
#define STACK_TOP       TASK_SIZE
#define STACK_TOP_MAX   STACK_TOP

#define INIT_THREAD  {                                          \
        .sp0 = sizeof(init_stack) + (long)&init_stack,          \
        .vm86_info = NULL,                                      \
        .sysenter_cs = __KERNEL_CS,                             \
        .io_bitmap_ptr = NULL,                                  \
        .fs = __KERNEL_PERCPU,                                  \
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {                                             \
        .x86_tss = {                                            \
                .sp0 = sizeof(init_stack) + (long)&init_stack,  \
                .ss0 = __KERNEL_DS,                             \
                .ss1 = __KERNEL_CS,                             \
                .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,     \
        },                                                      \
        .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },          \
}

#define start_thread(regs, new_eip, new_esp) do {       \
        __asm__("movl %0,%%gs": :"r" (0));              \
        regs->fs = 0;                                   \
        set_fs(USER_DS);                                \
        regs->ds = __USER_DS;                           \
        regs->es = __USER_DS;                           \
        regs->ss = __USER_DS;                           \
        regs->cs = __USER_CS;                           \
        regs->ip = new_eip;                             \
        regs->sp = new_esp;                             \
} while (0)
extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS       (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)                                          \
({                                                              \
        unsigned long *__ptr = (unsigned long *)(info);         \
        (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);             \
})

/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)                                      \
({                                                              \
        struct pt_regs *__regs__;                               \
        __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
        __regs__ - 1;                                           \
})

#define KSTK_ESP(task)  (task_pt_regs(task)->sp)
#else
/*
 * User space process size. 47 bits minus one guard page.
 */
#define TASK_SIZE64     (0x800000000000UL - 4096)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET        ((current->personality & ADDR_LIMIT_3GB) ? \
                                 0xc0000000 : 0xFFFFe000)

#define TASK_SIZE       (test_thread_flag(TIF_IA32) ? \
                         IA32_PAGE_OFFSET : TASK_SIZE64)
#define TASK_SIZE_OF(child)     ((test_tsk_thread_flag(child, TIF_IA32)) ? \
                                 IA32_PAGE_OFFSET : TASK_SIZE64)

#define STACK_TOP       TASK_SIZE
#define STACK_TOP_MAX   TASK_SIZE64

#define INIT_THREAD  { \
        .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
        .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define start_thread(regs, new_rip, new_rsp) do {                           \
        asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
        load_gs_index(0);                                                   \
        (regs)->ip = (new_rip);                                             \
        (regs)->sp = (new_rsp);                                             \
        write_pda(oldrsp, (new_rsp));                                       \
        (regs)->cs = __USER_CS;                                             \
        (regs)->ss = __USER_DS;                                             \
        (regs)->flags = 0x200;                                              \
        set_fs(USER_DS);                                                    \
} while (0)
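/*
 * The 0x200 stored into ->flags above is X86_EFLAGS_IF: a freshly
 * exec'ed task starts with interrupts enabled and all other flag
 * bits clear.
 */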
/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)      (*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)       ((struct pt_regs *)(tsk)->thread.sp0 - 1)
#define KSTK_ESP(tsk)           -1 /* sorry. doesn't work for syscall. */
#endif /* CONFIG_X86_64 */
/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)  (task_pt_regs(task)->ip)

#endif /* __ASM_X86_PROCESSOR_H */