/*
 * IA32 helper functions
 *
 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
 * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2001-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 thread context
 * 02/19/01	D. Mosberger	dropped tssd; it's not needed
 * 09/14/01	D. Mosberger	fixed memory management for gdt/tss page
 * 09/29/01	D. Mosberger	added ia32_load_segment_descriptors()
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/sched.h>

#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/uaccess.h>

#include "ia32priv.h"

extern void die_if_kernel (char *str, struct pt_regs *regs, long err);

struct exec_domain ia32_exec_domain;
struct page *ia32_shared_page[NR_CPUS];
unsigned long *ia32_boot_gdt;
unsigned long *cpu_gdt_table[NR_CPUS];
struct page *ia32_gate_page;

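/*
 * Look up the IA-32 descriptor for @selector and return it in the
 * unscrambled IA-64 format.  The TI bit of the selector picks the LDT
 * over the GDT; the upper bits index into the chosen table.  A null
 * selector or an out-of-range index yields a null descriptor.
 */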
static unsigned long
load_desc (u16 selector)
{
	unsigned long *table, limit, index;

	if (!selector)
		return 0;
	if (selector & IA32_SEGSEL_TI) {
		table = (unsigned long *) IA32_LDT_OFFSET;
		limit = IA32_LDT_ENTRIES;
	} else {
		table = cpu_gdt_table[smp_processor_id()];
		limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]);
	}
	index = selector >> IA32_SEGSEL_INDEX_SHIFT;
	if (index >= limit)
		return 0;
	return IA32_SEG_UNSCRAMBLE(table[index]);
}

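/*
 * Recompute the cached segment descriptors from the IA-32 selectors
 * that the task keeps packed in r16 (DS/ES/FS/GS, 16 bits each) and in
 * the low half of r17 (CS/SS).  The unscrambled descriptors live in
 * r24 and r27-r29 and in ar.csd/ar.ssd.
 */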
void
ia32_load_segment_descriptors (struct task_struct *task)
{
	struct pt_regs *regs = ia64_task_regs(task);

	/* Setup the segment descriptors */
	regs->r24 = load_desc(regs->r16 >> 16);	/* ESD */
	regs->r27 = load_desc(regs->r16 >>  0);	/* DSD */
	regs->r28 = load_desc(regs->r16 >> 32);	/* FSD */
	regs->r29 = load_desc(regs->r16 >> 48);	/* GSD */
	regs->ar_csd = load_desc(regs->r17 >>  0);	/* CSD */
	regs->ar_ssd = load_desc(regs->r17 >> 16);	/* SSD */
}

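/*
 * Handle CLONE_SETTLS for an IA-32 child: r14 maps to IA-32 %esi,
 * which carries the struct ia32_user_desc pointer in the IA-32 clone()
 * ABI, so copy the descriptor in, validate the requested TLS slot, and
 * install the entry in the child's TLS array.
 */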
int
ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
{
	struct desc_struct *desc;
	struct ia32_user_desc info;
	int idx;

	if (copy_from_user(&info, (void __user *)(childregs->r14 & 0xffffffff), sizeof(info)))
		return -EFAULT;
	if (LDT_empty(&info))
		return -EINVAL;

	idx = info.entry_number;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
	desc->a = LDT_entry_a(&info);
	desc->b = LDT_entry_b(&info);

	/* XXX: can this be done in a cleaner way ? */
	load_TLS(&child->thread, smp_processor_id());
	ia32_load_segment_descriptors(child);
	load_TLS(&current->thread, smp_processor_id());

	return 0;
}

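/*
 * ia32_save_state()/ia32_load_state() save and restore the IA-32
 * machine state that lives in IA-64 application registers (ar.eflag
 * and the x87 fsr/fcr/fir/fdr set) along with the kernel registers
 * holding the I/O base and the TSS descriptor, so an IA-32 task can
 * be switched off and back onto a CPU.
 */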
void
ia32_save_state (struct task_struct *t)
{
	t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG);
	t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR);
	t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR);
	t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR);
	t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR);
	ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
	ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
}

void
ia32_load_state (struct task_struct *t)
{
	unsigned long eflag, fsr, fcr, fir, fdr, tssd;
	struct pt_regs *regs = ia64_task_regs(t);

	eflag = t->thread.eflag;
	fsr = t->thread.fsr;
	fcr = t->thread.fcr;
	fir = t->thread.fir;
	fdr = t->thread.fdr;
	tssd = load_desc(_TSS);			/* TSSD */

	ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
	ia64_setreg(_IA64_REG_AR_FSR, fsr);
	ia64_setreg(_IA64_REG_AR_FCR, fcr);
	ia64_setreg(_IA64_REG_AR_FIR, fir);
	ia64_setreg(_IA64_REG_AR_FDR, fdr);
	current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
	current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
	ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
	ia64_set_kr(IA64_KR_TSSD, tssd);

	regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17;
	regs->r30 = load_desc(_LDT);		/* LDTD */
	load_TLS(&t->thread, smp_processor_id());
}

/*
 * Setup IA32 GDT and TSS
 */
void
ia32_gdt_init (void)
{
	int cpu = smp_processor_id();

	ia32_shared_page[cpu] = alloc_page(GFP_KERNEL);
	if (!ia32_shared_page[cpu])
		panic("failed to allocate ia32_shared_page[%d]\n", cpu);

	cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]);

	/* Copy from the boot cpu's GDT */
	memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE);
}

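/*
 * GDT entries are stored in the "scrambled" IA-32 hardware layout,
 * with the base and limit fields split across the 8-byte entry;
 * IA32_SEG_DESCRIPTOR() assembles that layout from base, limit, and
 * the individual attribute bits.
 */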
/*
 * Setup IA32 GDT and TSS
 */
static void
ia32_boot_gdt_init (void)
{
	unsigned long ldt_size;

	ia32_shared_page[0] = alloc_page(GFP_KERNEL);
	if (!ia32_shared_page[0])
		panic("failed to allocate ia32_shared_page[0]\n");

	ia32_boot_gdt = page_address(ia32_shared_page[0]);
	cpu_gdt_table[0] = ia32_boot_gdt;

	/* CS descriptor in IA-32 (scrambled) format */
	ia32_boot_gdt[__USER_CS >> 3]
		= IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
				      0xb, 1, 3, 1, 1, 1, 1);

	/* DS descriptor in IA-32 (scrambled) format */
	ia32_boot_gdt[__USER_DS >> 3]
		= IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
				      0x3, 1, 3, 1, 1, 1, 1);

	ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
	ia32_boot_gdt[TSS_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
						       0xb, 0, 3, 1, 1, 1, 0);
	ia32_boot_gdt[LDT_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
						       0x2, 0, 3, 1, 1, 1, 0);
}

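/*
 * The gate page holds the signal-return trampolines executed by IA-32
 * tasks.  Each trampoline is encoded below as a little-endian
 * immediate: the byte sequence of the IA-32 instructions, with the
 * syscall number embedded after the movl opcode (0xb8).
 */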
static void
ia32_gate_page_init (void)
{
	unsigned long *sr;

	ia32_gate_page = alloc_page(GFP_KERNEL);
	sr = page_address(ia32_gate_page);
	/* This is popl %eax ; movl $__IA32_NR_sigreturn,%eax ; int $0x80 */
	*sr++ = 0xb858 | (__IA32_NR_sigreturn << 16) | (0x80cdUL << 48);

	/* This is movl $__IA32_NR_rt_sigreturn,%eax ; int $0x80 */
	*sr = 0xb8 | (__IA32_NR_rt_sigreturn << 8) | (0x80cdUL << 40);
}

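/*
 * One-time boot initialization: build the boot-CPU GDT and the shared
 * signal-trampoline gate page.
 */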
void
ia32_mem_init (void)
{
	ia32_boot_gdt_init();
	ia32_gate_page_init();
}

/*
 * Handle bad IA32 interrupt via syscall
 */
void
ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
{
	siginfo_t siginfo;

	die_if_kernel("Bad IA-32 interrupt", regs, int_num);

	siginfo.si_signo = SIGTRAP;
	siginfo.si_errno = int_num;	/* XXX is it OK to abuse si_errno like this? */
	siginfo.si_flags = 0;
	siginfo.si_isr = 0;
	siginfo.si_addr = NULL;
	siginfo.si_imm = 0;
	siginfo.si_code = TRAP_BRKPT;
	force_sig_info(SIGTRAP, &siginfo, current);
}

void
ia32_cpu_init (void)
{
	/* initialize global ia32 state - CR0 and CR4 */
	ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0));
}

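/*
 * Register the IA-32 exec domain so PER_LINUX32 tasks pick up the
 * default signal mappings.
 */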
static int __init
ia32_init (void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

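	/*
	 * When the native page size exceeds the IA-32 4KB page, one
	 * native page can back several partially mapped IA-32 pages;
	 * the partial_page cache tracks those sub-page mappings.
	 */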
#if PAGE_SHIFT > IA32_PAGE_SHIFT
	{
		extern kmem_cache_t *partial_page_cachep;

		partial_page_cachep = kmem_cache_create("partial_page_cache",
							sizeof(struct partial_page), 0, 0,
							NULL, NULL);
		if (!partial_page_cachep)
			panic("Cannot create partial page SLAB cache");
	}
#endif
	return 0;
}

__initcall(ia32_init);