/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/intel_rdt.h>
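
/*
 * Per-CPU scratch slot for the 64-bit SYSCALL entry path: as far as I
 * can tell from entry_64.S of this era, SYSCALL parks the user RSP here
 * while switching to the kernel stack. The entry code, not this file,
 * is the authoritative reference for its use.
 */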
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs & 0xffff,
                (void *)regs->ip);
        printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss,
                regs->sp, regs->flags);
        if (regs->orig_ax != -1)
                pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
        else
                pr_cont("\n");

        printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
        printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
        printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
        printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = __read_cr4();

        printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
                        es, cr0);
        printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
                        cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);

        /* Only print out debug registers if they are in their non-default state. */
        if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
            (d6 == DR6_RESERVED) && (d7 == 0x400))) {
                printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
                       d0, d1, d2);
                printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
                       d3, d6, d7);
        }

        if (boot_cpu_has(X86_FEATURE_OSPKE))
                printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
                if (dead_task->mm->context.ldt) {
                        pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt->entries,
                                dead_task->mm->context.ldt->size);
                        BUG();
                }
#endif
        }
}

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p, unsigned long tls)
{
        int err;
        struct pt_regs *childregs;
        struct fork_frame *fork_frame;
        struct inactive_task_frame *frame;
        struct task_struct *me = current;

        p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
        childregs = task_pt_regs(p);
        fork_frame = container_of(childregs, struct fork_frame, regs);
        frame = &fork_frame->frame;
        frame->bp = 0;
        frame->ret_addr = (unsigned long) ret_from_fork;
        p->thread.sp = (unsigned long) fork_frame;
        p->thread.io_bitmap_ptr = NULL;
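
        /*
         * Inherit FS/GS state from the parent. Per the selector/base
         * rules documented in __switch_to() below: if the saved selector
         * is nonzero, the live base came from a descriptor table rather
         * than the base MSR, so the cached base is recorded as zero.
         */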
        savesegment(gs, p->thread.gsindex);
        p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
        savesegment(fs, p->thread.fsindex);
        p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                frame->bx = sp;         /* function */
                frame->r12 = arg;
                return 0;
        }
        frame->bx = 0;
        *childregs = *current_pt_regs();

        childregs->ax = 0;
        if (sp)
                childregs->sp = sp;

        err = -ENOMEM;
        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (in_ia32_syscall())
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)tls, 0);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, tls);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }

        return err;
}
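
/*
 * For reference, a minimal sketch of how the tls argument above reaches
 * this function from userspace (illustrative only, assuming the glibc
 * clone() wrapper; not part of this file):
 *
 *	pid = clone(fn, child_stack,
 *		    CLONE_VM | CLONE_SETTLS | SIGCHLD, arg,
 *		    NULL, tls_ptr, NULL);
 *
 * On x86-64 tls_ptr is the new FS base (routed to ARCH_SET_FS above);
 * a 32-bit caller instead passes a struct user_desc * that is consumed
 * by do_set_thread_area().
 */
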
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
                    unsigned long new_sp,
                    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
        loadsegment(fs, 0);
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);
        regs->ip                = new_ip;
        regs->sp                = new_sp;
        regs->cs                = _cs;
        regs->ss                = _ss;
        regs->flags             = X86_EFLAGS_IF;
        force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}
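
/*
 * start_thread() is the hook the binfmt loaders (e.g. load_elf_binary())
 * use to aim the initial user-mode register state at the new image's
 * entry point and stack; that call path lives in fs/binfmt_*.c, not
 * here.
 */
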
#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            test_thread_flag(TIF_X32)
                            ? __USER_CS : __USER32_CS,
                            __USER_DS, __USER_DS);
}
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
        unsigned prev_fsindex, prev_gsindex;

        switch_fpu_prepare(prev_fpu, cpu);
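
        /*
         * switch_fpu_prepare()/switch_fpu_finish() bracket the segment
         * and TLS juggling below. Roughly (see fpu/internal.h for the
         * real rules): prepare saves or defers the outgoing task's FPU
         * state, and finish activates the incoming task's state once
         * the rest of the switch is done.
         */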

        /* We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
         *
         * (e.g. xen_load_tls())
         */
        savesegment(fs, prev_fsindex);
        savesegment(gs, prev_gsindex);

        /*
         * Load TLS before restoring any segments so that segment loads
         * reference the correct GDT entries.
         */
        load_TLS(next, cpu);

        /*
         * Leave lazy mode, flushing any hypercalls made here. This
         * must be done after loading TLS entries in the GDT but before
         * loading segments that might reference them, and it must
         * be done before fpu__restore(), so the TS bit is up to
         * date.
         */
        arch_end_context_switch(next_p);

        /* Switch DS and ES.
         *
         * Reading them only returns the selectors, but writing them (if
         * nonzero) loads the full descriptor from the GDT or LDT. The
         * LDT for next is loaded in switch_mm, and the GDT is loaded
         * above.
         *
         * We therefore need to write new values to the segment
         * registers on every context switch unless both the new and old
         * values are zero.
         *
         * Note that we don't need to do anything for CS and SS, as
         * those are saved and restored as part of pt_regs.
         */
        savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        /*
         * Switch FS and GS.
         *
         * These are even more complicated than DS and ES: they have
         * 64-bit bases that are controlled by arch_prctl. The bases
         * don't necessarily match the selectors, as user code can do
         * any number of things to cause them to be inconsistent.
         *
         * We don't promise to preserve the bases if the selectors are
         * nonzero. We also don't promise to preserve the base if the
         * selector is zero and the base doesn't match whatever was
         * most recently passed to ARCH_SET_FS/GS. (If/when the
         * FSGSBASE instructions are enabled, we'll need to offer
         * stronger guarantees.)
         *
         * As an invariant,
         * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
         * impossible.
         */
        if (next->fsindex) {
                /* Loading a nonzero value into FS sets the index and base. */
                loadsegment(fs, next->fsindex);
        } else {
                if (next->fsbase) {
                        /* Next index is zero but next base is nonzero. */
                        if (prev_fsindex)
                                loadsegment(fs, 0);
                        wrmsrl(MSR_FS_BASE, next->fsbase);
                } else {
                        /* Next base and index are both zero. */
                        if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
                                /*
                                 * We don't know the previous base and can't
                                 * find out without RDMSR. Forcibly clear it.
                                 */
                                loadsegment(fs, __USER_DS);
                                loadsegment(fs, 0);
                        } else {
                                /*
                                 * If the previous index is zero and ARCH_SET_FS
                                 * didn't change the base, then the base is
                                 * also zero and we don't need to do anything.
                                 */
                                if (prev->fsbase || prev_fsindex)
                                        loadsegment(fs, 0);
                        }
                }
        }
        /*
         * Save the old state and preserve the invariant.
         * NB: if prev_fsindex == 0, then we can't reliably learn the base
         * without RDMSR because Intel user code can zero it without telling
         * us and AMD user code can program any 32-bit value without telling
         * us.
         */
        if (prev_fsindex)
                prev->fsbase = 0;
        prev->fsindex = prev_fsindex;

        if (next->gsindex) {
                /* Loading a nonzero value into GS sets the index and base. */
                load_gs_index(next->gsindex);
        } else {
                if (next->gsbase) {
                        /* Next index is zero but next base is nonzero. */
                        if (prev_gsindex)
                                load_gs_index(0);
                        wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
                } else {
                        /* Next base and index are both zero. */
                        if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
                                /*
                                 * We don't know the previous base and can't
                                 * find out without RDMSR. Forcibly clear it.
                                 *
                                 * This contains a pointless SWAPGS pair.
                                 * Fixing it would involve an explicit check
                                 * for Xen or a new pvop.
                                 */
                                load_gs_index(__USER_DS);
                                load_gs_index(0);
                        } else {
                                /*
                                 * If the previous index is zero and ARCH_SET_GS
                                 * didn't change the base, then the base is
                                 * also zero and we don't need to do anything.
                                 */
                                if (prev->gsbase || prev_gsindex)
                                        load_gs_index(0);
                        }
                }
        }
        /*
         * Save the old state and preserve the invariant.
         * NB: if prev_gsindex == 0, then we can't reliably learn the base
         * without RDMSR because Intel user code can zero it without telling
         * us and AMD user code can program any 32-bit value without telling
         * us.
         */
        if (prev_gsindex)
                prev->gsbase = 0;
        prev->gsindex = prev_gsindex;

        switch_fpu_finish(next_fpu, cpu);

        /*
         * Switch the PDA and FPU contexts.
         */
        this_cpu_write(current_task, next_p);

        /* Reload esp0 and ss1. This changes current_thread_info(). */
        load_sp0(tss, next);

        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
         */
        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN
        /*
         * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
         * current_pt_regs()->flags may not match the current task's
         * intended IOPL. We need to switch it manually.
         */
        if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
                     prev->iopl != next->iopl))
                xen_set_iopl_mask(next->iopl);
#endif

        if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
                /*
                 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
                 * does not update the cached descriptor. As a result, if we
                 * do SYSRET while SS is NULL, we'll end up in user mode with
                 * SS apparently equal to __USER_DS but actually unusable.
                 *
                 * The straightforward workaround would be to fix it up just
                 * before SYSRET, but that would slow down the system call
                 * fast paths. Instead, we ensure that SS is never NULL in
                 * system call context. We do this by replacing NULL SS
                 * selectors at every context switch. SYSCALL sets up a valid
                 * SS, so the only way to get NULL is to re-enter the kernel
                 * from CPL 3 through an interrupt. Since that can't happen
                 * in the same task as a running syscall, we are guaranteed to
                 * context switch between every interrupt vector entry and a
                 * subsequent SYSRET.
                 *
                 * We read SS first because SS reads are much faster than
                 * writes. Out of caution, we force SS to __KERNEL_DS even if
                 * it previously had a different non-NULL value.
                 */
                unsigned short ss_sel;
                savesegment(ss, ss_sel);
                if (ss_sel != __KERNEL_DS)
                        loadsegment(ss, __KERNEL_DS);
        }

        /* Load the Intel cache allocation PQR MSR. */
        intel_rdt_sched_in();

        return prev_p;
}
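
/*
 * The prev_p returned above is threaded back through the switch_to()
 * glue (entry_64.S / asm/switch_to.h) so that code running on the new
 * task's stack still has a handle on the task it came from; see those
 * files rather than this one for the details.
 */
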
void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);
        clear_thread_flag(TIF_ADDR32);
        clear_thread_flag(TIF_X32);

        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
                current->mm->context.ia32_compat = 0;

        /* TBD: overwrites user setup. Should have two bits.
           But 64bit processes have always behaved this way,
           so it's not too bad. The main problem is just that
           32-bit children are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(bool x32)
{
        /* inherit personality from parent */

        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_ADDR32);

        /* Mark the associated mm as containing 32-bit tasks. */
        if (x32) {
                clear_thread_flag(TIF_IA32);
                set_thread_flag(TIF_X32);
                if (current->mm)
                        current->mm->context.ia32_compat = TIF_X32;
                current->personality &= ~READ_IMPLIES_EXEC;
                /*
                 * in_compat_syscall() uses the presence of the x32
                 * syscall bit flag to determine compat status.
                 */
                current->thread.status &= ~TS_COMPAT;
        } else {
                set_thread_flag(TIF_IA32);
                clear_thread_flag(TIF_X32);
                if (current->mm)
                        current->mm->context.ia32_compat = TIF_IA32;
                current->personality |= force_personality32;
                /* Prepare the first "return" to user space */
                current->thread.status |= TS_COMPAT;
        }
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
        int ret;

        ret = map_vdso_once(image, addr);
        if (ret)
                return ret;

        return (long)image->size;
}
#endif
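
/*
 * The ARCH_MAP_VDSO_* cases below map a vDSO image at a caller-chosen
 * address. As far as I know their main user is checkpoint/restore
 * (CRIU), which must recreate the vDSO where the dumped process had
 * it; hence the CONFIG_CHECKPOINT_RESTORE guard.
 */
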
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_MAX)
                        return -EPERM;
                cpu = get_cpu();
                task->thread.gsindex = 0;
                task->thread.gsbase = addr;
                if (doit) {
                        load_gs_index(0);
                        ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE_MAX)
                        return -EPERM;
                cpu = get_cpu();
                task->thread.fsindex = 0;
                task->thread.fsbase = addr;
                if (doit) {
                        /* set the selector to 0 to not confuse __switch_to */
                        loadsegment(fs, 0);
                        ret = wrmsrl_safe(MSR_FS_BASE, addr);
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;

                if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fsbase;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;

                if (doit)
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
                else
                        base = task->thread.gsbase;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
        case ARCH_MAP_VDSO_X32:
                return prctl_map_vdso(&vdso_image_x32, addr);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        case ARCH_MAP_VDSO_32:
                return prctl_map_vdso(&vdso_image_32, addr);
# endif
        case ARCH_MAP_VDSO_64:
                return prctl_map_vdso(&vdso_image_64, addr);
#endif

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}
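
/*
 * A minimal userspace sketch of the prctl interface above (illustrative
 * only; the constants and syscall number come from the uapi headers):
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, 0x1000000UL);
 *	syscall(SYS_arch_prctl, ARCH_GET_GS, (unsigned long)&base);
 *
 * Note the asymmetry: for ARCH_SET_* addr is the new base value itself,
 * while for ARCH_GET_* it is a pointer the kernel stores the base
 * through (see the put_user() calls above).
 */
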
unsigned long KSTK_ESP(struct task_struct *task)
{
        return task_pt_regs(task)->sp;
}