arch/x86/kernel/process_64.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/resctrl_sched.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif

#include "process.h"
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, es;

	show_iret_regs(regs);

	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	if (mode == SHOW_REGS_SHORT)
		return;

	if (mode == SHOW_REGS_USER) {
		rdmsrl(MSR_FS_BASE, fs);
		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
		printk(KERN_DEFAULT "FS:  %016lx GS:  %016lx\n",
		       fs, shadowgs);
		return;
	}

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04lx DS: %04x ES: %04x CR0: %016lx\n", regs->cs, ds,
	       es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
	       cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	      (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}
void release_thread(struct task_struct *dead_task)
{
	WARN_ON(dead_task->mm);
}

enum which_selector {
	FS,
	GS
};
/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available.  The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero.  On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct.  This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs.  In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}

static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	save_base_legacy(task, task->thread.fsindex, FS);
	save_base_legacy(task, task->thread.gsindex, GS);
}
#if IS_ENABLED(CONFIG_KVM)
/*
 * While a process is running, current->thread.fsbase and current->thread.gsbase
 * may not match the corresponding CPU registers (see save_base_legacy()). KVM
 * wants an efficient way to save and restore FSBASE and GSBASE.
 * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE.
 */
void save_fsgs_for_kvm(void)
{
	save_fsgs(current);
}
EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
#endif
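/*
 * Illustrative sketch only (not code in this file): once the FSGSBASE
 * instructions are usable, the save path above could read the base
 * directly instead of trusting the saved selector, e.g. via inline asm:
 *
 *	asm volatile ("rdfsbase %0" : "=r" (task->thread.fsbase));
 *
 * GSBASE is the subtle one: while in the kernel, the user's GS base sits
 * in MSR_KERNEL_GS_BASE (the SWAPGS convention), so a plain RDGSBASE would
 * read the kernel's per-CPU base rather than the user value.
 */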
static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}
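/*
 * Note: %fs can be written with a plain selector load, but %gs cannot be
 * touched directly while the kernel is using MSR_GS_BASE for per-CPU data.
 * load_gs_index() therefore wraps the selector write in SWAPGS so the
 * kernel's active GS base is preserved.
 */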
static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives.  This is the case on
				 * Intel-style CPUs.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment.  Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}
static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
					      struct thread_struct *next)
{
	load_seg_legacy(prev->fsindex, prev->fsbase,
			next->fsindex, next->fsbase, FS);
	load_seg_legacy(prev->gsindex, prev->gsbase,
			next->gsindex, next->gsbase, GS);
}
static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
					    unsigned short selector)
{
	unsigned short idx = selector >> 3;
	unsigned long base;

	if (likely((selector & SEGMENT_TI_MASK) == 0)) {
		if (unlikely(idx >= GDT_ENTRIES))
			return 0;

		/*
		 * There are no user segments in the GDT with nonzero bases
		 * other than the TLS segments.
		 */
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			return 0;

		idx -= GDT_ENTRY_TLS_MIN;
		base = get_desc_base(&task->thread.tls_array[idx]);
	} else {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		struct ldt_struct *ldt;

		/*
		 * If performance here mattered, we could protect the LDT
		 * with RCU.  This is a slow path, though, so we can just
		 * take the mutex.
		 */
		mutex_lock(&task->mm->context.lock);
		ldt = task->mm->context.ldt;
		/* The mm may have no LDT at all even if the selector has TI set. */
		if (unlikely(!ldt || idx >= ldt->nr_entries))
			base = 0;
		else
			base = get_desc_base(ldt->entries + idx);
		mutex_unlock(&task->mm->context.lock);
#else
		base = 0;
#endif
	}

	return base;
}
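/*
 * A segment selector encodes a 13-bit descriptor index in bits 15:3, the
 * table indicator (TI, bit 2: 0 = GDT, 1 = LDT) and a 2-bit RPL in bits 1:0.
 * That is why the helper above shifts the selector right by 3 to get the
 * table index and tests SEGMENT_TI_MASK to choose between GDT and LDT.
 */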
unsigned long x86_fsbase_read_task(struct task_struct *task)
{
	unsigned long fsbase;

	if (task == current)
		fsbase = x86_fsbase_read_cpu();
	else if (task->thread.fsindex == 0)
		fsbase = task->thread.fsbase;
	else
		fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);

	return fsbase;
}
unsigned long x86_gsbase_read_task(struct task_struct *task)
{
	unsigned long gsbase;

	if (task == current)
		gsbase = x86_gsbase_read_cpu_inactive();
	else if (task->thread.gsindex == 0)
		gsbase = task->thread.gsbase;
	else
		gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);

	return gsbase;
}
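/*
 * While the kernel runs, MSR_GS_BASE holds the kernel's per-CPU base and the
 * user GS base is parked in MSR_KERNEL_GS_BASE (the two are exchanged by
 * SWAPGS on kernel entry/exit).  x86_gsbase_read_cpu_inactive() therefore
 * returns the user value by reading MSR_KERNEL_GS_BASE.
 */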
void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.fsbase = fsbase;
}

void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
	WARN_ON_ONCE(task == current);

	task->thread.gsbase = gsbase;
}
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip	= new_ip;
	regs->sp	= new_sp;
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif
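/*
 * start_thread() (and its compat variant) are invoked from the binfmt
 * loaders, e.g. load_elf_binary(), once execve() has set up the new mm:
 * they point the user frame in pt_regs at the new entry IP and stack so the
 * first return to user mode lands in the freshly loaded program.
 */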
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported too.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();

	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
		     this_cpu_read(irq_count) != -1);

	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_prepare(prev_fpu, cpu);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.  This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them.
	 */
	arch_end_context_switch(next_p);

	/* Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT.  The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	x86_fsgsbase_load(prev, next);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);
	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

	switch_fpu_finish(next_fpu);

	/* Reload sp0. */
	update_task_stack(next_p);

	switch_to_extra(prev_p, next_p);

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor.  As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths.  Instead, we ensure that SS is never NULL in
		 * system call context.  We do this by replacing NULL SS
		 * selectors at every context switch.  SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt.  Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	/* Load the Intel cache allocation PQR MSR. */
	resctrl_sched_in();

	return prev_p;
}
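/*
 * The task returned from __switch_to() is handed back through the switch_to()
 * macro as the "last" task, which finish_task_switch() uses to clean up after
 * the previous thread once the stack switch has completed.
 */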
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);
	/* Pretend that this comes from a 64bit execve */
	task_pt_regs(current)->orig_ax = __NR_execve;
	current_thread_info()->status &= ~TS_COMPAT;

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/*
	 * TBD: overwrites user setup.  Should have two bits.
	 * But 64bit processes have always behaved this way,
	 * so it's not too bad.  The main problem is just that
	 * 32bit children are affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}
static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
	clear_thread_flag(TIF_IA32);
	set_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_X32;
	current->personality &= ~READ_IMPLIES_EXEC;
	/*
	 * in_32bit_syscall() uses the presence of the x32 syscall bit
	 * flag to determine compat status.  The x86 mmap() code relies on
	 * the syscall bitness so set x32 syscall bit right here to make
	 * in_32bit_syscall() work during exec().
	 *
	 * Pretend to come from an x32 execve.
	 */
	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
	current_thread_info()->status &= ~TS_COMPAT;
#endif
}
static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
	set_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_IA32;
	current->personality |= force_personality32;
	/* Prepare the first "return" to user space */
	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
	current_thread_info()->status |= TS_COMPAT;
#endif
}
void set_personality_ia32(bool x32)
{
	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	if (x32)
		__set_personality_x32();
	else
		__set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif
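/*
 * On success prctl_map_vdso() returns the size of the mapped vDSO image
 * rather than 0, so a checkpoint/restore tool issuing ARCH_MAP_VDSO_* can
 * tell how much address space the mapping occupies at the requested address.
 */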
long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
	int ret = 0;

	switch (option) {
	case ARCH_SET_GS: {
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * ARCH_SET_GS has always overwritten the index
		 * and the base. Zero is the most sensible value
		 * to put in the index, and is the only value that
		 * makes any sense if FSGSBASE is unavailable.
		 */
		if (task == current) {
			loadseg(GS, 0);
			x86_gsbase_write_cpu_inactive(arg2);

			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.gsbase.
			 */
			task->thread.gsbase = arg2;
		} else {
			task->thread.gsindex = 0;
			x86_gsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_SET_FS: {
		/*
		 * Not strictly needed for %fs, but do it for symmetry
		 * with %gs
		 */
		if (unlikely(arg2 >= TASK_SIZE_MAX))
			return -EPERM;

		preempt_disable();
		/*
		 * Set the selector to 0 for the same reason
		 * as %gs above.
		 */
		if (task == current) {
			loadseg(FS, 0);
			x86_fsbase_write_cpu(arg2);

			/*
			 * On non-FSGSBASE systems, save_base_legacy() expects
			 * that we also fill in thread.fsbase.
			 */
			task->thread.fsbase = arg2;
		} else {
			task->thread.fsindex = 0;
			x86_fsbase_write_task(task, arg2);
		}
		preempt_enable();
		break;
	}
	case ARCH_GET_FS: {
		unsigned long base = x86_fsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base = x86_gsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, arg2);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, arg2);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
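/*
 * Illustrative only (not part of this file): from user space the TLS base is
 * typically managed through this syscall, roughly:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, tls_block);	// set %fs base
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);		// read it back
 *
 * Whether a raw syscall or a libc arch_prctl() wrapper is used depends on
 * the C library.
 */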
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	long ret;

	ret = do_arch_prctl_64(current, option, arg2);
	if (ret == -EINVAL)
		ret = do_arch_prctl_common(current, option, arg2);

	return ret;
}
#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}
#endif
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}