arch/powerpc/mm/fault.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
#include <asm/kup.h>

/*
 * Check whether the instruction inst is a store using
 * an update addressing form which will update r1.
 */
static bool store_updates_sp(unsigned int inst)
{
	/* check for 1 in the rA field */
	if (((inst >> 16) & 0x1f) != 1)
		return false;
	/* check major opcode */
	switch (inst >> 26) {
	case OP_STWU:
	case OP_STBU:
	case OP_STHU:
	case OP_STFSU:
	case OP_STFDU:
		return true;
	case OP_STD:	/* std or stdu */
		return (inst & 3) == 1;
	case OP_31:
		/* check minor opcode */
		switch ((inst >> 1) & 0x3ff) {
		case OP_31_XOP_STDUX:
		case OP_31_XOP_STWUX:
		case OP_31_XOP_STBUX:
		case OP_31_XOP_STHUX:
		case OP_31_XOP_STFSUX:
		case OP_31_XOP_STFDUX:
			return true;
		}
	}
	return false;
}
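
/*
 * Worked example (illustrative, not part of the original source): the
 * classic prologue instruction "stwu r1,-16(r1)" encodes as 0x9421fff0.
 * For that word, (inst >> 26) == 37 == OP_STWU and ((inst >> 16) & 0x1f)
 * == 1, i.e. rA is r1, so store_updates_sp() returns true.  A plain
 * "stw r1,-16(r1)" (major opcode 36) matches no case above and returns
 * false, so it would not be allowed to grow the stack.
 */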

/*
 * do_page_fault error handling helpers
 */

static int
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception(SIGSEGV, regs, si_code, address);

	return 0;
}

static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
{
	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
}

static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	return __bad_area_nosemaphore(regs, address, si_code);
}

static noinline int bad_area(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_MAPERR);
}

static int bad_key_fault_exception(struct pt_regs *regs, unsigned long address,
				   int pkey)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception_pkey(regs, address, pkey);

	return 0;
}

static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_ACCERR);
}

static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     vm_fault_t fault)
{
	if (!user_mode(regs))
		return SIGBUS;

	current->thread.trap_nr = BUS_ADRERR;
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		unsigned int lsb = 0; /* shutup gcc */

		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			current->comm, current->pid, address);

		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;

		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return 0;
	}

#endif
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
	return 0;
}

static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
			  vm_fault_t fault)
{
	/*
	 * Kernel page fault interrupted by SIGKILL. We have no reason to
	 * continue processing.
	 */
	if (fatal_signal_pending(current) && !user_mode(regs))
		return SIGKILL;

	/* Out of memory */
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, or some other thing happened to us that
		 * made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return SIGSEGV;
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			return do_sigbus(regs, addr, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			return bad_area_nosemaphore(regs, addr);
		else
			BUG();
	}
	return 0;
}

/* Is this a bad kernel fault ? */
static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
			     unsigned long address, bool is_write)
{
	int is_exec = TRAP(regs) == 0x400;

	/* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
	if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
				      DSISR_PROTFAULT))) {
		pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
				    address >= TASK_SIZE ? "exec-protected" : "user",
				    address,
				    from_kuid(&init_user_ns, current_uid()));

		// Kernel exec fault is always bad
		return true;
	}

	if (!is_exec && address < TASK_SIZE && (error_code & DSISR_PROTFAULT) &&
	    !search_exception_tables(regs->nip)) {
		pr_crit_ratelimited("Kernel attempted to access user page (%lx) - exploit attempt? (uid: %d)\n",
				    address,
				    from_kuid(&init_user_ns, current_uid()));
	}

	// Kernel fault on kernel address is bad
	if (address >= TASK_SIZE)
		return true;

	// Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad
	if (!search_exception_tables(regs->nip))
		return true;

	// Read/write fault in a valid region (the exception table search passed
	// above), but blocked by KUAP is bad, it can never succeed.
	if (bad_kuap_fault(regs, address, is_write))
		return true;

	// What's left? Kernel fault on user in well defined regions (extable
	// matched), and allowed by KUAP in the faulting context.
	return false;
}

static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
				struct vm_area_struct *vma, unsigned int flags,
				bool *must_retry)
{
	/*
	 * N.B. The POWER/Open ABI allows programs to access up to
	 * 288 bytes below the stack pointer.
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1.  Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (address + 0x100000 < vma->vm_end) {
		unsigned int __user *nip = (unsigned int __user *)regs->nip;
		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			return true;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed,
		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
		 * (or the byte, halfword, float or double forms).
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + 2048 >= uregs->gpr[1])
			return false;

		if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
		    access_ok(nip, sizeof(*nip))) {
			unsigned int inst;

			if (!probe_user_read(&inst, nip, sizeof(inst)))
				return !store_updates_sp(inst);
			*must_retry = true;
		}
		return true;
	}
	return false;
}
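
/*
 * Worked example (illustrative, not part of the original source): with the
 * stack VMA ending at 0xf0000000, a store faulting at 0xefe00000 is about
 * 2MB below vm_end, so the 1MB grace window above does not apply.  If the
 * saved user r1 is 0xefff0000, the access is also more than 2048 bytes
 * below r1, so the faulting instruction is fetched: only a store-with-update
 * through r1 (e.g. the stwux/stdux form a compiler emits for a large
 * alloca()) may expand the stack; anything else is treated as a bad area.
 */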

static bool access_error(bool is_write, bool is_exec,
			 struct vm_area_struct *vma)
{
	/*
	 * Allow execution from readable areas if the MMU does not
	 * provide separate controls over reading and executing.
	 *
	 * Note: That code used to not be enabled for 4xx/BookE.
	 * It is now as I/D cache coherency for these is done at
	 * set_pte_at() time and I see no reason why the test
	 * below wouldn't be valid on those processors. This -may-
	 * break programs compiled with a really old ABI though.
	 */
	if (is_exec) {
		return !(vma->vm_flags & VM_EXEC) &&
			(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
	}

	if (is_write) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return true;
		return false;
	}

	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return true;
	/*
	 * We should ideally do the vma pkey access check here. But in the
	 * fault path, handle_mm_fault() also does the same check. To avoid
	 * these multiple checks, we skip it here and handle access error due
	 * to pkeys later.
	 */
	return false;
}
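
/*
 * Illustrative note (not part of the original source): an instruction
 * fetch from a VM_READ|VM_WRITE mapping that lacks VM_EXEC is tolerated
 * here on CPUs without CPU_FTR_NOEXECUTE, since such MMUs cannot separate
 * reading from executing anyway; on CPUs with a usable no-execute bit the
 * same fetch fails the test above and is reported as an access error.
 */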

#ifdef CONFIG_PPC_SMLPAR
static inline void cmo_account_page_fault(void)
{
	if (firmware_has_feature(FW_FEATURE_CMO)) {
		u32 page_ins;

		preempt_disable();
		page_ins = be32_to_cpu(get_lppaca()->page_ins);
		page_ins += 1 << PAGE_FACTOR;
		get_lppaca()->page_ins = cpu_to_be32(page_ins);
		preempt_enable();
	}
}
#else
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */

#ifdef CONFIG_PPC_BOOK3S
static void sanity_check_fault(bool is_write, bool is_user,
			       unsigned long error_code, unsigned long address)
{
	/*
	 * Userspace trying to access kernel address, we get PROTFAULT for that.
	 */
	if (is_user && address >= TASK_SIZE) {
		if ((long)address == -1)
			return;

		pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
				    current->comm, current->pid, address,
				    from_kuid(&init_user_ns, current_uid()));
		return;
	}

	/*
	 * For hash translation mode, we should never get a
	 * PROTFAULT. Any update to the pte to reduce access will result in us
	 * removing the hash page table entry, thus resulting in a DSISR_NOHPTE
	 * fault instead of DSISR_PROTFAULT.
	 *
	 * A pte update to relax the access will not result in a hash page table
	 * entry invalidate and hence can result in DSISR_PROTFAULT.
	 * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
	 * the special !is_write in the below conditional.
	 *
	 * For platforms that don't support a coherent icache but do support
	 * a per-page noexec bit, we set things up so that the D/I cache
	 * sync happens via a fault. But that is handled by the low level
	 * hash fault code (hash_page_do_lazy_icache()) and we should not
	 * reach here in that case.
	 *
	 * For wrong accesses that can result in a PROTFAULT, the vma->vm_flags
	 * check should handle those, and we then fall through to the bad_area
	 * handling correctly.
	 *
	 * For embedded platforms with per-page exec support that don't have
	 * a coherent icache, we do get PROTFAULT and we handle that D/I cache
	 * sync in set_pte_at while taking the noexec/prot fault. Hence this
	 * WARN_ON is conditional on the server MMU.
	 *
	 * For radix, we can get a prot fault in the autonuma case, because the
	 * radix page table will have pages marked no-access for user.
	 */
	if (radix_enabled() || is_write)
		return;

	WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}
#else
static void sanity_check_fault(bool is_write, bool is_user,
			       unsigned long error_code, unsigned long address) { }
#endif /* CONFIG_PPC_BOOK3S */
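
/*
 * Illustrative example (not part of the original source): under hash
 * translation, mprotect(addr, len, PROT_NONE) removes the hash page table
 * entry along with the Linux PTE update, so the next user read of that
 * page faults with DSISR_NOHPTE rather than DSISR_PROTFAULT.  A
 * DSISR_PROTFAULT on a non-radix read fault is therefore unexpected and
 * trips the WARN_ON_ONCE() above.
 */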

/*
 * Define the correct "is_write" bit in error_code based
 * on the processor family
 */
#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
#define page_fault_is_write(__err)	((__err) & ESR_DST)
#define page_fault_is_bad(__err)	(0)
#else
#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
#if defined(CONFIG_PPC_8xx)
#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
#elif defined(CONFIG_PPC64)
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_64S)
#else
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
#endif
#endif
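
/*
 * Illustrative note (not part of the original source): on Book3S, a store
 * to an unmapped page arrives with the DSISR "access was a store" bit
 * (DSISR_ISSTORE) set, so page_fault_is_write() is non-zero and the fault
 * is handled as a write; a load of the same address leaves the bit clear
 * and is handled as a read.
 */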

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault
 *  - 0 any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
static int __do_page_fault(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int is_exec = TRAP(regs) == 0x400;
	int is_user = user_mode(regs);
	int is_write = page_fault_is_write(error_code);
	vm_fault_t fault, major = 0;
	bool must_retry = false;
	bool kprobe_fault = kprobe_page_fault(regs, 11);

	if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
		return 0;

	if (unlikely(page_fault_is_bad(error_code))) {
		if (is_user) {
			_exception(SIGBUS, regs, BUS_OBJERR, address);
			return 0;
		}
		return SIGBUS;
	}

	/* Additional sanity check(s) */
	sanity_check_fault(is_write, is_user, error_code, address);

	/*
	 * The kernel should never take an execute fault nor should it
	 * take a page fault to a kernel address or a page fault to a user
	 * address outside of dedicated places
	 */
	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write)))
		return SIGSEGV;

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled then we must not take the fault
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		if (is_user)
			printk_ratelimited(KERN_ERR "Page fault in user mode"
					   " with faulthandler_disabled()=%d"
					   " mm=%p\n",
					   faulthandler_disabled(), mm);
		return bad_area_nosemaphore(regs, address);
	}

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (error_code & DSISR_KEYFAULT)
		return bad_key_fault_exception(regs, address,
					       get_mm_addr_key(mm, address));

	/*
	 * We want to do this outside mmap_sem, because reading code around nip
	 * can result in fault, which will cause a deadlock when called with
	 * mmap_sem held
	 */
	if (is_user)
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;
	if (is_exec)
		flags |= FAULT_FLAG_INSTRUCTION;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if (!is_user && !search_exception_tables(regs->nip))
			return bad_area_nosemaphore(regs, address);

retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma))
		return bad_area(regs, address);
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		return bad_area(regs, address);

	/* The stack is being expanded, check if it's valid */
	if (unlikely(bad_stack_expansion(regs, address, vma, flags,
					 &must_retry))) {
		if (!must_retry)
			return bad_area(regs, address);

		up_read(&mm->mmap_sem);
		if (fault_in_pages_readable((const char __user *)regs->nip,
					    sizeof(unsigned int)))
			return bad_area_nosemaphore(regs, address);
		goto retry;
	}

	/* Try to expand it */
	if (unlikely(expand_stack(vma, address)))
		return bad_area(regs, address);

good_area:
	if (unlikely(access_error(is_write, is_exec, vma)))
		return bad_access(regs, address);

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

#ifdef CONFIG_PPC_MEM_KEYS
	/*
	 * we skipped checking for access error due to key earlier.
	 * Check that using handle_mm_fault error return.
	 */
	if (unlikely(fault & VM_FAULT_SIGSEGV) &&
	    !arch_vma_access_permitted(vma, is_write, is_exec, 0)) {

		int pkey = vma_pkey(vma);

		up_read(&mm->mmap_sem);
		return bad_key_fault_exception(regs, address, pkey);
	}
#endif /* CONFIG_PPC_MEM_KEYS */

	major |= fault & VM_FAULT_MAJOR;

	/*
	 * Handle the retry right now, the mmap_sem has been released in that
	 * case.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		/* We retry only once */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			if (!fatal_signal_pending(current))
				goto retry;
		}

		/*
		 * User mode? Just return to handle the fatal exception,
		 * otherwise return to bad_page_fault
		 */
		return is_user ? 0 : SIGBUS;
	}

	up_read(&current->mm->mmap_sem);

	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

	/*
	 * Major/minor page fault accounting.
	 */
	if (major) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
		cmo_account_page_fault();
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}
	return 0;
}
NOKPROBE_SYMBOL(__do_page_fault);

int do_page_fault(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code)
{
	enum ctx_state prev_state = exception_enter();
	int rc = __do_page_fault(regs, address, error_code);
	exception_exit(prev_state);
	return rc;
}
NOKPROBE_SYMBOL(do_page_fault);

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *entry;
	int is_write = page_fault_is_write(regs->dsisr);

	/* Are we prepared to handle this fault?  */
	if ((entry = search_exception_tables(regs->nip)) != NULL) {
		regs->nip = extable_fixup(entry);
		return;
	}

	/* kernel has accessed a bad area */

	switch (TRAP(regs)) {
	case 0x300:	/* data storage interrupt */
	case 0x380:	/* data segment (SLB) interrupt */
	case 0xe00:	/* hypervisor data storage interrupt */
		pr_alert("BUG: %s on %s at 0x%08lx\n",
			 regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" :
			 "Unable to handle kernel data access",
			 is_write ? "write" : "read", regs->dar);
		break;
	case 0x400:	/* instruction storage interrupt */
	case 0x480:	/* instruction segment (SLB) interrupt */
		pr_alert("BUG: Unable to handle kernel instruction fetch%s",
			 regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
		break;
	case 0x600:	/* alignment interrupt */
		pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
			 regs->dar);
		break;
	default:
		pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n",
			 regs->dar);
		break;
	}
	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
		regs->nip);

	if (task_stack_end_corrupted(current))
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	die("Kernel access of bad area", regs, sig);
}