// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/percpu.h>
#include <linux/context_tracking.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
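
/* When non-zero, unhandled user faults are logged (ratelimited) via
 * show_signal_msg() below.
 */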
int show_unhandled_signals = 1;

static void __kprobes unhandled_fault(unsigned long address,
				      struct task_struct *tsk,
				      struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT "Unable to handle kernel NULL "
		       "pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %016lx\n", (unsigned long)address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
	       (tsk->mm ?
		CTX_HWBITS(tsk->mm->context) :
		CTX_HWBITS(tsk->active_mm->context)));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
		(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
	       regs->tpc);
	printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
	printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
	printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
	dump_stack();
	unhandled_fault(regs->tpc, current, regs);
}

/*
 * We now make sure that mmap_sem is held in all paths that call
 * this.  Additionally, to prevent kswapd from ripping ptes from
 * under us, raise interrupts around the time that we look at the
 * pte, kswapd will have to wait to get his smp ipi response from
 * us.  vmtruncate likewise.  This saves us having to get pte lock.
 */
static unsigned int get_user_insn(unsigned long tpc)
{
	pgd_t *pgdp = pgd_offset(current->mm, tpc);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	unsigned long pa;
	u32 insn = 0;

	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
		goto out;
	p4dp = p4d_offset(pgdp, tpc);
	if (p4d_none(*p4dp) || unlikely(p4d_bad(*p4dp)))
		goto out;
	pudp = pud_offset(p4dp, tpc);
	if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
		goto out;

	/* This disables preemption for us as well. */
	local_irq_disable();

	pmdp = pmd_offset(pudp, tpc);
	if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
		goto out_irq_enable;

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (is_hugetlb_pmd(*pmdp)) {
		pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
		pa += tpc & ~HPAGE_MASK;
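
		/* Note: ASI_PHYS_USE_EC makes the lduwa below operate on
		 * the physical address directly, bypassing the TLB, so
		 * the probe can never itself re-fault.  The same applies
		 * to the base-page case further down.
		 */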
		/* Use phys bypass so we don't pollute dtlb/dcache. */
		__asm__ __volatile__("lduwa [%1] %2, %0"
				     : "=r" (insn)
				     : "r" (pa), "i" (ASI_PHYS_USE_EC));
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, tpc);
		pte = *ptep;
		if (pte_present(pte)) {
			pa  = (pte_pfn(pte) << PAGE_SHIFT);
			pa += (tpc & ~PAGE_MASK);

			/* Use phys bypass so we don't pollute dtlb/dcache. */
			__asm__ __volatile__("lduwa [%1] %2, %0"
					     : "=r" (insn)
					     : "r" (pa), "i" (ASI_PHYS_USE_EC));
		}
		pte_unmap(ptep);
	}
out_irq_enable:
	local_irq_enable();
out:
	return insn;
}

static void show_signal_msg(struct pt_regs *regs, int sig, int code,
			    unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->tpc);

	printk(KERN_CONT "\n");
}

static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			     unsigned long fault_addr, unsigned int insn,
			     int fault_code)
{
	unsigned long addr;

	if (fault_code & FAULT_CODE_ITLB) {
		addr = regs->tpc;
	} else {
		/* If we were able to probe the faulting instruction, use it
		 * to compute a precise fault address.  Otherwise use the fault
		 * time provided address which may only have page granularity.
		 */
		if (insn)
			addr = compute_effective_address(regs, insn, 0);
		else
			addr = fault_addr;
	}

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, code, addr, current);

	force_sig_fault(sig, code, (void __user *) addr, 0);
}

static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
	if (!insn) {
		if (!regs->tpc || (regs->tpc & 0x3))
			return 0;
		if (regs->tstate & TSTATE_PRIV)
			insn = *(unsigned int *) regs->tpc;
		else
			insn = get_user_insn(regs->tpc);
	}
	return insn;
}

static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
				      int fault_code, unsigned int insn,
				      unsigned long address)
{
	unsigned char asi = ASI_P;

	if ((!insn) && (regs->tstate & TSTATE_PRIV))
		goto cannot_handle;

	/* If the user insn could not be read (thus insn is zero), that
	 * is fine.  We will just gun down the process with a signal
	 * in that case.
	 */

	if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
	    (insn & 0xc0800000) == 0xc0800000) {
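		/* Encoding note: op (bits 31:30) == 3 selects the
		 * load/store format and bit 23 marks the alternate-space
		 * variants.  Bit 13 (the i bit) then says where the ASI
		 * lives: in %asi (TSTATE bits 31:24) when set, otherwise
		 * in instruction bits 12:5.
		 */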
		if (insn & 0x2000)
			asi = (regs->tstate >> 24);
		else
			asi = (insn >> 5);
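
		/* (asi & 0xf2) == 0x82 matches the non-faulting ASIs
		 * (ASI_PNF/ASI_SNF and their little-endian forms), which
		 * must never deliver a visible fault to the process.
		 */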
		if ((asi & 0xf2) == 0x82) {
			if (insn & 0x1000000) {
				handle_ldf_stq(insn, regs);
			} else {
				/* This was a non-faulting load. Just clear the
				 * destination register(s) and continue with the next
				 * instruction. -jj
				 */
				handle_ld_nf(insn, regs);
			}
			return;
		}
	}

	/* Is this in ex_table? */
	if (regs->tstate & TSTATE_PRIV) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
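		/* A fixup entry redirects the trapping instruction to its
		 * recovery stub; tnpc must be kept in lock-step with the
		 * new tpc so execution resumes cleanly.
		 */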
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
	} else {
		/* The si_code was set to make clear whether
		 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
		 */
		do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
		return;
	}

cannot_handle:
	unhandled_fault (address, current, regs);
}

static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
{
	static int times;

	if (times++ < 10)
		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
		       "64-bit TPC [%lx]\n",
		       current->comm, current->pid,
		       regs->tpc);
	show_regs(regs);
}

asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned int insn = 0;
	int si_code, fault_code;
	vm_fault_t fault;
	unsigned long address, mm_rss;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	fault_code = get_thread_fault_code();

	if (kprobe_page_fault(regs, 0))
		goto exit_exception;

	si_code = SEGV_MAPERR;
	address = current_thread_info()->fault_address;

	if ((fault_code & FAULT_CODE_ITLB) &&
	    (fault_code & FAULT_CODE_DTLB))
		BUG();
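
	/* A 32-bit task can only legitimately fault on a 32-bit PC and
	 * address; a non-zero upper half is stale state and is treated
	 * as an unmapped access rather than looked up in the VMA tree.
	 */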
	if (test_thread_flag(TIF_32BIT)) {
		if (!(regs->tstate & TSTATE_PRIV)) {
			if (unlikely((regs->tpc >> 32) != 0)) {
				bogus_32bit_fault_tpc(regs);
				goto intr_or_no_mm;
			}
		}
		if (unlikely((address >> 32) != 0))
			goto intr_or_no_mm;
	}

	if (regs->tstate & TSTATE_PRIV) {
		unsigned long tpc = regs->tpc;

		/* Sanity check the PC. */
		if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
		    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
			/* Valid, no problems... */
		} else {
			bad_kernel_pc(regs, address);
			goto exit_exception;
		}
	} else
		flags |= FAULT_FLAG_USER;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto intr_or_no_mm;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
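
	/* Take mmap_sem opportunistically.  If the kernel faulted while
	 * already holding it and there is no exception-table fixup for
	 * the faulting PC, blocking here could deadlock, so punt to the
	 * kernel fault path instead.
	 */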
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((regs->tstate & TSTATE_PRIV) &&
		    !search_exception_tables(regs->tpc)) {
			insn = get_fault_insn(regs, insn);
			goto handle_kernel_fault;
		}

retry:
		down_read(&mm->mmap_sem);
	}

	if (fault_code & FAULT_CODE_BAD_RA)
		goto do_sigbus;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

	/* Pure DTLB misses do not tell us whether the fault causing
	 * load/store/atomic was a write or not, it only says that there
	 * was no match.  So in such a case we (carefully) read the
	 * instruction to try and figure this out.  It's an optimization
	 * so it's ok if we can't do this.
	 *
	 * Special hack, window spill/fill knows the exact fault type.
	 */
	if (((fault_code &
	      (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
	    (vma->vm_flags & VM_WRITE) != 0) {
		insn = get_fault_insn(regs, 0);
		if (!insn)
			goto continue_fault;
		/* All loads, stores and atomics have bits 30 and 31 both set
		 * in the instruction.  Bit 21 is set in all stores, but we
		 * have to avoid prefetches which also have bit 21 set.
		 */
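		/* Concretely: 0xc0200000 tests op == 3 plus bit 21, and
		 * the second mask rejects op3 values 0x2d/0x3d, i.e.
		 * PREFETCH and PREFETCHA.
		 */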
		if ((insn & 0xc0200000) == 0xc0200000 &&
		    (insn & 0x01780000) != 0x01680000) {
			/* Don't bother updating thread struct value,
			 * because update_mmu_cache only cares which tlb
			 * the access came from.
			 */
			fault_code |= FAULT_CODE_WRITE;
		}
	}
continue_fault:

	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (!(fault_code & FAULT_CODE_WRITE)) {
		/* Non-faulting loads shouldn't expand stack. */
		insn = get_fault_insn(regs, insn);
		if ((insn & 0xc0800000) == 0xc0800000) {
			unsigned char asi;

			if (insn & 0x2000)
				asi = (regs->tstate >> 24);
			else
				asi = (insn >> 5);
			if ((asi & 0xf2) == 0x82)
				goto bad_area;
		}
	}
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;

	/* If we took an ITLB miss on a non-executable page, catch
	 * that here.
	 */
	if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
		WARN(address != regs->tpc,
		     "address (%lx) != regs->tpc (%lx)\n", address, regs->tpc);
		WARN_ON(regs->tstate & TSTATE_PRIV);
		goto bad_area;
	}

	if (fault_code & FAULT_CODE_WRITE) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;

		/* Spitfire has an icache which does not snoop
		 * processor stores.  Later processors do...
		 */
		if (tlb_type == spitfire &&
		    (vma->vm_flags & VM_EXEC) != 0 &&
		    vma->vm_file != NULL)
			set_thread_fault_code(fault_code |
					      FAULT_CODE_BLKCOMMIT);

		flags |= FAULT_FLAG_WRITE;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	fault = handle_mm_fault(vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		goto exit_exception;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}
	up_read(&mm->mmap_sem);
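
	/* The fault is resolved; now check whether the TSBs (sparc64's
	 * software-managed TLB-miss hash tables) are still sized
	 * sensibly for the current RSS, and grow them if not.
	 */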
	mm_rss = get_mm_rss(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
	mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
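	/* THP mappings are translated via the huge TSB, so their pages
	 * are subtracted before sizing the base TSB against mm_rss.
	 */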
	if (unlikely(mm_rss >
		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
		tsb_grow(mm, MM_TSB_BASE, mm_rss);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
	mm_rss *= REAL_HPAGE_PER_HPAGE;
	if (unlikely(mm_rss >
		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
			tsb_grow(mm, MM_TSB_HUGE, mm_rss);
		else
			hugetlb_setup(regs);
	}
#endif
exit_exception:
	exception_exit(prev_state);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);

handle_kernel_fault:
	do_kernel_fault(regs, si_code, fault_code, insn, address);
	goto exit_exception;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);
	if (!(regs->tstate & TSTATE_PRIV)) {
		pagefault_out_of_memory();
		goto exit_exception;
	}
	goto handle_kernel_fault;

intr_or_no_mm:
	insn = get_fault_insn(regs, 0);
	goto handle_kernel_fault;

do_sigbus:
	insn = get_fault_insn(regs, insn);
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);

	/* Kernel mode? Handle exceptions or die */
	if (regs->tstate & TSTATE_PRIV)
		goto handle_kernel_fault;
}