arch/metag/mm/fault.c

/*
 * Meta page fault handling.
 *
 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>

#include <asm/tlbflush.h>
#include <asm/mmu.h>
#include <asm/traps.h>

/* Clear any pending catch buffer state. */
static void clear_cbuf_entry(struct pt_regs *regs, unsigned long addr,
                             unsigned int trapno)
{
        PTBICTXEXTCB0 cbuf = regs->extcb0;

        switch (trapno) {
        /* Instruction fetch faults leave no catch buffer state. */
        case TBIXXF_SIGNUM_IGF:
        case TBIXXF_SIGNUM_IPF:
                return;
        default:
                if (cbuf[0].CBAddr == addr) {
                        cbuf[0].CBAddr = 0;
                        cbuf[0].CBFlags &= ~TXCATCH0_FAULT_BITS;

                        /* And, as this is the ONLY catch entry, we
                         * need to clear the cbuf bit from the context!
                         */
                        regs->ctx.SaveMask &= ~(TBICTX_CBUF_BIT |
                                                TBICTX_XCBF_BIT);

                        return;
                }
                pr_err("Failed to clear cbuf entry!\n");
        }
}
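
/* When set, unhandled user SIGSEGVs are reported on the console (see the
 * bad_area path below). */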
int show_unhandled_signals = 1;

int do_page_fault(struct pt_regs *regs, unsigned long address,
                  unsigned int write_access, unsigned int trapno)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma, *prev_vma;
        siginfo_t info;
        int fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        tsk = current;

        if ((address >= VMALLOC_START) && (address < VMALLOC_END)) {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                pgd = ((pgd_t *)mmu_get_base()) + offset;
                pgd_k = swapper_pg_dir + offset;

                /* This will never happen with the folded page table. */
                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
                        set_pgd(pgd, *pgd_k);
                        return 0;
                }

                pud = pud_offset(pgd, address);
                pud_k = pud_offset(pgd_k, address);
                if (!pud_present(*pud_k))
                        goto bad_area_nosemaphore;
                set_pud(pud, *pud_k);

                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);
                if (!pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;
                set_pmd(pmd, *pmd_k);
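
                /*
                 * Once the pmd entry is copied, the pte tables are shared
                 * with the reference page table, so there is nothing left
                 * to copy at the pte level; only check that the kernel
                 * mapping actually exists.
                 */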
                pte_k = pte_offset_kernel(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto bad_area_nosemaphore;

                /* May only be needed on Chorus2 */
                flush_tlb_all();
                return 0;
        }

        mm = tsk->mm;
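
        /*
         * If we are in an atomic context (e.g. in an interrupt) or have
         * no user address space (a kernel thread), we must not take the
         * fault: acquiring mmap_sem below may sleep.
         */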
        if (faulthandler_disabled() || !mm)
                goto no_context;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
retry:
        down_read(&mm->mmap_sem);

        vma = find_vma_prev(mm, address, &prev_vma);

        if (!vma || address < vma->vm_start)
                goto check_expansion;

good_area:
        if (write_access) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, address, flags);
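
        /*
         * If the fault would have to be retried but a fatal signal is
         * pending, give up immediately; with FAULT_FLAG_ALLOW_RETRY set,
         * handle_mm_fault() has already dropped mmap_sem in the retry
         * case (see __lock_page_or_retry in mm/filemap.c).
         */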
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return 0;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
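
        /*
         * Major/minor page fault accounting. FAULT_FLAG_ALLOW_RETRY is
         * cleared before a retry, so the counters are only bumped on the
         * first attempt.
         */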
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        tsk->maj_flt++;
                else
                        tsk->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /*
                         * No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        return 0;
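
        /*
         * No VMA covers the address. The Meta user stack grows upwards,
         * so a fault just above the stack VMA may simply mean the stack
         * needs to grow; try to expand the VMA below the address over it.
         */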
check_expansion:
        vma = prev_vma;
        if (vma && (expand_stack(vma, address) == 0))
                goto good_area;

bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        if (user_mode(regs)) {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = SEGV_MAPERR;
                info.si_addr = (__force void __user *)address;
                info.si_trapno = trapno;

                if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
                    printk_ratelimit()) {
                        printk("%s%s[%d]: segfault at %lx pc %08x sp %08x write %d trap %#x (%s)",
                               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                               tsk->comm, task_pid_nr(tsk), address,
                               regs->ctx.CurrPC, regs->ctx.AX[0].U0,
                               write_access, trapno, trap_name(trapno));
                        print_vma_addr(" in ", regs->ctx.CurrPC);
                        print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
                        printk("\n");
                        show_regs(regs);
                }
                force_sig_info(SIGSEGV, &info, tsk);
                return 1;
        }
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (__force void __user *)address;
        info.si_trapno = trapno;
        force_sig_info(SIGBUS, &info, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;

        return 1;

        /*
         * We ran out of memory, or some other thing happened to us that
         * made us unable to handle the page fault gracefully.
         */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (user_mode(regs)) {
                pagefault_out_of_memory();
                return 1;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs)) {
                clear_cbuf_entry(regs, address, trapno);
                return 1;
        }
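
        /*
         * No exception fixup for this address: pack the write flag above
         * the trap number as the error code and take the system down.
         */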
        die("Oops", regs, (write_access << 15) | trapno, address);
        do_exit(SIGKILL);
}