/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * From i386 code copyright (C) 1995  Linus Torvalds
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/traps.h>
#include <asm/syscalls.h>

#include <arch/interrupts.h>
static noinline void force_sig_info_fault(const char *type, int si_signo,
					  int si_code, unsigned long address,
					  int fault_num,
					  struct task_struct *tsk,
					  struct pt_regs *regs)
{
	siginfo_t info;

	if (unlikely(tsk->pid < 2)) {
		panic("Signal %d (code %d) at %#lx sent to %s!",
		      si_signo, si_code & 0xffff, address,
		      is_idle_task(tsk) ? "the idle task" : "init");
	}

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	info.si_trapno = fault_num;
	trace_unhandled_signal(type, regs, address, si_signo);
	force_sig_info(si_signo, &info, tsk);
}
#ifndef __tilegx__
/*
 * Synthesize the fault a PL0 process would get by doing a word-load of
 * an unaligned address or a high kernel address.
 */
SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
		struct pt_regs *, regs)
{
	if (address >= PAGE_OFFSET)
		force_sig_info_fault("atomic segfault", SIGSEGV, SEGV_MAPERR,
				     address, INT_DTLB_MISS, current, regs);
	else
		force_sig_info_fault("atomic alignment fault", SIGBUS,
				     BUS_ADRALN, address,
				     INT_UNALIGN_DATA, current, regs);

	/*
	 * Adjust pc to point at the actual instruction, which is unusual
	 * for syscalls normally, but is appropriate when we are claiming
	 * that a syscall swint1 caused a page fault or bus error.
	 */
	regs->pc -= 8;

	/*
	 * Mark this as a caller-save interrupt, like a normal page fault,
	 * so that when we go through the signal handler path we will
	 * properly restore r0, r1, and r2 for the signal handler arguments.
	 */
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	return 0;
}
#endif
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd_k = init_mm.pgd + index;
	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
	return pmd_k;
}
/*
 * Handle a fault on the vmalloc area.
 */
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pmd_k = vmalloc_sync_one(pgd, address);
	if (!pmd_k)
		return -1;
	if (pmd_huge(*pmd_k))
		return 0;	/* support TILE huge_vmap() API */
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}
/* Wait until this PTE has completed migration. */
static void wait_for_migration(pte_t *pte)
{
	if (pte_migrating(*pte)) {
		/*
		 * Wait until the migrater fixes up this pte.
		 * We scale the loop count by the clock rate so we'll wait for
		 * a few seconds here.
		 */
		int retries = 0;
		int bound = get_clock_rate();
		while (pte_migrating(*pte)) {
			barrier();
			if (++retries > bound)
				panic("Hit migrating PTE (%#llx) and"
				      " page PFN %#lx still migrating",
				      pte->val, pte_pfn(*pte));
		}
	}
}
/*
 * It's not generally safe to use "current" to get the page table pointer,
 * since we might be running an oprofile interrupt in the middle of a
 * task switch.
 */
static pgd_t *get_current_pgd(void)
{
	HV_Context ctx = hv_inquire_context();
	unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
	struct page *pgd_page = pfn_to_page(pgd_pfn);
	BUG_ON(PageHighMem(pgd_page));
	return (pgd_t *) __va(ctx.page_table);
}
/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 *
 * If we find a migrating PTE while we're in an NMI context, and we're
 * at a PC that has a registered exception handler, we don't wait,
 * since this thread may (e.g.) have been interrupted while migrating
 * its own stack, which would then cause us to self-deadlock.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address, unsigned long pc,
				int is_kernel_mode, int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		if (in_nmi() && search_exception_tables(pc))
			return 0;
		wait_for_migration(pte);
		return 1;
	}

	if (!is_kernel_mode || !pte_present(pteval))
		return 0;
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}
/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 */
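/*
 * A note on the arguments, as set up by do_page_fault() below:
 * "fault_num" is the raw interrupt number, "is_page_fault" is set for
 * TLB-miss faults (as opposed to access violations), and "write" is
 * nonzero when the faulting access was a store.
 */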
static int handle_page_fault(struct pt_regs *regs,
			     int fault_num,
			     int is_page_fault,
			     unsigned long address,
			     int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long stack_offset;
	int fault;
	int si_code;
	int is_kernel_mode;
	pgd_t *pgd;
	unsigned int flags;

	/* on TILE, protection faults are always writes */
	if (!is_page_fault)
		write = 1;

	flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
		 (write ? FAULT_FLAG_WRITE : 0));

	is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);

	tsk = validate_current();

	/*
	 * Check to see if we might be overwriting the stack, and bail
	 * out if so.  The page fault code is a relatively likely
	 * place to get trapped in an infinite regress, and once we
	 * overwrite the whole stack, it becomes very hard to recover.
	 */
	stack_offset = stack_pointer & (THREAD_SIZE-1);
	if (stack_offset < THREAD_SIZE / 8) {
		pr_alert("Potential stack overrun: sp %#lx\n",
			 stack_pointer);
		show_regs(regs);
		pr_alert("Killing current process %d/%s\n",
			 tsk->pid, tsk->comm);
		do_group_exit(SIGKILL);
	}
	/*
	 * Early on, we need to check for migrating PTE entries;
	 * see homecache.c.  If we find a migrating PTE, we wait until
	 * the backing page claims to be done migrating, then we proceed.
	 * For kernel PTEs, we rewrite the PTE and return and retry.
	 * Otherwise, we treat the fault like a normal "no PTE" fault,
	 * rather than trying to patch up the existing PTE.
	 */
	pgd = get_current_pgd();
	if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
				 is_kernel_mode, write))
		return 1;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection fault.
	 */
	if (unlikely(address >= TASK_SIZE &&
		     !is_arch_mappable_range(address, 0))) {
		if (is_kernel_mode && is_page_fault &&
		    vmalloc_fault(pgd, address) >= 0)
			return 1;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		mm = NULL;	/* happy compiler */
		vma = NULL;	/* happy compiler */
		goto bad_area_nosemaphore;
	}
	/*
	 * If we're trying to touch user-space addresses, we must
	 * be either at PL0, or else with interrupts enabled in the
	 * kernel, so either way we can re-enable interrupts here
	 * unless we are doing atomic access to user space with
	 * interrupts disabled.
	 */
	if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
		local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm) {
		vma = NULL;	/* happy compiler */
		goto bad_area_nosemaphore;
	}
	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (is_kernel_mode &&
		    !search_exception_tables(regs->pc)) {
			vma = NULL;	/* happy compiler */
			goto bad_area_nosemaphore;
		}

retry:
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (regs->sp < PAGE_OFFSET) {
		/*
		 * accessing the stack below sp is always a bug.
		 */
		if (address < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
	if (fault_num == INT_ITLB_MISS) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (write) {
#ifdef TEST_VERIFY_AREA
		if (!is_page_fault && regs->cs == KERNEL_CS)
			pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!is_page_fault || !(vma->vm_flags & VM_READ))
			goto bad_area;
	}
 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}
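	/*
	 * Since FAULT_FLAG_ALLOW_RETRY is cleared before jumping back to
	 * "retry" above, that retry path can be taken at most once.
	 */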
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	/*
	 * If this was an asynchronous fault,
	 * restart the appropriate engine.
	 */
	switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		break;
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
		__insn_mtspr(SPR_SNCTL,
			     __insn_mfspr(SPR_SNCTL) &
			     ~SPR_SNCTL__FRZPROC_MASK);
		break;
#endif
	}
#endif

	up_read(&mm->mmap_sem);
	return 1;
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (!is_kernel_mode) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		force_sig_info_fault("segfault", SIGSEGV, si_code, address,
				     fault_num, tsk, regs);
		return 0;
	}
no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return 0;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	/* FIXME: no lookup_address() yet */
#ifdef SUPPORT_LOOKUP_ADDRESS
	if (fault_num == INT_ITLB_MISS) {
		pte_t *pte = lookup_address(address);

		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
			pr_crit("kernel tried to execute"
				" non-executable page - exploit attempt?"
				" (uid: %d)\n", current->uid);
	}
#endif
	if (address < PAGE_SIZE)
		pr_alert("Unable to handle kernel NULL pointer dereference\n");
	else
		pr_alert("Unable to handle kernel paging request\n");
	pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
		 address, regs->pc);

	if (unlikely(tsk->pid < 2)) {
		panic("Kernel page fault running %s!",
		      is_idle_task(tsk) ? "the idle task" : "init");
	}

	/*
	 * More FIXME: we should probably copy the i386 here and
	 * implement a generic die() routine.  Not today.
	 */

	do_group_exit(SIGKILL);
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	pr_alert("VM: killing process %s\n", tsk->comm);
	if (!is_kernel_mode)
		do_group_exit(SIGKILL);
	goto no_context;
do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (is_kernel_mode)
		goto no_context;

	force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
			     fault_num, tsk, regs);
	return 0;
}
#ifndef __tilegx__

/* We must release ICS before panicking or we won't get anywhere. */
#define ics_panic(fmt, ...) do { \
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
	panic(fmt, __VA_ARGS__); \
} while (0)
/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_K_x registers,
 * since that might indicate we have not yet squirreled the SPR
 * contents away and can thus safely take a recursive interrupt.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
 *
 * Note that this routine is called before homecache_tlb_defer_enter(),
 * which means that we can properly unlock any atomics that might
 * be used there (good), but also means we must be very sensitive
 * to not touch any data structures that might be located in memory
 * that could migrate, as we could be entering the kernel on a dataplane
 * cpu that has been deferring kernel TLB updates.  This means, for
 * example, that we can't migrate init_mm or its pgd.
 */
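/*
 * As the comments on the return paths below explain, a retval of 1 in
 * the returned intvec_state means the fault was handled fully here,
 * while a retval of 0 tells the low-level interrupt code to continue
 * with the normal do_page_fault() path.
 */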
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address,
				      unsigned long info)
{
	unsigned long pc = info & ~1;
	int write = info & 1;
	pgd_t *pgd = get_current_pgd();

	/* Retval is 1 at first since we will handle the fault fully. */
	struct intvec_state state = {
		do_page_fault, fault_num, address, write, 1
	};

	/* Validate that we are plausibly in the right routine. */
	if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
	    (fault_num != INT_DTLB_MISS &&
	     fault_num != INT_DTLB_ACCESS)) {
		unsigned long old_pc = regs->pc;
		regs->pc = pc;
		ics_panic("Bad ICS page fault args:"
			  " old PC %#lx, fault %d/%d at %#lx\n",
			  old_pc, fault_num, write, address);
	}
	/* We might be faulting on a vmalloc page, so check that first. */
	if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
		return state;
	/*
	 * If we faulted with ICS set in sys_cmpxchg, we are providing
	 * a user syscall service that should generate a signal on
	 * fault.  We didn't set up a kernel stack on initial entry to
	 * sys_cmpxchg, but instead had one set up by the fault, which
	 * (because sys_cmpxchg never releases ICS) came to us via the
	 * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
	 * still referencing the original user code.  We release the
	 * atomic lock and rewrite pt_regs so that it appears that we
	 * came from user-space directly, and after we finish the
	 * fault we'll go back to user space and re-issue the swint.
	 * This way the backtrace information is correct if we need to
	 * emit a stack dump at any point while handling this.
	 *
	 * Must match register use in sys_cmpxchg().
	 */
	if (pc >= (unsigned long) sys_cmpxchg &&
	    pc < (unsigned long) __sys_cmpxchg_end) {
		/* Don't unlock before we could have locked. */
		if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
			int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
			__atomic_fault_unlock(lock_ptr);
		}
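		/*
		 * Per the "must match register use" note above, r27 is
		 * assumed to hold the user stack pointer saved on entry
		 * to sys_cmpxchg, so restoring regs->sp from it makes the
		 * trap frame look like a direct-from-user entry.
		 */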
		regs->sp = regs->regs[27];
	}
	/*
	 * We can also fault in the atomic assembly, in which
	 * case we use the exception table to do the first-level fixup.
	 * We may re-fixup again in the real fault handler if it
	 * turns out the faulting address is just bad, and not,
	 * for example, migrating.
	 */
	else if (pc >= (unsigned long) __start_atomic_asm_code &&
		 pc < (unsigned long) __end_atomic_asm_code) {
		const struct exception_table_entry *fixup;

		/* Unlock the atomic lock. */
		int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
		__atomic_fault_unlock(lock_ptr);

		fixup = search_exception_tables(pc);
		if (fixup == NULL)
			ics_panic("ICS atomic fault not in table:"
				  " PC %#lx, fault %d", pc, fault_num);
		regs->pc = fixup->fixup;
		regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
	}
	/*
	 * Now that we have released the atomic lock (if necessary),
	 * it's safe to spin if the PTE that caused the fault was migrating.
	 */
	if (fault_num == INT_DTLB_ACCESS)
		write = 1;
	if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
		return state;

	/* Return zero so that we continue on with normal fault handling. */
	state.retval = 0;
	return state;
}

#endif /* !__tilegx__ */
/*
 * This routine handles page faults.  It determines the address, and the
 * problem, and then passes it off to handle_page_fault() for normal DTLB
 * and ITLB issues, and for DMA or SN processor faults when we are in user
 * space.  For the latter, if we're in kernel mode, we just save the
 * interrupt away appropriately and return immediately.  We can't do
 * page faults for user code while in kernel mode.
 */
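/*
 * The scalar arguments come from the low-level interrupt vector code:
 * "fault_num" is the interrupt number, "address" is the faulting
 * virtual address, and "write" is nonzero for a faulting store.
 */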
void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
{
	int is_page_fault;
	/* This case should have been handled by do_page_fault_ics(). */

#if CHIP_HAS_TILE_DMA()
	/*
	 * If it's a DMA fault, suspend the transfer while we're
	 * handling the miss; we'll restart after it's handled.  If we
	 * don't suspend, it's possible that this process could swap
	 * out and back in, and restart the engine since the DMA is
	 * still in progress.
	 */
	if (fault_num == INT_DMATLB_MISS ||
	    fault_num == INT_DMATLB_ACCESS ||
	    fault_num == INT_DMATLB_MISS_DWNCL ||
	    fault_num == INT_DMATLB_ACCESS_DWNCL) {
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}
#endif
	/* Validate fault num and decide if this is a first-time page fault. */
	switch (fault_num) {
	case INT_ITLB_MISS:
	case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
#endif
		is_page_fault = 1;
		break;

	case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
#endif
		is_page_fault = 0;
		break;

	default:
		panic("Bad fault number %d in do_page_fault", fault_num);
	}
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	if (EX1_PL(regs->ex1) != USER_PL) {
		struct async_tlb *async;
		switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
		case INT_DMATLB_MISS:
		case INT_DMATLB_ACCESS:
		case INT_DMATLB_MISS_DWNCL:
		case INT_DMATLB_ACCESS_DWNCL:
			async = &current->thread.dma_async_tlb;
			break;
#endif
#if CHIP_HAS_SN_PROC()
		case INT_SNITLB_MISS:
		case INT_SNITLB_MISS_DWNCL:
			async = &current->thread.sn_async_tlb;
			break;
#endif
		default:
			async = NULL;
		}
		if (async) {

			/*
			 * No vmalloc check required, so we can allow
			 * interrupts immediately at this point.
			 */
			local_irq_enable();

			set_thread_flag(TIF_ASYNC_TLB);
			if (async->fault_num != 0) {
				panic("Second async fault %d;"
				      " old fault was %d (%#lx/%ld)",
				      fault_num, async->fault_num,
				      async->address, async->is_write);
			}
			BUG_ON(fault_num == 0);
			async->fault_num = fault_num;
			async->is_fault = is_page_fault;
			async->is_write = write;
			async->address = address;
			return;
		}
	}
#endif

	handle_page_fault(regs, fault_num, is_page_fault, address, write);
}
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
/*
 * Check an async_tlb structure to see if a deferred fault is waiting,
 * and if so pass it to the page-fault code.
 */
static void handle_async_page_fault(struct pt_regs *regs,
				    struct async_tlb *async)
{
	if (async->fault_num) {
		/*
		 * Clear async->fault_num before calling the page-fault
		 * handler so that if we re-interrupt before returning
		 * from the function we have somewhere to put the
		 * information from the new interrupt.
		 */
		int fault_num = async->fault_num;
		async->fault_num = 0;
		handle_page_fault(regs, fault_num, async->is_fault,
				  async->address, async->is_write);
	}
}
/*
 * This routine effectively re-issues asynchronous page faults
 * when we are returning to user space.
 */
void do_async_page_fault(struct pt_regs *regs)
{
	/*
	 * Clear thread flag early.  If we re-interrupt while processing
	 * code here, we will reset it and recall this routine before
	 * returning to user space.
	 */
	clear_thread_flag(TIF_ASYNC_TLB);

#if CHIP_HAS_TILE_DMA()
	handle_async_page_fault(regs, &current->thread.dma_async_tlb);
#endif
#if CHIP_HAS_SN_PROC()
	handle_async_page_fault(regs, &current->thread.sn_async_tlb);
#endif
}
#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
void vmalloc_sync_all(void)
{
#ifdef __tilegx__
	/* Currently all L1 kernel pmd's are static and shared. */
	BUG_ON(pgd_index(VMALLOC_END) != pgd_index(VMALLOC_START));
#else
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = PAGE_OFFSET;
	unsigned long address;

	BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
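	/*
	 * Note that the loop below terminates via unsigned wraparound:
	 * "address" starts at or above PAGE_OFFSET, and the test only
	 * fails once the PGDIR_SIZE increment wraps past the top of the
	 * address space back toward zero.
	 */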
	for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct list_head *pos;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each(pos, &pgd_list)
				if (!vmalloc_sync_one(list_to_pgd(pos),
								address)) {
					/* Must be at first entry in list. */
					BUG_ON(pos != pgd_list.next);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (pos != pgd_list.next)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#endif
}