// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

#define VM_FAULT_BADCONTEXT	((__force vm_fault_t) 0x010000)
#define VM_FAULT_BADMAP		((__force vm_fault_t) 0x020000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t) 0x040000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t) 0x080000)
#define VM_FAULT_PFAULT		((__force vm_fault_t) 0x100000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	VDSO_FAULT,
	GMAP_FAULT,
};
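
/*
 * Mask for the store-indication bits of the translation-exception
 * identification (TEID); nonzero only if the store-indication
 * facility (75) is installed, see fault_init() below and the
 * FAULT_FLAG_WRITE test in do_exception().
 */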
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);
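
/*
 * The lowest two bits of the translation-exception identification
 * encode the address space the CPU was translating through:
 * 0 primary, 1 access register, 2 secondary, 3 home
 * (cf. the matching switch in dump_fault_info() below).
 */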
/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (IS_ENABLED(CONFIG_PGSTE) &&
		    test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		if (current->thread.mm_segment == USER_DS)
			return USER_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2) {
		/* secondary space exception */
		if (current->thread.mm_segment & 1) {
			if (current->thread.mm_segment == USER_DS_SACF)
				return USER_FAULT;
			return KERNEL_FAULT;
		}
		return VDSO_FAULT;
	}
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}
static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}
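
/*
 * Dump the page table entries on the path to "address", starting at the
 * level selected by the ASCE type. Each case intentionally falls through
 * to the next lower level once the current entry has been printed.
 */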
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}
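
/*
 * For a fault walked through a region-second-table ASCE the output looks
 * something like "AS:... R2:... R3:... S:... P:..."; "BAD" marks a table
 * entry that could not be read.
 */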
static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case VDSO_FAULT:
		asce = S390_lowcore.vdso_asce;
		pr_cont("vdso ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}
int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}
/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}
const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
	const struct exception_table_entry *fixup;

	fixup = search_extable(__start_dma_ex_table,
			       __stop_dma_ex_table - __start_dma_ex_table,
			       addr);
	if (!fixup)
		fixup = search_exception_tables(addr);
	return fixup;
}
static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault?  */
	fixup = s390_search_extables(regs->psw.addr);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup);
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}
static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}
static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}
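
/*
 * A fault on an execute-protected page may be a signal-return trampoline:
 * 0x0a77 is "svc 119" (sigreturn) and 0x0aad is "svc 173" (rt_sigreturn).
 * If one of these opcodes is found at the PSW address, the fault is
 * turned into the corresponding system call instead of a SIGSEGV.
 */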
static noinline int signal_return(struct pt_regs *regs)
{
	u16 instruction;
	int rc;

	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
	if (rc)
		return rc;
	if (instruction == 0x0a77) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x00040077;
		return 0;
	} else if (instruction == 0x0aad) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x000400ad;
		return 0;
	}
	return -EACCES;
}
static noinline void do_fault_error(struct pt_regs *regs, int access,
				    vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
		if (access == VM_EXEC && signal_return(regs) == 0)
			break;
		/* fallthrough */
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		/* fallthrough */
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;
	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case VDSO_FAULT:
		fault = VM_FAULT_BADMAP;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
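	/*
	 * If the store-indication facility is available, the hardware tells
	 * us whether the failing access was a store: store_indication masks
	 * the relevant TEID bits and 0x400 is the "store" pattern, in which
	 * case the fault is handled as a write access.
	 */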
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

	gmap = NULL;
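	/*
	 * For a fault in a KVM guest mapping, translate the guest address
	 * through the gmap into the host process address space first; the
	 * regular VMA walk below then operates on the host address.
	 */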
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}
retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;
	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_sem has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			flags &= ~FAULT_FLAG_RETRY_NOWAIT;
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}
void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
				      (regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);
void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);
#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}
__setup("nopfault", nopfault);
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};
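
/*
 * DIAGNOSE 0x258 establishes (function code 0, pfault_init_refbk above)
 * or cancels (function code 1, pfault_fini_refbk below) the
 * pseudo-page-fault handshake with the hypervisor.
 */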
int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}
static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080
/*
 * The mechanism of our pfault code: if Linux is running as guest, runs a user
 * space process and the user space process accesses a page that the host has
 * paged out we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt then we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happens within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule().  It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */
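
/*
 * pfault_wait is the per-thread state machine behind this: 0 means no
 * pseudo page fault is pending, 1 means the initial interrupt arrived
 * and the task is queued on pfault_list, and -1 means the completion
 * interrupt overtook the initial one, so the next initial interrupt
 * must not put the task to sleep.
 */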
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}
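
/*
 * When a cpu goes offline, wake every task still parked on pfault_list;
 * the completion interrupt for those tasks may never be delivered to a
 * live cpu, so they would otherwise sleep forever.
 */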
static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}
static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */
#if IS_ENABLED(CONFIG_PGSTE)
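/*
 * Handlers for faults raised on protected ("secure") guest storage.
 * arch_make_page_accessible() asks the ultravisor to export a secure
 * page so that the host can access it again.
 */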
void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	int rc;

	switch (get_fault_type(regs)) {
	case USER_FAULT:
		mm = current->mm;
		down_read(&mm->mmap_sem);
		vma = find_vma(mm, addr);
		if (!vma) {
			up_read(&mm->mmap_sem);
			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			up_read(&mm->mmap_sem);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		up_read(&mm->mmap_sem);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	case VDSO_FAULT:
	case GMAP_FAULT:
	default:
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);
void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);
#else
void do_secure_storage_access(struct pt_regs *regs)
{
	default_trap_handler(regs);
}

void do_non_secure_storage_access(struct pt_regs *regs)
{
	default_trap_handler(regs);
}
#endif