// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"
#define __FAIL_ADDR_MASK	-4096L
#define __SUBCODE_MASK		0x0600
#define __PF_RES_FIELD		0x8000000000000000ULL
#define VM_FAULT_BADCONTEXT	((__force vm_fault_t) 0x010000)
#define VM_FAULT_BADMAP		((__force vm_fault_t) 0x020000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t) 0x040000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t) 0x080000)
#define VM_FAULT_PFAULT		((__force vm_fault_t) 0x100000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};
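/*
 * store_indication selects the fetch/store-indication bits of the
 * translation exception identification (TEID). These bits are only
 * valid when the access-exception fetch/store-indication facility
 * (facility 75) is installed; do_exception() compares them against
 * 0x400 to recognize a failing store access.
 */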
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);
/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2)
		return USER_FAULT;
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}
static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}
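/*
 * Walk the DAT tables for the given ASCE top-down (region first,
 * second and third tables, segment table, page table) and print one
 * entry per level. The walk stops early at an invalid or large entry
 * or when a table entry cannot be read.
 */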
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}
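/*
 * Decode the translation exception identification (TEID) stored in
 * regs->int_parm_long: the failing address, the address-space mode in
 * effect at the time of the fault and the ASCE used for translation.
 */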
static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}
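/* Typically exposed as /proc/sys/debug/exception-trace. */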
int show_unhandled_signals = 1;
void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}
/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}
const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
	const struct exception_table_entry *fixup;

	fixup = search_extable(__start_dma_ex_table,
			       __stop_dma_ex_table - __start_dma_ex_table,
			       addr);
	if (!fixup)
		fixup = search_exception_tables(addr);
	return fixup;
}
static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = s390_search_extables(regs->psw.addr);
	if (fixup && ex_handle(fixup, regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}
static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die (regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}
static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}
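/*
 * An execute-protection fault on the signal return trampoline is
 * rewritten into the corresponding system call: 0x0a77 is "svc 119"
 * (sigreturn) and 0x0aad is "svc 173" (rt_sigreturn).
 */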
static noinline int signal_return(struct pt_regs *regs)
{
	u16 instruction;
	int rc;

	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
	if (rc)
		return rc;
	if (instruction == 0x0a77) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x00040077;
		return 0;
	} else if (instruction == 0x0aad) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x000400ad;
		return 0;
	}
	return -EACCES;
}
static noinline void do_fault_error(struct pt_regs *regs, int access,
				    vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
		if (access == VM_EXEC && signal_return(regs) == 0)
			break;
		fallthrough;
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}
/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	mmap_read_lock(mm);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_lock has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			flags &= ~FAULT_FLAG_RETRY_NOWAIT;
			flags |= FAULT_FLAG_TRIED;
			mmap_read_lock(mm);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}
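/*
 * do_protection_exception() and do_dat_exception() are the entry
 * points wired into the program-check handler table.
 */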
void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection. This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
				      (regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);
void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);
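/*
 * Everything below implements the z/VM "pseudo page fault" (pfault)
 * handshake; see the mechanism description ahead of pfault_interrupt().
 */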
#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}
__setup("nopfault", nopfault);
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};
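/*
 * Enable pfault handshaking with the host via DIAG 0x258, function
 * code 0. refgaddr = __LC_LPP makes the host return the lowcore LPP
 * field, which contains the pid, as the fault token.
 */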
int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}
static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080
/*
 * The mechanism of our pfault code: if Linux is running as guest, runs a user
 * space process and the user space process accesses a page that the host has
 * paged out we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt then we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happens within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule(). It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}
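/*
 * CPU hotplug callback: wake every task still parked on the pfault
 * list, since a pending completion interrupt for the dead cpu would
 * otherwise never be delivered.
 */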
static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}
static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);
#endif /* CONFIG_PFAULT */
#if IS_ENABLED(CONFIG_PGSTE)
void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	int rc;

	switch (get_fault_type(regs)) {
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	case GMAP_FAULT:
	default:
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);
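/*
 * Raised when a KVM guest expects a secure (protected virtualization)
 * page but the backing page is not secure yet: try to convert it and
 * deliver SIGSEGV if the conversion fails.
 */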
void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);
void do_secure_storage_violation(struct pt_regs *regs)
{
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	printk_ratelimited(KERN_WARNING
			   "Secure storage violation in task: %s, pid %d\n",
			   current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}
#else
void do_secure_storage_access(struct pt_regs *regs)
{
	default_trap_handler(regs);
}

void do_non_secure_storage_access(struct pt_regs *regs)
{
	default_trap_handler(regs);
}

void do_secure_storage_violation(struct pt_regs *regs)
{
	default_trap_handler(regs);
}
#endif