/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Linux interrupt vectors.
 */
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/atomic_32.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>
#include <arch/spr_def.h>

#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)

#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
	.macro  push_reg reg, ptr=sp, delta=-4
	{
	 sw     \ptr, \reg
	 addli  \ptr, \ptr, \delta
	}
	.endm

	.macro  pop_reg reg, ptr=sp, delta=4
	{
	 lw     \reg, \ptr
	 addli  \ptr, \ptr, \delta
	}
	.endm

	.macro  pop_reg_zero reg, zreg, ptr=sp, delta=4
	{
	 move   \zreg, zero
	 lw     \reg, \ptr
	 addi   \ptr, \ptr, \delta
	}
	.endm
	.macro  push_extra_callee_saves reg
	PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
	push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
	.endm
	.pushsection .rodata, "a"

#ifdef __COLLECT_LINKER_FEEDBACK__
	.pushsection .text.intvec_feedback,"ax"
/*
 * Default interrupt handler.
 *
 * vecnum is where we'll put this code.
 * c_routine is the C routine we'll call.
 *
 * The C routine is passed two arguments:
 * - A pointer to the pt_regs state.
 * - The interrupt vector number.
 *
 * The "processing" argument specifies the code for processing
 * the interrupt.  Defaults to "handle_interrupt".
 */
	.macro  int_hand vecnum, vecname, c_routine, processing=handle_interrupt
	.ifc \vecnum, INT_SWINT_1
	blz     TREG_SYSCALL_NR_NAME, sys_cmpxchg
	.endif

	/* Temporarily save a register so we have somewhere to work. */
	mtspr   SPR_SYSTEM_SAVE_K_1, r0
	mfspr   r0, SPR_EX_CONTEXT_K_1

	/* The cmpxchg code clears sp to force us to reset it here on fault. */
	andi    r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */

	.ifc \vecnum, INT_DOUBLE_FAULT
	/*
	 * For double-faults from user-space, fall through to the normal
	 * register save and stack setup path.  Otherwise, it's the
	 * hypervisor giving us one last chance to dump diagnostics, and we
	 * branch to the kernel_double_fault routine to do so.
	 */
	j       _kernel_double_fault
	.endif

	/*
	 * If we're coming from user-space, then set sp to the top of
	 * the kernel stack.  Otherwise, assume sp is already valid.
	 */

	.ifc \c_routine, do_page_fault
	/*
	 * The page_fault handler may be downcalled directly by the
	 * hypervisor even when Linux is running and has ICS set.
	 *
	 * In this case the contents of EX_CONTEXT_K_1 reflect the
	 * previous fault and can't be relied on to choose whether or
	 * not to reinitialize the stack pointer.  So we add a test
	 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
	 * and if so we don't reinitialize sp, since we must be coming
	 * from Linux.  (In fact the precise case is !(val & ~1),
	 * but any Linux PC has to have the high bit set.)
	 *
	 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
	 * any path that turns into a downcall to one of our TLB handlers.
	 */
	mfspr   r0, SPR_SYSTEM_SAVE_K_2
	blz     r0, 0f   /* high bit in S_S_1_2 is for a PC to use */

	/*
	 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
	 * the current stack top in the higher bits.  So we recover
	 * our stack top by just masking off the low bits, then
	 * point sp at the top aligned address on the actual stack page.
	 */
	mfspr   r0, SPR_SYSTEM_SAVE_K_0
	mm      r0, r0, zero, LOG2_NR_CPU_IDS, 31
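	/*
	 * Roughly, in C (a sketch only; "ssk0" stands in for the value
	 * just read from SPR_SYSTEM_SAVE_K_0):
	 *
	 *	unsigned long top = ssk0 & ~((1UL << LOG2_NR_CPU_IDS) - 1);
	 */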
	/*
	 * Align the stack mod 64 so we can properly predict what
	 * cache lines we need to write-hint to reduce memory fetch
	 * latency as we enter the kernel.  The layout of memory is
	 * as follows, with cache line 0 at the lowest VA, and cache
	 * line 4 just below the r0 value this "andi" computes.
	 * Note that we never write to cache line 4, and we skip
	 * cache line 1 for syscalls.
	 *
	 *    cache line 4: ptregs padding (two words)
	 *    cache line 3: r46...lr, pc, ex1, faultnum, orig_r0, flags, pad
	 *    cache line 2: r30...r45
	 *    cache line 1: r14...r29
	 *    cache line 0: 2 x frame, r0..r13
	 */
#if STACK_TOP_DELTA != 64
#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
#endif

	/*
	 * Push the first four registers on the stack, so that we can set
	 * them to vector-unique values before we jump to the common code.
	 *
	 * Registers are pushed on the stack as a struct pt_regs,
	 * with the sp initially just above the struct, and when we're
	 * done, sp points to the base of the struct, minus
	 * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
	 *
	 * This routine saves just the first four registers, plus the
	 * stack context so we can do proper backtracing right away,
	 * and defers to handle_interrupt to save the rest.
	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
	 */
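	/*
	 * For orientation, a sketch of the pt_regs layout the cache-line
	 * diagram above implies (field names assumed from <asm/ptrace.h>;
	 * illustrative, not authoritative):
	 *
	 *	struct pt_regs {
	 *		pt_reg_t regs[53];	// r0..r52
	 *		pt_reg_t tp, sp, lr;
	 *		pt_reg_t pc, ex1, faultnum, orig_r0, flags;
	 *		pt_reg_t pad[3];
	 *	};
	 */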
	addli   r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
	wh64    r0               /* cache line 3 */
	addli   r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
	addli   sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
	addli   sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
	wh64    sp               /* cache line 0 */
	addli   sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
	addli   sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
	addli   sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
	mfspr   r0, SPR_EX_CONTEXT_K_0
	.ifc \processing,handle_syscall
	/*
	 * Bump the saved PC by one bundle so that when we return, we won't
	 * execute the same swint instruction again.  We need to do this while
	 * we're in the critical section.
	 */
	addli   sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	mfspr   r0, SPR_EX_CONTEXT_K_1
	addi    sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
	/*
	 * Use r0 for syscalls so it's a temporary; use r1 for interrupts
	 * so that it gets passed through unchanged to the handler routine.
	 * Note that the .if conditional confusingly spans bundles.
	 */
	.ifc \processing,handle_syscall
	addli   sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
	mfspr   r0, SPR_SYSTEM_SAVE_K_1   /* Original r0 */
	addi    sp, sp, -PTREGS_OFFSET_REG(0) - 4
	sw      sp, zero         /* write zero into "Next SP" frame pointer */
	addi    sp, sp, -4       /* leave SP pointing at bottom of frame */
	.ifc \processing,handle_syscall
	/*
	 * Capture per-interrupt SPR context to registers.
	 * We overload the meaning of r3 on this path such that if its bit 31
	 * is set, we have to mask all interrupts including NMIs before
	 * clearing the interrupt critical section bit.
	 * See discussion below at "finish_interrupt_save".
	 */
	.ifc \c_routine, do_page_fault
	mfspr   r2, SPR_SYSTEM_SAVE_K_3   /* address of page fault */
	mfspr   r3, SPR_SYSTEM_SAVE_K_2   /* info about page fault */
	.ifc \vecnum, INT_DOUBLE_FAULT
	mfspr   r2, SPR_SYSTEM_SAVE_K_2   /* double fault info from HV */
	.ifc \c_routine, do_trap
	.ifc \c_routine, op_handle_perf_interrupt
	mfspr   r2, PERF_COUNT_STS
	movei   r3, -1           /* not used, but set for consistency */
	.ifc \c_routine, op_handle_aux_perf_interrupt
	mfspr   r2, AUX_PERF_COUNT_STS
	movei   r3, -1           /* not used, but set for consistency */
	/* Put function pointer in r0 */
	moveli  r0, lo16(\c_routine)
	auli    r0, r0, ha16(\c_routine)
	ENDPROC(intvec_\vecname)

#ifdef __COLLECT_LINKER_FEEDBACK__
	.pushsection .text.intvec_feedback,"ax"
	FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
#endif

/*
 * Save the rest of the registers that we didn't save in the actual
 * vector itself.  We can't use r0-r10 inclusive here.
 */
	.macro  finish_interrupt_save, function

	/* If it's a syscall, save a proper orig_r0, otherwise just zero. */
	PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
	.ifc \function,handle_syscall
	PTREGS_PTR(r52, PTREGS_OFFSET_TP)

	/*
	 * For ordinary syscalls, we save neither caller- nor callee-
	 * save registers, since the syscall invoker doesn't expect the
	 * caller-saves to be saved, and the called kernel functions will
	 * take care of saving the callee-saves for us.
	 *
	 * For interrupts we save just the caller-save registers.  Saving
	 * them is required (since the "caller" can't save them).  Again,
	 * the called kernel functions will restore the callee-save
	 * registers for us appropriately.
	 *
	 * On return, we normally restore nothing special for syscalls,
	 * and just the caller-save registers for interrupts.
	 *
	 * However, there are some important caveats to all this:
	 *
	 * - We always save a few callee-save registers to give us
	 *   some scratchpad registers to carry across function calls.
	 *
	 * - fork/vfork/etc require us to save all the callee-save
	 *   registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
	 *
	 * - We always save r0..r5 and r10 for syscalls, since we need
	 *   to reload them a bit later for the actual kernel call, and
	 *   since we might need them for -ERESTARTNOINTR, etc.
	 *
	 * - Before invoking a signal handler, we save the unsaved
	 *   callee-save registers so they are visible to the
	 *   signal handler or any ptracer.
	 *
	 * - If the unsaved callee-save registers are modified, we set
	 *   a bit in pt_regs so we know to reload them from pt_regs
	 *   and not just rely on the kernel function unwinding.
	 *   (Done for ptrace register writes and SA_SIGINFO handler.)
	 */
	PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
	wh64    r52              /* cache line 2 */
	.ifc \function,handle_syscall
	push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
	push_reg TREG_SYSCALL_NR_NAME, r52, \
		 PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
	push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
	wh64    r52              /* cache line 1 */

	/* Load tp with our per-cpu offset. */
	mfspr   r20, SPR_SYSTEM_SAVE_K_0
	moveli  r21, lo16(__per_cpu_offset)
	auli    r21, r21, ha16(__per_cpu_offset)
	mm      r20, r20, zero, 0, LOG2_NR_CPU_IDS-1

	/*
	 * If we will be returning to the kernel, we will need to
	 * reset the interrupt masks to the state they had before.
	 * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
	 * We load flags in r32 here so we can jump to .Lrestore_regs
	 * directly after do_page_fault_ics() if necessary.
	 */
	mfspr   r32, SPR_EX_CONTEXT_K_1
	andi    r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
	bzt     r32, 1f          /* zero if from user space */
	IRQS_DISABLED(r32)       /* zero if irqs enabled */
#if PT_FLAGS_DISABLE_IRQ != 1
# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
#endif
	.ifnc \function,handle_syscall
	/* Record the fact that we saved the caller-save registers above. */
	ori     r32, r32, PT_FLAGS_CALLER_SAVES

#ifdef __COLLECT_LINKER_FEEDBACK__
	/*
	 * Notify the feedback routines that we were in the
	 * appropriate fixed interrupt vector area.  Note that we
	 * still have ICS set at this point, so we can't invoke any
	 * atomic operations or we will panic.  The feedback
	 * routines internally preserve r0..r10 and r30 up.
	 */
	.ifnc \function,handle_syscall
	moveli  r20, INT_SWINT_1 << 5
	addli   r20, r20, lo16(intvec_feedback)
	auli    r20, r20, ha16(intvec_feedback)

	/* And now notify the feedback routines that we are here. */
	FEEDBACK_ENTER(\function)
#endif

	/*
	 * At this point we've captured enough state to the stack (including in
	 * particular our EX_CONTEXT state) that we can now release
	 * the interrupt critical section and replace it with our
	 * standard "interrupts disabled" mask value.  This allows
	 * synchronous interrupts (and profile interrupts) to punch
	 * through from this point onwards.
	 *
	 * If bit 31 of r3 is set during a non-NMI interrupt, we know we
	 * are on the path where the hypervisor has punched through our
	 * ICS with a page fault, so we call out to do_page_fault_ics()
	 * to figure out what to do with it.  If the fault was in
	 * an atomic op, we unlock the atomic lock, adjust the
	 * saved register state a little, and return "zero" in r4,
	 * falling through into the normal page-fault interrupt code.
	 * If the fault was in a kernel-space atomic operation, then
	 * do_page_fault_ics() resolves it itself, returns "one" in r4,
	 * and as a result goes directly to restoring registers and iret,
	 * without trying to adjust the interrupt masks at all.
	 * The do_page_fault_ics() API involves passing and returning
	 * a five-word struct (in registers) to avoid writing the
	 * save and restore code here.
	 */
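	/*
	 * A sketch of the five-word convention described above (struct
	 * and parameter names here are assumptions; the real definitions
	 * live with do_page_fault_ics() itself):
	 *
	 *	struct intvec_state {
	 *		void *handler;
	 *		unsigned long vecnum, fault_num, info, retval;
	 *	};
	 *	struct intvec_state do_page_fault_ics(struct pt_regs *regs,
	 *					      int fault_num,
	 *					      unsigned long address,
	 *					      unsigned long info);
	 */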
	.ifc \function,handle_nmi
	.ifnc \function,handle_syscall
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	jal     do_page_fault_ics
	FEEDBACK_REENTER(\function)
	IRQ_DISABLE(r20, r21)
	mtspr   INTERRUPT_CRITICAL_SECTION, zero

	/*
	 * Prepare the first 256 stack bytes to be rapidly accessible
	 * without having to fetch the background data.  We don't really
	 * know how far to write-hint, but kernel stacks generally
	 * aren't that big, and write-hinting here does take some time.
	 */

#ifdef CONFIG_TRACE_IRQFLAGS
	.ifnc \function,handle_nmi
	/*
	 * We finally have enough state set up to notify the irq
	 * tracing code that irqs were disabled on entry to the handler.
	 * The TRACE_IRQS_OFF call clobbers registers r0-r29.
	 * For syscalls, we already have the register state saved away
	 * on the stack, so we don't bother to do any register saves here,
	 * and later we pop the registers back off the kernel stack.
	 * For interrupt handlers, save r0-r3 in callee-saved registers.
	 */
	.ifnc \function,handle_syscall
	{ move r30, r0; move r31, r1 }
	{ move r32, r2; move r33, r3 }
	.ifnc \function,handle_syscall
	{ move r0, r30; move r1, r31 }
	{ move r2, r32; move r3, r33 }

	.macro  check_single_stepping, kind, not_single_stepping
	/*
	 * Check for single-stepping at user-level privilege.
	 *   kind can be "normal", "ill", or "syscall"
	 * At end, if we fall through:
	 *   r29: thread_info->step_state
	 *   r26: thread_info->step_state->buffer
	 */
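	/*
	 * For reference, a sketch of the per-thread single-step state
	 * this macro walks (field names inferred from the
	 * SINGLESTEP_STATE_*_OFFSET symbols used below; illustrative
	 * only):
	 *
	 *	struct single_step_state {
	 *		void __user *buffer;	// two-bundle execution buffer
	 *		unsigned long flags;
	 *		unsigned long orig_pc, next_pc, branch_next_pc;
	 *		unsigned long update_value;
	 *	};
	 */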
	/* Check for single stepping */

	/* Get pointer to field holding step state */
	addi    r29, r29, THREAD_INFO_STEP_STATE_OFFSET

	/* Get pointer to EX1 in register state */
	PTREGS_PTR(r27, PTREGS_OFFSET_EX1)

	/* Get pointer to field holding PC */
	PTREGS_PTR(r28, PTREGS_OFFSET_PC)

	/* Load the pointer to the step state */

	/* Points to flags */
	addi    r23, r29, SINGLESTEP_STATE_FLAGS_OFFSET

	/* No single stepping if there is no step state structure */
	bzt     r29, \not_single_stepping

	/* mask off ICS and any other high bits */
	andi    r27, r27, SPR_EX_CONTEXT_1_1__PL_MASK

	/* Load pointer to single step instruction buffer */

	/* Check priv state */
	bnz     r27, \not_single_stepping

	/* Branch if single-step mode not enabled */
	bbnst   r22, \not_single_stepping

	/* Clear enabled flag */
	andi    r22, r22, ~SINGLESTEP_STATE_MASK_IS_ENABLED

	/* Point to the entry containing the original PC */
	addi    r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET

	/* Disable single stepping flag */

	/* Get the original pc */

	/* See if the PC is at the start of the single step buffer */
	/*
	 * NOTE: it is really expected that the PC be in the single step
	 * buffer at this point.
	 */
	bzt     r25, \not_single_stepping

	/* Restore the original PC */

	/* Point to the entry containing the next PC */
	addi    r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET

	/* Increment the stopped PC by the bundle size */

	/* Disable single stepping flag */

	/* Get the next pc */

	/*
	 * See if the PC is one bundle past the start of the
	 * single step buffer.
	 *
	 * NOTE: it is really expected that the PC be in the
	 * single step buffer at this point.
	 */
	bzt     r25, \not_single_stepping

	/* Set to the next PC */

	/* Point to 3rd bundle in buffer */

	/* Disable single stepping flag */

	/* See if the PC is in the single step buffer */

	/*
	 * NOTE: it is really expected that the PC be in the
	 * single step buffer at this point.
	 */
	bzt     r24, \not_single_stepping
	bzt     r25, \not_single_stepping

/*
 * Redispatch a downcall.
 */
	.macro  dc_dispatch vecnum, vecname
	j       _hv_downcall_dispatch
	ENDPROC(intvec_\vecname)

/*
 * Common code for most interrupts.  The C function we're eventually
 * going to is in r0, and the faultnum is in r1; the original
 * values for those registers are on the stack.
 */
	.pushsection .text.handle_interrupt,"ax"
	finish_interrupt_save handle_interrupt
	/*
	 * Check whether we are single-stepping in user level.  If so,
	 * then we need to restore the PC.
	 */
	check_single_stepping normal, .Ldispatch_interrupt
.Ldispatch_interrupt:

	/* Jump to the C routine; it should enable irqs as soon as possible. */
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	FEEDBACK_REENTER(handle_interrupt)
	movei   r30, 0   /* not an NMI */
	STD_ENDPROC(handle_interrupt)
/*
 * This routine takes a boolean in r30 indicating if this is an NMI.
 * If so, we also expect a boolean in r31 indicating whether to
 * re-enable the oprofile interrupts.
 *
 * Note that .Lresume_userspace is jumped to directly in several
 * places, and we need to make sure r30 is set correctly in those
 * cases as well.
 */
STD_ENTRY(interrupt_return)
	/* If we're resuming to kernel space, don't check thread flags. */
	bnz     r30, .Lrestore_all  /* NMIs don't special-case user-space */
	PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
	andi    r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	bzt     r29, .Lresume_userspace

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption. */
	addli   r28, r29, THREAD_INFO_FLAGS_OFFSET
	addli   r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
	andi    r28, r28, _TIF_NEED_RESCHED

	/* Disable interrupts explicitly for preemption. */
	jal     preempt_schedule_irq
	FEEDBACK_REENTER(interrupt_return)
#endif

	/* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
	PTREGS_PTR(r29, PTREGS_OFFSET_PC)
	moveli  r27, lo16(_cpu_idle_nap)
	auli    r27, r27, ha16(_cpu_idle_nap)
	bbns    r27, .Lrestore_all
	FEEDBACK_REENTER(interrupt_return)

	/*
	 * Use r33 to hold whether we have already loaded the callee-saves
	 * into ptregs.  We don't want to do it twice in this loop, since
	 * then we'd clobber whatever changes are made by ptrace, etc.
	 * Get base of stack in r32.
	 */

.Lretry_work_pending:
	/*
	 * Disable interrupts so as to make sure we don't
	 * miss an interrupt that sets any of the thread flags (like
	 * need_resched or sigpending) between sampling and the iret.
	 * Routines like schedule() or do_signal() may re-enable
	 * interrupts before returning.
	 */
	IRQ_DISABLE(r20, r21)
	TRACE_IRQS_OFF   /* Note: clobbers registers r0-r29 */

	/* Check to see if there is any work to do before returning to user. */
	addi    r29, r32, THREAD_INFO_FLAGS_OFFSET
	moveli  r1, lo16(_TIF_ALLWORK_MASK)
	auli    r1, r1, ha16(_TIF_ALLWORK_MASK)
	bzt     r1, .Lrestore_all

	/*
	 * Make sure we have all the registers saved for signal
	 * handling, notify-resume, or single-step.  Call out to C
	 * code to figure out exactly what we need to do for each flag bit,
	 * then if necessary, reload the flags and recheck.
	 */
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	push_extra_callee_saves r0
1:	jal     do_work_pending
	bnz     r0, .Lretry_work_pending
	/*
	 * In the NMI case we omit the call to single_process_check_nohz,
	 * which normally checks to see if we should start or stop the
	 * scheduler tick, because we can't call arbitrary Linux code
	 * from an NMI context.
	 * We always call the homecache TLB deferral code to re-trigger
	 * the deferral mechanism.
	 *
	 * The other chunk of responsibility this code has is to reset the
	 * interrupt masks appropriately to reset irqs and NMIs.  We have
	 * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
	 * lockdep-type stuff, but we can't set ICS until afterwards, since
	 * ICS can only be used in very tight chunks of code to avoid
	 * tripping over various assertions that it is off.
	 *
	 * (There is what looks like a window of vulnerability here since
	 * we might take a profile interrupt between the two SPR writes
	 * that set the mask, but since we write the low SPR word first,
	 * and our interrupt entry code checks the low SPR word, any
	 * profile interrupt will actually disable interrupts in both SPRs
	 * before returning, which is OK.)
	 */
	PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
	PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
	andi    r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
#if PT_FLAGS_DISABLE_IRQ != 1
# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use bbnst below
#endif
	mtspr   INTERRUPT_CRITICAL_SECTION, r0
	bzt     r30, .Lrestore_regs
	mtspr   INTERRUPT_CRITICAL_SECTION, r0
	bzt     r30, .Lrestore_regs

	/*
	 * We now commit to returning from this interrupt, since we will be
	 * doing things like setting EX_CONTEXT SPRs and unwinding the stack
	 * frame.  No calls should be made to any other code after this point.
	 * This code should only be entered with ICS set.
	 * r32 must still be set to ptregs.flags.
	 * We launch loads to each cache line separately first, so we can
	 * get some parallelism out of the memory subsystem.
	 * We start zeroing caller-saved registers throughout, since
	 * that will save some cycles if this turns out to be a syscall.
	 */
	FEEDBACK_REENTER(interrupt_return)   /* called from elsewhere */

	/*
	 * Rotate so we have one high bit and one low bit to test.
	 * - low bit says whether to restore all the callee-saved registers,
	 *   or just r30-r33, and r52 up.
	 * - high bit (i.e. sign bit) says whether to restore all the
	 *   caller-saved registers, or just r0.
	 */
#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
# error Rotate trick does not work :-)
#endif
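	/*
	 * Illustrative C for the rotate trick (a sketch): rotating the
	 * flags word right by two places PT_FLAGS_RESTORE_REGS (4) in
	 * the low bit and PT_FLAGS_CALLER_SAVES (2) in the sign bit:
	 *
	 *	u32 r = (flags >> 2) | (flags << 30);
	 *	int restore_callees = r & 1;
	 *	int restore_callers = (s32)r < 0;
	 */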
	PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))

	/*
	 * Load cache lines 0, 2, and 3 in that order, then use
	 * the last loaded value, which makes it likely that the other
	 * cache lines have also loaded, at which point we should be
	 * able to safely read all the remaining words on those cache
	 * lines without waiting for the memory subsystem.
	 */
	pop_reg_zero r0, r28, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
	pop_reg_zero r30, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(30)
	pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
	mtspr   SPR_EX_CONTEXT_K_0, r21
	mtspr   SPR_EX_CONTEXT_K_1, lr
	andi    lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */

	/* Restore callee-saveds that we actually use. */
	pop_reg_zero r52, r6, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_REG(52)
	pop_reg_zero r31, r7
	pop_reg_zero r32, r8
	pop_reg_zero r33, r9, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)

	/*
	 * If we modified other callee-saveds, restore them now.
	 * This is rare, but could be via ptrace or signal handler.
	 */
	bbs     r20, .Lrestore_callees

.Lcontinue_restore_regs:
	/* Check if we're returning from a syscall. */
	blzt    r20, 1f   /* no, so go restore callee-save registers */

	/*
	 * Check if we're returning to userspace.
	 * Note that if we're not, we don't worry about zeroing everything.
	 */
	addli   sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
	bnz     lr, .Lkernel_return

	/*
	 * On return from syscall, we've restored r0 from pt_regs, but we
	 * clear the remainder of the caller-saved registers.  We could
	 * restore the syscall arguments, but there's not much point,
	 * and it ensures user programs aren't trying to use the
	 * caller-saves if we clear them, as well as avoiding leaking
	 * kernel pointers into userspace.
	 */
	pop_reg_zero lr, r12, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
	pop_reg_zero tp, r13, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
	{ move r16, zero; move r17, zero }
	{ move r18, zero; move r19, zero }
	{ move r20, zero; move r21, zero }
	{ move r22, zero; move r23, zero }
	{ move r24, zero; move r25, zero }
	{ move r26, zero; move r27, zero }

	/* Set r1 to errno if we are returning an error, otherwise zero. */

	/*
	 * Not a syscall, so restore caller-saved registers.
	 * First kick off a load for cache line 1, which we're touching
	 * for the first time here.
	 */
1:	pop_reg r29, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(29)
	pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
	/* r29 already restored above */
	bnz     lr, .Lkernel_return
	pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
	pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP

	/*
	 * We can't restore tp when in kernel mode, since a thread might
	 * have migrated from another cpu and brought a stale tp value.
	 */
	pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR

	/* Restore callee-saved registers from r34 to r51. */
	addli   sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
	pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
	j       .Lcontinue_restore_regs
	STD_ENDPROC(interrupt_return)

/*
 * Some interrupts don't check for single stepping
 */
	.pushsection .text.handle_interrupt_no_single_step,"ax"
handle_interrupt_no_single_step:
	finish_interrupt_save handle_interrupt_no_single_step
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	FEEDBACK_REENTER(handle_interrupt_no_single_step)
	movei   r30, 0   /* not an NMI */
	STD_ENDPROC(handle_interrupt_no_single_step)

/*
 * "NMI" interrupts mask ALL interrupts before calling the
 * handler, and don't check thread flags, etc., on the way
 * back out.  In general, the only things we do here for NMIs
 * are the register save/restore, fixing the PC if we were
 * doing single step, and the dataplane kernel-TLB management.
 * We don't (for example) deal with start/stop of the sched tick.
 */
	.pushsection .text.handle_nmi,"ax"
	finish_interrupt_save handle_nmi
	check_single_stepping normal, .Ldispatch_nmi
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	FEEDBACK_REENTER(handle_nmi)
	STD_ENDPROC(handle_nmi)

/*
 * Parallel code for syscalls to handle_interrupt.
 */
	.pushsection .text.handle_syscall,"ax"
	finish_interrupt_save handle_syscall
	/*
	 * Check whether we are single-stepping in user level.  If so,
	 * then we need to restore the PC.
	 */
	check_single_stepping syscall, .Ldispatch_syscall
	IRQ_ENABLE(r20, r21)

	/* Bump the counter for syscalls made on this tile. */
	moveli  r20, lo16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
	auli    r20, r20, ha16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
	GET_THREAD_INFO(r31)

	/* Trace syscalls, if requested. */
	addi    r31, r31, THREAD_INFO_FLAGS_OFFSET
	andi    r30, r30, _TIF_SYSCALL_TRACE
	bzt     r30, .Lrestore_syscall_regs
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	jal     do_syscall_trace_enter
	FEEDBACK_REENTER(handle_syscall)

	/*
	 * We always reload our registers from the stack at this
	 * point.  They might be valid, if we didn't build with
	 * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
	 * doing syscall tracing, but there are enough cases now that it
	 * seems simplest just to do the reload unconditionally.
	 */
.Lrestore_syscall_regs:
	PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
	pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
	pop_reg TREG_SYSCALL_NR_NAME, r11

	/* Ensure that the syscall number is within the legal range. */
	moveli  r21, __NR_syscalls
	slt_u   r21, TREG_SYSCALL_NR_NAME, r21
	moveli  r20, lo16(sys_call_table)
	bbns    r21, .Linvalid_syscall
	auli    r20, r20, ha16(sys_call_table)
	s2a     r20, TREG_SYSCALL_NR_NAME, r20
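	/*
	 * In C terms, the range check and the "s2a" above amount to
	 * (a sketch):
	 *
	 *	if (nr >= __NR_syscalls)
	 *		goto invalid_syscall;
	 *	handler = sys_call_table[nr];  (s2a forms &sys_call_table[nr])
	 */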
	/* Jump to syscall handler. */
.Lhandle_syscall_link:   /* value of "lr" after "jalr r20" above */

	/*
	 * Write our r0 onto the stack so it gets restored instead
	 * of whatever the user had there before.
	 */
	PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))

.Lsyscall_sigreturn_skip:
	FEEDBACK_REENTER(handle_syscall)

	/* Do syscall trace again, if requested. */
	andi    r30, r30, _TIF_SYSCALL_TRACE
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	jal     do_syscall_trace_exit
	FEEDBACK_REENTER(handle_syscall)
	movei   r30, 0   /* not an NMI */
	j       .Lresume_userspace  /* jump into middle of interrupt_return */

	/* Report an invalid syscall back to the user program */
	PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
	movei   r30, 0   /* not an NMI */
	j       .Lresume_userspace  /* jump into middle of interrupt_return */
	STD_ENDPROC(handle_syscall)

/* Return the address for oprofile to suppress in backtraces. */
STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
	addli   r0, r0, .Lhandle_syscall_link - .
	STD_ENDPROC(handle_syscall_link_address)

STD_ENTRY(ret_from_fork)
	FEEDBACK_REENTER(ret_from_fork)
	movei   r30, 0   /* not an NMI */
	j       .Lresume_userspace  /* jump into middle of interrupt_return */
	STD_ENDPROC(ret_from_fork)

STD_ENTRY(ret_from_kernel_thread)
	FEEDBACK_REENTER(ret_from_fork)
	FEEDBACK_REENTER(ret_from_kernel_thread)
	movei   r30, 0   /* not an NMI */
	j       .Lresume_userspace  /* jump into middle of interrupt_return */
	STD_ENDPROC(ret_from_kernel_thread)

/*
 * Code for ill interrupt.
 */
	.pushsection .text.handle_ill,"ax"
	finish_interrupt_save handle_ill
	/*
	 * Check whether we are single-stepping in user level.  If so,
	 * then we need to restore the PC.
	 */
	check_single_stepping ill, .Ldispatch_normal_ill

	/* See if the PC is the 1st bundle in the buffer */

	/* Point to the 2nd bundle in the buffer */

	/* Point to the original pc */
	addi    r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET

	/* Branch if the PC is the 1st bundle in the buffer */

	/* See if the PC is the 2nd bundle of the buffer */

	/* Set PC to next instruction */
	addi    r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET

	/* Point to flags */
	addi    r25, r29, SINGLESTEP_STATE_FLAGS_OFFSET

	/* Branch if PC is in the second bundle */

	/*
	 * Get the offset for the register to restore.
	 * Note: the lower bound is 2, so we have implicit scaling by 4.
	 * No multiplication of the register number by the size of a register
	 * is needed.
	 */
	mm      r27, r25, zero, SINGLESTEP_STATE_TARGET_LB, \
		SINGLESTEP_STATE_TARGET_UB

	/* Mask Rewrite_LR */
	andi    r25, r25, SINGLESTEP_STATE_MASK_UPDATE
	addi    r29, r29, SINGLESTEP_STATE_UPDATE_VALUE_OFFSET

	/* Don't rewrite temp register */

	/* Get the temp value */

	/* Point to where the register is stored */

	/* Add in the C ABI save area size to the register offset */
	addi    r27, r27, C_ABI_SAVE_AREA_SIZE

	/* Restore the user's register with the temp value */

	/* Must be in the third bundle */
	addi    r24, r29, SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET

	/* set PC and continue */

	/*
	 * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
	 * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
	 * need to clear it here and can't really impose on all other arches.
	 * So what's another write between friends?
	 */
	addi    r1, r0, THREAD_INFO_FLAGS_OFFSET
	addi    r0, r0, THREAD_INFO_TASK_OFFSET  /* currently a no-op */
	andi    r2, r2, ~_TIF_SINGLESTEP
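	/*
	 * The flag-clearing sequence above is roughly this, in C
	 * (a sketch):
	 *
	 *	current_thread_info()->flags &= ~_TIF_SINGLESTEP;
	 */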
	/* Issue a sigtrap */
	lw      r0, r0   /* indirect thru thread_info to get task_info */
	addi    r1, sp, C_ABI_SAVE_AREA_SIZE  /* put ptregs pointer into r1 */
	jal     send_sigtrap     /* issue a SIGTRAP */
	FEEDBACK_REENTER(handle_ill)
	movei   r30, 0   /* not an NMI */
	j       .Lresume_userspace  /* jump into middle of interrupt_return */

.Ldispatch_normal_ill:
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	FEEDBACK_REENTER(handle_ill)
	movei   r30, 0   /* not an NMI */
	STD_ENDPROC(handle_ill)

/* Various stub interrupt handlers and syscall handlers */

STD_ENTRY_LOCAL(_kernel_double_fault)
	mfspr   r1, SPR_EX_CONTEXT_K_0
	addi    sp, sp, -C_ABI_SAVE_AREA_SIZE
	j       kernel_double_fault
	STD_ENDPROC(_kernel_double_fault)

STD_ENTRY_LOCAL(bad_intr)
	mfspr   r2, SPR_EX_CONTEXT_K_0
	panic   "Unhandled interrupt %#x: PC %#lx"
	STD_ENDPROC(bad_intr)

/*
 * Special-case sigreturn to not write r0 to the stack on return.
 * This is technically more efficient, but it also avoids difficulties
 * in the 64-bit OS when handling 32-bit compat code, since we must not
 * sign-extend r0 for the sigreturn return-value case.
 */
#define PTREGS_SYSCALL_SIGRETURN(x, reg) \
	addli   lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
	PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \

PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)

/* Save additional callee-saves to pt_regs and jump to standard function. */
STD_ENTRY(_sys_clone)
	push_extra_callee_saves r4
	STD_ENDPROC(_sys_clone)

/*
 * This entrypoint is taken for the cmpxchg and atomic_update fast
 * swints.  We may wish to generalize it to other fast swints at some
 * point, but for now there are just two very similar ones, which
 * are handled together here.
 *
 * The fast swint code is designed to have a small footprint.  It does
 * not save or restore any GPRs, counting on the caller-save registers
 * to be available to it on entry.  It does not modify any callee-save
 * registers (including "lr").  It does not check what PL it is being
 * called at, so you'd better not call it other than at PL0.
 * The <atomic.h> wrapper assumes it only clobbers r20-r29, so if
 * it ever is necessary to use more registers, be aware.
 *
 * It does not use the stack, but since it might be re-interrupted by
 * a page fault which would assume the stack was valid, it does
 * save/restore the stack pointer and zero it out to make sure it gets reset.
 * Since we always keep interrupts disabled, the hypervisor won't
 * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
 * (other than to advance the PC on return).
 *
 * We have to manually validate the user vs kernel address range
 * (since at PL1 we can read/write both), and for performance reasons
 * we don't allow cmpxchg on the fc000000 memory region, since we only
 * validate that the user address is below PAGE_OFFSET.
 *
 * We place it in the __HEAD section to ensure it is relatively
 * near to the intvec_SWINT_1 code (reachable by a conditional branch).
 *
 * Our use of ATOMIC_LOCK_REG here must match do_page_fault_ics().
 *
 * As we do in lib/atomic_asm_32.S, we bypass a store if the value we
 * would store is the same as the value we just loaded.
 */
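/*
 * In C terms, the two fast paths compute roughly this (a sketch of
 * the semantics only, not of any real kernel helper):
 *
 *	cmpxchg:       old = *p; if (old == oldval) *p = newval; return old;
 *	atomic_update: old = *p; *p = (old & mask) + addend;      return old;
 */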
	/* Align much later jump on the start of a cache line. */
#if PAGE_SIZE >= 0x10000

	/*
	 * Save "sp" and set it zero for any possible page fault.
	 *
	 * HACK: We want to both zero sp and check r0's alignment,
	 * so we do both at once.  If "sp" becomes nonzero we
	 * know r0 is unaligned and branch to the error handler that
	 * restores sp, so this is OK.
	 *
	 * ICS is disabled right now so having a garbage but nonzero
	 * sp is OK, since we won't execute any faulting instructions
	 * when it is nonzero.
	 */

	/*
	 * Get the lock address in ATOMIC_LOCK_REG, and also validate that the
	 * address is less than PAGE_OFFSET, since that won't trap at PL1.
	 * We only use bits less than PAGE_SHIFT to avoid having to worry
	 * about aliasing among multiple mappings of the same physical page,
	 * and we ignore the low 3 bits so we have one lock that covers
	 * both a cmpxchg64() and a cmpxchg() on either its low or high word.
	 *
	 * NOTE: this must match __atomic_hashed_lock() in lib/atomic_32.c.
	 */
#if (PAGE_OFFSET & 0xffff) != 0
# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
#endif

	/* Check for unaligned input. */
	bnz     sp, .Lcmpxchg_badaddr
	auli    r23, zero, hi16(PAGE_OFFSET)  /* hugepage-aligned */

	/*
	 * Slide bits into position for 'mm'.  We want to ignore
	 * the low 3 bits of r0, and consider only the next
	 * ATOMIC_HASH_SHIFT bits.
	 * Because of C pointer arithmetic, we want to compute this:
	 *
	 * ((char*)atomic_locks +
	 *  (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2))
	 *
	 * Instead of two shifts we just ">> 1", and use 'mm'
	 * to ignore the low and high bits we don't want.
	 */
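	/*
	 * Putting the pieces together, a C sketch of the lock address
	 * (it must agree with __atomic_hashed_lock(); this is just the
	 * formula quoted above):
	 *
	 *	lock = (int *)((char *)atomic_locks +
	 *		(((addr >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2));
	 */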
	/*
	 * Ensure that the TLB is loaded before we take out the lock.
	 * This will start fetching the value all the way into our L1
	 * as well (and if it gets modified before we grab the lock,
	 * it will be invalidated from our cache before we reload it).
	 */
	auli    r21, zero, ha16(atomic_locks)
	bbns    r23, .Lcmpxchg_badaddr
#if PAGE_SIZE < 0x10000
	/* atomic_locks is page-aligned so for big pages we don't need this. */
	addli   r21, r21, lo16(atomic_locks)
#endif

	/*
	 * Insert the hash bits into the page-aligned pointer.
	 * ATOMIC_HASH_SHIFT is so big that we don't actually hash
	 * the unmasked address bits, as that may cause unnecessary
	 * conflicts.
	 */
	mm      ATOMIC_LOCK_REG_NAME, r25, r21, 2, (ATOMIC_HASH_SHIFT + 2) - 1
	seqi    r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64

	/* Branch away at this point if we're doing a 64-bit cmpxchg. */
	bbs     r23, .Lcmpxchg64
	andi    r23, r0, 7       /* Precompute alignment for cmpxchg64. */

	/*
	 * We very carefully align the code that actually runs with
	 * the lock held (twelve bundles) so that we know it is all in
	 * the icache when we start.  This instruction (the jump) is
	 * at the start of the first cache line, address zero mod 64;
	 * we jump to the very end of the second cache line to get that
	 * line loaded in the icache, then fall through to issue the tns
	 * in the third cache line, at which point it's all cached.
	 * Note that this is for performance, not correctness.
	 */
	/* Symbol for do_page_fault_ics() to use to compare against the PC. */
	.global __sys_cmpxchg_grab_lock
__sys_cmpxchg_grab_lock:

	/*
	 * Perform the actual cmpxchg or atomic_update.
	 */
	seqi    r23, TREG_SYSCALL_NR_NAME, __NR_FAST_atomic_update
	seq     r22, r21, r1     /* See if cmpxchg matches. */
	and     r25, r21, r1     /* If atomic_update, compute (*mem & mask) */
	or      r22, r22, r23    /* Skip compare branch for atomic_update. */
	add     r25, r25, r2     /* Compute (*mem & mask) + addend. */
	mvnz    r24, r23, r25    /* Use atomic_update value if appropriate. */
	bbns    r22, .Lcmpxchg32_nostore
	seq     r22, r24, r21    /* Are we storing the value we loaded? */
	bbs     r22, .Lcmpxchg32_nostore

	/* The following instruction is the start of the second cache line. */
	/* Do slow mtspr here so the following "mf" waits less. */
	mtspr   SPR_EX_CONTEXT_K_0, r28
	sw      ATOMIC_LOCK_REG_NAME, zero

	/* Duplicated code here in the case where we don't overlap "mf" */
.Lcmpxchg32_nostore:
	sw      ATOMIC_LOCK_REG_NAME, zero
	mtspr   SPR_EX_CONTEXT_K_0, r28

	/*
	 * The locking code is the same for 32-bit cmpxchg/atomic_update,
	 * and for 64-bit cmpxchg.  We provide it as a macro and put
	 * it into both versions.  We can't share the code literally
	 * since it depends on having the right branch-back address.
	 */
	.macro  cmpxchg_lock, bitwidth

	/* Lock; if we succeed, jump back up to the read-modify-write. */
	tns     r21, ATOMIC_LOCK_REG_NAME

	/*
	 * Non-SMP preserves all the lock infrastructure, to keep the
	 * code simpler for the interesting (SMP) case.  However, we do
	 * one small optimization here and in atomic_asm.S, which is
	 * to fake out acquiring the actual lock in the atomic_lock table.
	 */

	/* Issue the slow SPR here while the tns result is in flight. */
	mfspr   r28, SPR_EX_CONTEXT_K_0
	addi    r28, r28, 8      /* return to the instruction after the swint1 */
	bzt     r21, .Ldo_cmpxchg\bitwidth

	/*
	 * The preceding instruction is the last thing that must be
	 * hot in the icache before we do the "tns" above.
	 */

	/*
	 * We failed to acquire the tns lock on our first try.  Now use
	 * bounded exponential backoff to retry, like __atomic_spinlock().
	 */
	moveli  r23, 2048        /* maximum backoff time in cycles */
	moveli  r25, 32          /* starting backoff time in cycles */
1:	mfspr   r26, CYCLE_LOW   /* get start point for this backoff */
2:	mfspr   r22, CYCLE_LOW   /* test to see if we've backed off enough */
	shli    r25, r25, 1      /* double the backoff; retry the tns */
	tns     r21, ATOMIC_LOCK_REG_NAME
	slt     r26, r23, r25    /* is the proposed backoff too big? */
	bzt     r21, .Ldo_cmpxchg\bitwidth
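	/*
	 * The backoff loop above, roughly in C (a sketch; tns() and
	 * cycle_low() stand in for the instruction and SPR accesses):
	 *
	 *	u32 delay = 32;
	 *	do {
	 *		u32 start = cycle_low();
	 *		while ((u32)(cycle_low() - start) < delay)
	 *			;
	 *		if (delay < 2048)
	 *			delay *= 2;
	 *	} while (tns(&lock) != 0);
	 */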
#endif /* CONFIG_SMP */

	/*
	 * This is the last instruction on the second cache line.
	 * The nop here loads the second line, then we fall through
	 * to the tns to load the third line before we take the lock.
	 */

	/*
	 * This code is invoked from sys_cmpxchg after most of the
	 * preconditions have been checked.  We still need to check
	 * that r0 is 8-byte aligned, since if it's not we won't
	 * actually be atomic.  However, ATOMIC_LOCK_REG has the atomic
	 * lock pointer and r27/r28 have the saved SP/PC.
	 * r23 is holding "r0 & 7" so we can test for alignment.
	 * The compare value is in r2/r3; the new value is in r4/r5.
	 * On return, we must put the old value in r0/r1.
	 */
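	/*
	 * Sketch of the intended semantics (register pairing assumed:
	 * r2/r3 = compare value, r4/r5 = new value, r0/r1 = returned old):
	 *
	 *	u64 old = *p;
	 *	if (old == cmpval)
	 *		*p = newval;
	 *	return old;
	 */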
	bzt     r23, .Lcmpxchg64_tns
	bz      r26, .Lcmpxchg64_mismatch
	bz      r26, .Lcmpxchg64_mismatch

	/*
	 * The 32-bit path provides optimized "match" and "mismatch"
	 * iret paths, but we don't have enough bundles in this cache line
	 * to do that, so we just make even the "mismatch" path do an "mf".
	 */
.Lcmpxchg64_mismatch:
	mtspr   SPR_EX_CONTEXT_K_0, r28
	sw      ATOMIC_LOCK_REG_NAME, zero

	/*
	 * Reset sp and revector to sys_cmpxchg_badaddr(), which will
	 * just raise the appropriate signal and exit.  Doing it this
	 * way means we don't have to duplicate the code in intvec.S's
	 * int_hand macro that locates the top of the stack.
	 */
	moveli  TREG_SYSCALL_NR_NAME, __NR_cmpxchg_badaddr
	ENDPROC(sys_cmpxchg)
	ENTRY(__sys_cmpxchg_end)

	/* The single-step support may need to read all the registers. */
	push_extra_callee_saves r0

/* Include .intrpt array of interrupt vectors */
1836 .section ".intrpt", "ax"
1838 #define op_handle_perf_interrupt bad_intr
1839 #define op_handle_aux_perf_interrupt bad_intr
1841 #ifndef CONFIG_HARDWALL
1842 #define do_hardwall_trap bad_intr
1845 int_hand INT_ITLB_MISS, ITLB_MISS, \
1846 do_page_fault, handle_interrupt_no_single_step
1847 int_hand INT_MEM_ERROR, MEM_ERROR, bad_intr
1848 int_hand INT_ILL, ILL, do_trap, handle_ill
1849 int_hand INT_GPV, GPV, do_trap
1850 int_hand INT_SN_ACCESS, SN_ACCESS, do_trap
1851 int_hand INT_IDN_ACCESS, IDN_ACCESS, do_trap
1852 int_hand INT_UDN_ACCESS, UDN_ACCESS, do_trap
1853 int_hand INT_IDN_REFILL, IDN_REFILL, bad_intr
1854 int_hand INT_UDN_REFILL, UDN_REFILL, bad_intr
1855 int_hand INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
1856 int_hand INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
1857 int_hand INT_SWINT_3, SWINT_3, do_trap
1858 int_hand INT_SWINT_2, SWINT_2, do_trap
1859 int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
1860 int_hand INT_SWINT_0, SWINT_0, do_trap
1861 int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
1862 int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
1863 int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
1864 int_hand INT_DMATLB_MISS, DMATLB_MISS, do_page_fault
1865 int_hand INT_DMATLB_ACCESS, DMATLB_ACCESS, do_page_fault
1866 int_hand INT_SNITLB_MISS, SNITLB_MISS, do_page_fault
1867 int_hand INT_SN_NOTIFY, SN_NOTIFY, bad_intr
1868 int_hand INT_SN_FIREWALL, SN_FIREWALL, do_hardwall_trap
1869 int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
1870 int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
1871 int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
1872 int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
1873 int_hand INT_UDN_TIMER, UDN_TIMER, bad_intr
1874 int_hand INT_DMA_NOTIFY, DMA_NOTIFY, bad_intr
1875 int_hand INT_IDN_CA, IDN_CA, bad_intr
1876 int_hand INT_UDN_CA, UDN_CA, bad_intr
1877 int_hand INT_IDN_AVAIL, IDN_AVAIL, bad_intr
1878 int_hand INT_UDN_AVAIL, UDN_AVAIL, bad_intr
1879 int_hand INT_PERF_COUNT, PERF_COUNT, \
1880 op_handle_perf_interrupt, handle_nmi
1881 int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr
1882 #if CONFIG_KERNEL_PL == 2
1883 dc_dispatch INT_INTCTRL_2, INTCTRL_2
1884 int_hand INT_INTCTRL_1, INTCTRL_1, bad_intr
1886 int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr
1887 dc_dispatch INT_INTCTRL_1, INTCTRL_1
1889 int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
	int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
		     hv_message_intr
	int_hand     INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \
		     tile_dev_intr
	int_hand     INT_I_ASID, I_ASID, bad_intr
	int_hand     INT_D_ASID, D_ASID, bad_intr
	int_hand     INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \
		     do_page_fault
	int_hand     INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \
		     do_page_fault
	int_hand     INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \
		     do_page_fault
	int_hand     INT_SN_CPL, SN_CPL, bad_intr
	int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
	int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
		     op_handle_aux_perf_interrupt, handle_nmi

	/* Synthetic interrupt delivered only by the simulator */
	int_hand     INT_BREAKPOINT, BREAKPOINT, do_breakpoint