/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Linux interrupt vectors.
 */
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/atomic.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>
#include <arch/spr_def.h>
#ifdef CONFIG_PREEMPT
# error "No support for kernel preemption currently"
#endif
#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)

#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
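/*
 * Illustrative C reading of PTREGS_PTR (a sketch, not the literal macro):
 * on entry to C code, sp sits C_ABI_SAVE_AREA_SIZE below the saved
 * struct pt_regs, so
 *
 *   PTREGS_PTR(reg, PTREGS_OFFSET_FOO)
 *     ~ reg = (char *)sp + C_ABI_SAVE_AREA_SIZE + PTREGS_OFFSET_FOO;
 *
 * i.e. "reg = &pt_regs->foo" for whichever field the offset names.
 */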
	/* By making this an empty macro, we can use wh64 in the code. */
	.macro  push_reg reg, ptr=sp, delta=-4
	addli   \ptr, \ptr, \delta

	.macro  pop_reg reg, ptr=sp, delta=4
	addli   \ptr, \ptr, \delta

	.macro  pop_reg_zero reg, zreg, ptr=sp, delta=4
	addi    \ptr, \ptr, \delta
	.macro  push_extra_callee_saves reg
	PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
	push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
	.pushsection .rodata, "a"

#ifdef __COLLECT_LINKER_FEEDBACK__
	.pushsection .text.intvec_feedback,"ax"
/*
 * Default interrupt handler.
 *
 * vecnum is where we'll put this code.
 * c_routine is the C routine we'll call.
 *
 * The C routine is passed two arguments:
 * - A pointer to the pt_regs state.
 * - The interrupt vector number.
 *
 * The "processing" argument specifies the code for processing
 * the interrupt. Defaults to "handle_interrupt".
 */
	.macro  int_hand vecnum, vecname, c_routine, processing=handle_interrupt
	.ifc \vecnum, INT_SWINT_1
	blz     TREG_SYSCALL_NR_NAME, sys_cmpxchg

	/* Temporarily save a register so we have somewhere to work. */
	mtspr   SPR_SYSTEM_SAVE_K_1, r0
	mfspr   r0, SPR_EX_CONTEXT_K_1

	/* The cmpxchg code clears sp to force us to reset it here on fault. */
	andi    r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	.ifc \vecnum, INT_DOUBLE_FAULT
	/*
	 * For double-faults from user-space, fall through to the normal
	 * register save and stack setup path. Otherwise, it's the
	 * hypervisor giving us one last chance to dump diagnostics, and we
	 * branch to the kernel_double_fault routine to do so.
	 */
	j       _kernel_double_fault
	/*
	 * If we're coming from user-space, then set sp to the top of
	 * the kernel stack. Otherwise, assume sp is already valid.
	 */
	.ifc \c_routine, do_page_fault
	/*
	 * The page_fault handler may be downcalled directly by the
	 * hypervisor even when Linux is running and has ICS set.
	 *
	 * In this case the contents of EX_CONTEXT_K_1 reflect the
	 * previous fault and can't be relied on to choose whether or
	 * not to reinitialize the stack pointer. So we add a test
	 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
	 * and if so we don't reinitialize sp, since we must be coming
	 * from Linux. (In fact the precise case is !(val & ~1),
	 * but any Linux PC has to have the high bit set.)
	 *
	 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
	 * any path that turns into a downcall to one of our TLB handlers.
	 */
	mfspr   r0, SPR_SYSTEM_SAVE_K_2
	blz     r0, 0f  /* high bit in S_S_1_2 is for a PC to use */
	/*
	 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
	 * the current stack top in the higher bits. So we recover
	 * our stack top by just masking off the low bits, then
	 * point sp at the top aligned address on the actual stack page.
	 */
	mfspr   r0, SPR_SYSTEM_SAVE_K_0
	mm      r0, r0, zero, LOG2_THREAD_SIZE, 31
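	/*
	 * In C terms, the recovery above is roughly (an illustrative
	 * sketch, not the literal code):
	 *
	 *   unsigned long v = __insn_mfspr(SPR_SYSTEM_SAVE_K_0);
	 *   unsigned long stack_top = v & ~(THREAD_SIZE - 1);
	 *
	 * The 'mm' keeps bits [LOG2_THREAD_SIZE, 31] and zeroes the low
	 * bits that hold the cpu number.
	 */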
	/*
	 * Align the stack mod 64 so we can properly predict what
	 * cache lines we need to write-hint to reduce memory fetch
	 * latency as we enter the kernel. The layout of memory is
	 * as follows, with cache line 0 at the lowest VA, and cache
	 * line 4 just below the r0 value this "andi" computes.
	 * Note that we never write to cache line 4, and we skip
	 * cache line 1 for syscalls.
	 *
	 *    cache line 4: ptregs padding (two words)
	 *    cache line 3: r46...lr, pc, ex1, faultnum, orig_r0, flags, pad
	 *    cache line 2: r30...r45
	 *    cache line 1: r14...r29
	 *    cache line 0: 2 x frame, r0..r13
	 */
	/*
	 * Push the first four registers on the stack, so that we can set
	 * them to vector-unique values before we jump to the common code.
	 *
	 * Registers are pushed on the stack as a struct pt_regs,
	 * with the sp initially just above the struct, and when we're
	 * done, sp points to the base of the struct, minus
	 * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
	 *
	 * This routine saves just the first four registers, plus the
	 * stack context so we can do proper backtracing right away,
	 * and defers to handle_interrupt to save the rest.
	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
	 */
	addli   r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
	wh64    r0      /* cache line 3 */
	addli   r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
	addli   sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
	addli   sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
	wh64    sp      /* cache line 0 */
	addli   sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
	addli   sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
	addli   sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
	mfspr   r0, SPR_EX_CONTEXT_K_0
	.ifc \processing,handle_syscall
	/*
	 * Bump the saved PC by one bundle so that when we return, we won't
	 * execute the same swint instruction again. We need to do this while
	 * we're in the critical section.
	 */
	addli   sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	mfspr   r0, SPR_EX_CONTEXT_K_1
	addi    sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
	/*
	 * Use r0 for syscalls so it's a temporary; use r1 for interrupts
	 * so that it gets passed through unchanged to the handler routine.
	 * Note that the .if conditional confusingly spans bundles.
	 */
	.ifc \processing,handle_syscall
	addli   sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
	mfspr   r0, SPR_SYSTEM_SAVE_K_1  /* Original r0 */
	addi    sp, sp, -PTREGS_OFFSET_REG(0) - 4
	sw      sp, zero        /* write zero into "Next SP" frame pointer */
	addi    sp, sp, -4      /* leave SP pointing at bottom of frame */
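	/*
	 * Frame sketch after the two decrements above (illustrative,
	 * assuming the usual two-word C ABI save area):
	 *
	 *   sp + 8: base of struct pt_regs (== sp + C_ABI_SAVE_AREA_SIZE)
	 *   sp + 4: "Next SP" word, zeroed above so backtraces stop here
	 *   sp + 0: the other C ABI save word (lr slot)
	 */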
	.ifc \processing,handle_syscall
	/*
	 * Capture per-interrupt SPR context to registers.
	 * We overload the meaning of r3 on this path such that if its bit 31
	 * is set, we have to mask all interrupts including NMIs before
	 * clearing the interrupt critical section bit.
	 * See discussion below at "finish_interrupt_save".
	 */
	.ifc \c_routine, do_page_fault
	mfspr   r2, SPR_SYSTEM_SAVE_K_3   /* address of page fault */
	mfspr   r3, SPR_SYSTEM_SAVE_K_2   /* info about page fault */
	.ifc \vecnum, INT_DOUBLE_FAULT
	mfspr   r2, SPR_SYSTEM_SAVE_K_2   /* double fault info from HV */
	.ifc \c_routine, do_trap
	.ifc \c_routine, op_handle_perf_interrupt
	mfspr   r2, PERF_COUNT_STS
	movei   r3, -1    /* not used, but set for consistency */
#if CHIP_HAS_AUX_PERF_COUNTERS()
	.ifc \c_routine, op_handle_aux_perf_interrupt
	mfspr   r2, AUX_PERF_COUNT_STS
	movei   r3, -1    /* not used, but set for consistency */
#if CHIP_HAS_AUX_PERF_COUNTERS()

	/* Put function pointer in r0 */
	moveli  r0, lo16(\c_routine)
	auli    r0, r0, ha16(\c_routine)
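	/*
	 * lo16()/ha16() materialize a full 32-bit symbol address in two
	 * instructions; roughly (an illustrative sketch):
	 *
	 *   r0 = (short)lo16(sym);   // moveli: sign-extended low half
	 *   r0 += ha16(sym) << 16;   // auli: high half, pre-adjusted for
	 *                            // the low half's sign bit
	 */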
	ENDPROC(intvec_\vecname)

#ifdef __COLLECT_LINKER_FEEDBACK__
	.pushsection .text.intvec_feedback,"ax"
	FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
/*
 * Save the rest of the registers that we didn't save in the actual
 * vector itself. We can't use r0-r10 inclusive here.
 */
	.macro  finish_interrupt_save, function

	/* If it's a syscall, save a proper orig_r0, otherwise just zero. */
	PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
	.ifc \function,handle_syscall
	PTREGS_PTR(r52, PTREGS_OFFSET_TP)
	/*
	 * For ordinary syscalls, we save neither caller- nor callee-
	 * save registers, since the syscall invoker doesn't expect the
	 * caller-saves to be saved, and the called kernel functions will
	 * take care of saving the callee-saves for us.
	 *
	 * For interrupts we save just the caller-save registers. Saving
	 * them is required (since the "caller" can't save them). Again,
	 * the called kernel functions will restore the callee-save
	 * registers for us appropriately.
	 *
	 * On return, we normally restore nothing special for syscalls,
	 * and just the caller-save registers for interrupts.
	 *
	 * However, there are some important caveats to all this:
	 *
	 * - We always save a few callee-save registers to give us
	 *   some scratchpad registers to carry across function calls.
	 *
	 * - fork/vfork/etc require us to save all the callee-save
	 *   registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
	 *
	 * - We always save r0..r5 and r10 for syscalls, since we need
	 *   to reload them a bit later for the actual kernel call, and
	 *   since we might need them for -ERESTARTNOINTR, etc.
	 *
	 * - Before invoking a signal handler, we save the unsaved
	 *   callee-save registers so they are visible to the
	 *   signal handler or any ptracer.
	 *
	 * - If the unsaved callee-save registers are modified, we set
	 *   a bit in pt_regs so we know to reload them from pt_regs
	 *   and not just rely on the kernel function unwinding.
	 *   (Done for ptrace register writes and SA_SIGINFO handler.)
	 */
	PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
	wh64    r52     /* cache line 2 */

	.ifc \function,handle_syscall
	push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
	push_reg TREG_SYSCALL_NR_NAME, r52, \
		 PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
	push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
	wh64    r52     /* cache line 1 */
	/* Load tp with our per-cpu offset. */
	mfspr   r20, SPR_SYSTEM_SAVE_K_0
	moveli  r21, lo16(__per_cpu_offset)
	auli    r21, r21, ha16(__per_cpu_offset)
	mm      r20, r20, zero, 0, LOG2_THREAD_SIZE-1
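	/*
	 * Roughly, in C (an illustrative sketch, assuming __per_cpu_offset
	 * is a word-sized array indexed by cpu):
	 *
	 *   int cpu = __insn_mfspr(SPR_SYSTEM_SAVE_K_0) & (THREAD_SIZE - 1);
	 *   tp = __per_cpu_offset[cpu];
	 *
	 * The 'mm' keeps only bits [0, LOG2_THREAD_SIZE-1], i.e. the cpu
	 * number stored in the low bits of SYSTEM_SAVE_K_0.
	 */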
	/*
	 * If we will be returning to the kernel, we will need to
	 * reset the interrupt masks to the state they had before.
	 * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
	 * We load flags in r32 here so we can jump to .Lrestore_regs
	 * directly after do_page_fault_ics() if necessary.
	 */
	mfspr   r32, SPR_EX_CONTEXT_K_1
	andi    r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
	bzt     r32, 1f         /* zero if from user space */
	IRQS_DISABLED(r32)      /* zero if irqs enabled */
#if PT_FLAGS_DISABLE_IRQ != 1
# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
#endif
	.ifnc \function,handle_syscall
	/* Record the fact that we saved the caller-save registers above. */
	ori     r32, r32, PT_FLAGS_CALLER_SAVES
#ifdef __COLLECT_LINKER_FEEDBACK__
	/*
	 * Notify the feedback routines that we were in the
	 * appropriate fixed interrupt vector area. Note that we
	 * still have ICS set at this point, so we can't invoke any
	 * atomic operations or we will panic. The feedback
	 * routines internally preserve r0..r10 and r30 up.
	 */
	.ifnc \function,handle_syscall
	moveli  r20, INT_SWINT_1 << 5
	addli   r20, r20, lo16(intvec_feedback)
	auli    r20, r20, ha16(intvec_feedback)

	/* And now notify the feedback routines that we are here. */
	FEEDBACK_ENTER(\function)
	/*
	 * We've captured enough state to the stack (including in
	 * particular our EX_CONTEXT state) that we can now release
	 * the interrupt critical section and replace it with our
	 * standard "interrupts disabled" mask value. This allows
	 * synchronous interrupts (and profile interrupts) to punch
	 * through from this point onwards.
	 *
	 * If bit 31 of r3 is set during a non-NMI interrupt, we know we
	 * are on the path where the hypervisor has punched through our
	 * ICS with a page fault, so we call out to do_page_fault_ics()
	 * to figure out what to do with it. If the fault was in
	 * an atomic op, we unlock the atomic lock, adjust the
	 * saved register state a little, and return "zero" in r4,
	 * falling through into the normal page-fault interrupt code.
	 * If the fault was in a kernel-space atomic operation, then
	 * do_page_fault_ics() resolves it itself, returns "one" in r4,
	 * and as a result goes directly to restoring registers and iret,
	 * without trying to adjust the interrupt masks at all.
	 * The do_page_fault_ics() API involves passing and returning
	 * a five-word struct (in registers) to avoid writing the
	 * save and restore code here.
	 */
	.ifc \function,handle_nmi
	.ifnc \function,handle_syscall
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	jal     do_page_fault_ics
	FEEDBACK_REENTER(\function)
	IRQ_DISABLE(r20, r21)
	mtspr   INTERRUPT_CRITICAL_SECTION, zero
	/*
	 * Prepare the first 256 stack bytes to be rapidly accessible
	 * without having to fetch the background data. We don't really
	 * know how far to write-hint, but kernel stacks generally
	 * aren't that big, and write-hinting here does take some time.
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	.ifnc \function,handle_nmi
	/*
	 * We finally have enough state set up to notify the irq
	 * tracing code that irqs were disabled on entry to the handler.
	 * The TRACE_IRQS_OFF call clobbers registers r0-r29.
	 * For syscalls, we already have the register state saved away
	 * on the stack, so we don't bother to do any register saves here,
	 * and later we pop the registers back off the kernel stack.
	 * For interrupt handlers, save r0-r3 in callee-saved registers.
	 */
	.ifnc \function,handle_syscall
	{ move r30, r0; move r31, r1 }
	{ move r32, r2; move r33, r3 }
	.ifnc \function,handle_syscall
	{ move r0, r30; move r1, r31 }
	{ move r2, r32; move r3, r33 }
	.macro  check_single_stepping, kind, not_single_stepping
	/*
	 * Check for single stepping in user-level priv
	 *   kind can be "normal", "ill", or "syscall"
	 * At end, if fall-thru
	 *   r29: thread_info->step_state
	 *   r26: thread_info->step_state->buffer
	 */

	/* Check for single stepping */
	/* Get pointer to field holding step state */
	addi    r29, r29, THREAD_INFO_STEP_STATE_OFFSET

	/* Get pointer to EX1 in register state */
	PTREGS_PTR(r27, PTREGS_OFFSET_EX1)

	/* Get pointer to field holding PC */
	PTREGS_PTR(r28, PTREGS_OFFSET_PC)

	/* Load the pointer to the step state */

	/* Points to flags */
	addi    r23, r29, SINGLESTEP_STATE_FLAGS_OFFSET

	/* No single stepping if there is no step state structure */
	bzt     r29, \not_single_stepping
	/* mask off ICS and any other high bits */
	andi    r27, r27, SPR_EX_CONTEXT_1_1__PL_MASK

	/* Load pointer to single step instruction buffer */

	/* Check priv state */
	bnz     r27, \not_single_stepping

	/* Branch if single-step mode not enabled */
	bbnst   r22, \not_single_stepping

	/* Clear enabled flag */
	andi    r22, r22, ~SINGLESTEP_STATE_MASK_IS_ENABLED
	/* Point to the entry containing the original PC */
	addi    r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET

	/* Disable single stepping flag */

	/* Get the original pc */

	/* See if the PC is at the start of the single step buffer */

	/*
	 * NOTE: it is really expected that the PC be in the
	 * single step buffer at this point
	 */
	bzt     r25, \not_single_stepping

	/* Restore the original PC */
	/* Point to the entry containing the next PC */
	addi    r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET

	/* Increment the stopped PC by the bundle size */

	/* Disable single stepping flag */

	/* Get the next pc */

	/*
	 * See if the PC is one bundle past the start of the
	 * single step buffer
	 */

	/*
	 * NOTE: it is really expected that the PC be in the
	 * single step buffer at this point
	 */
	bzt     r25, \not_single_stepping

	/* Set to the next PC */
	/* Point to 3rd bundle in buffer */

	/* Disable single stepping flag */

	/* See if the PC is in the single step buffer */

	/*
	 * NOTE: it is really expected that the PC be in the
	 * single step buffer at this point
	 */
	bzt     r24, \not_single_stepping
	bzt     r25, \not_single_stepping
/*
 * Redispatch a downcall.
 */
	.macro  dc_dispatch vecnum, vecname
	j       hv_downcall_dispatch
	ENDPROC(intvec_\vecname)
/*
 * Common code for most interrupts. The C function we're eventually
 * going to is in r0, and the faultnum is in r1; the original
 * values for those registers are on the stack.
 */
	.pushsection .text.handle_interrupt,"ax"
	finish_interrupt_save handle_interrupt

	/*
	 * Check whether we are single-stepping in user space. If so,
	 * we need to restore the PC.
	 */
	check_single_stepping normal, .Ldispatch_interrupt
.Ldispatch_interrupt:

	/* Jump to the C routine; it should enable irqs as soon as possible. */
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	FEEDBACK_REENTER(handle_interrupt)
	movei   r30, 0  /* not an NMI */
	STD_ENDPROC(handle_interrupt)
/*
 * This routine takes a boolean in r30 indicating if this is an NMI.
 * If so, we also expect a boolean in r31 indicating whether to
 * re-enable the oprofile interrupts.
 */
STD_ENTRY(interrupt_return)
	/* If we're resuming to kernel space, don't check thread flags. */
	bnz     r30, .Lrestore_all  /* NMIs don't special-case user-space */
	PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
	andi    r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	bzt     r29, .Lresume_userspace
	PTREGS_PTR(r29, PTREGS_OFFSET_PC)

	/* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
	moveli  r27, lo16(_cpu_idle_nap)
	auli    r27, r27, ha16(_cpu_idle_nap)
	bbns    r27, .Lrestore_all
	FEEDBACK_REENTER(interrupt_return)

	/*
	 * Disable interrupts so as to make sure we don't
	 * miss an interrupt that sets any of the thread flags (like
	 * need_resched or sigpending) between sampling and the iret.
	 * Routines like schedule() or do_signal() may re-enable
	 * interrupts before returning.
	 */
	IRQ_DISABLE(r20, r21)
	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */
	/* Get base of stack in r32; note r30/31 are used as arguments here. */

	/* Check to see if there is any work to do before returning to user. */
	addi    r29, r32, THREAD_INFO_FLAGS_OFFSET
	moveli  r28, lo16(_TIF_ALLWORK_MASK)
	auli    r28, r28, ha16(_TIF_ALLWORK_MASK)
	bnz     r28, .Lwork_pending
	/*
	 * In the NMI case we
	 * omit the call to single_process_check_nohz, which normally checks
	 * to see if we should start or stop the scheduler tick, because
	 * we can't call arbitrary Linux code from an NMI context.
	 * We always call the homecache TLB deferral code to re-trigger
	 * the deferral mechanism.
	 *
	 * The other chunk of responsibility this code has is to reset the
	 * interrupt masks appropriately to reset irqs and NMIs. We have
	 * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
	 * lockdep-type stuff, but we can't set ICS until afterwards, since
	 * ICS can only be used in very tight chunks of code to avoid
	 * tripping over various assertions that it is off.
	 *
	 * (There is what looks like a window of vulnerability here since
	 * we might take a profile interrupt between the two SPR writes
	 * that set the mask, but since we write the low SPR word first,
	 * and our interrupt entry code checks the low SPR word, any
	 * profile interrupt will actually disable interrupts in both SPRs
	 * before returning, which is OK.)
	 */
	PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
	PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
	andi    r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
#if PT_FLAGS_DISABLE_IRQ != 1
# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use bbnst below
#endif
	mtspr   INTERRUPT_CRITICAL_SECTION, r0
	bzt     r30, .Lrestore_regs
	mtspr   INTERRUPT_CRITICAL_SECTION, r0
	bzt     r30, .Lrestore_regs
	/*
	 * We now commit to returning from this interrupt, since we will be
	 * doing things like setting EX_CONTEXT SPRs and unwinding the stack
	 * frame. No calls should be made to any other code after this point.
	 * This code should only be entered with ICS set.
	 * r32 must still be set to ptregs.flags.
	 * We launch loads to each cache line separately first, so we can
	 * get some parallelism out of the memory subsystem.
	 * We start zeroing caller-saved registers throughout, since
	 * that will save some cycles if this turns out to be a syscall.
	 */
	FEEDBACK_REENTER(interrupt_return)  /* called from elsewhere */

	/*
	 * Rotate so we have one high bit and one low bit to test.
	 * - low bit says whether to restore all the callee-saved registers,
	 *   or just r30-r33, and r52 up.
	 * - high bit (i.e. sign bit) says whether to restore all the
	 *   caller-saved registers, or just r0.
	 */
#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
# error Rotate trick does not work :-)
#endif
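	/*
	 * Illustrative sketch of the rotate trick: with CALLER_SAVES == 2
	 * (bit 1) and RESTORE_REGS == 4 (bit 2), rotating the flags word
	 * right by two, roughly
	 *
	 *   r20 = (flags >> 2) | (flags << 30);
	 *
	 * leaves RESTORE_REGS in bit 0 (testable as the "low bit") and
	 * CALLER_SAVES in bit 31 (testable as the sign bit).
	 */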
	PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))

	/*
	 * Load cache lines 0, 2, and 3 in that order, then use
	 * the last loaded value, which makes it likely that the other
	 * cache lines have also loaded, at which point we should be
	 * able to safely read all the remaining words on those cache
	 * lines without waiting for the memory subsystem.
	 */
	pop_reg_zero r0, r28, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
	pop_reg_zero r30, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(30)
	pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
	mtspr   SPR_EX_CONTEXT_K_0, r21
	mtspr   SPR_EX_CONTEXT_K_1, lr
	andi    lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */

	/* Restore callee-saveds that we actually use. */
	pop_reg_zero r52, r6, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_REG(52)
	pop_reg_zero r33, r9, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)
	/*
	 * If we modified other callee-saveds, restore them now.
	 * This is rare, but could be via ptrace or signal handler.
	 */
	bbs     r20, .Lrestore_callees
.Lcontinue_restore_regs:

	/* Check if we're returning from a syscall. */
	blzt    r20, 1f  /* no, so go restore callee-save registers */
	/*
	 * Check if we're returning to userspace.
	 * Note that if we're not, we don't worry about zeroing everything.
	 */
	addli   sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
	bnz     lr, .Lkernel_return
	/*
	 * On return from syscall, we've restored r0 from pt_regs, but we
	 * clear the remainder of the caller-saved registers. We could
	 * restore the syscall arguments, but there's not much point,
	 * and it ensures user programs aren't trying to use the
	 * caller-saves if we clear them, as well as avoiding leaking
	 * kernel pointers into userspace.
	 */
	pop_reg_zero lr, r12, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
	pop_reg_zero tp, r13, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
	{ move r16, zero; move r17, zero }
	{ move r18, zero; move r19, zero }
	{ move r20, zero; move r21, zero }
	{ move r22, zero; move r23, zero }
	{ move r24, zero; move r25, zero }
	{ move r26, zero; move r27, zero }

	/* Set r1 to errno if we are returning an error, otherwise zero. */
	/*
	 * Not a syscall, so restore caller-saved registers.
	 * First kick off a load for cache line 1, which we're touching
	 * for the first time here.
	 */
1:	pop_reg r29, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(29)
	pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
	/* r29 already restored above */
	bnz     lr, .Lkernel_return
	pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
	pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
	/*
	 * We can't restore tp when in kernel mode, since a thread might
	 * have migrated from another cpu and brought a stale tp value.
	 */
	pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR

	/* Restore callee-saved registers from r34 to r51. */
	addli   sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
	pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
	j       .Lcontinue_restore_regs
	/* Mask the reschedule flag */
	andi    r28, r29, _TIF_NEED_RESCHED

	/*
	 * If the NEED_RESCHED flag is set, we call schedule(), which
	 * may drop this context right here and go do something else.
	 * On return, jump back to .Lresume_userspace and recheck.
	 */

	/* Mask the async-tlb flag */
	andi    r28, r29, _TIF_ASYNC_TLB
	FEEDBACK_REENTER(interrupt_return)

	/* Reload the flags and check again */
	j       .Lresume_userspace
	bz      r28, .Lneed_sigpending

	/* Mask the sigpending flag */
	andi    r28, r29, _TIF_SIGPENDING
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	jal     do_async_page_fault
	FEEDBACK_REENTER(interrupt_return)

	/*
	 * Go restart the "resume userspace" process. We may have
	 * fired a signal, and we need to disable interrupts again.
	 */
	j       .Lresume_userspace
	/*
	 * At this point we are either doing signal handling or single-step,
	 * so either way make sure we have all the registers saved.
	 */
	push_extra_callee_saves r0

	/* If no signal pending, skip to singlestep check */
	bz      r28, .Lneed_singlestep

	/* Mask the singlestep flag */
	andi    r28, r29, _TIF_SINGLESTEP
	FEEDBACK_REENTER(interrupt_return)

	/* Reload the flags and check again */
	j       .Lresume_userspace
	/* Get a pointer to the EX1 field */
	PTREGS_PTR(r29, PTREGS_OFFSET_EX1)

	/* If we get here, our bit must be set. */
	bz      r28, .Lwork_confusion

	/* If we are in priv mode, don't single step */
	andi    r28, r28, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	bnz     r28, .Lrestore_all

	/* Allow interrupts within the single step code */
	TRACE_IRQS_ON  /* Note: clobbers registers r0-r29 */
	IRQ_ENABLE(r20, r21)
	/* try to single-step the current instruction */
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	jal     single_step_once
	FEEDBACK_REENTER(interrupt_return)

	/* Re-disable interrupts. TRACE_IRQS_OFF in .Lrestore_all. */
	IRQ_DISABLE(r20, r21)
	panic   "thread_info allwork flags unhandled on userspace resume: %#x"
	STD_ENDPROC(interrupt_return)
/*
 * Some interrupts don't check for single stepping
 */
	.pushsection .text.handle_interrupt_no_single_step,"ax"
handle_interrupt_no_single_step:
	finish_interrupt_save handle_interrupt_no_single_step
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	FEEDBACK_REENTER(handle_interrupt_no_single_step)
	movei   r30, 0  /* not an NMI */
	STD_ENDPROC(handle_interrupt_no_single_step)
/*
 * "NMI" interrupts mask ALL interrupts before calling the
 * handler, and don't check thread flags, etc., on the way
 * back out. In general, the only things we do here for NMIs
 * are the register save/restore, fixing the PC if we were
 * doing single step, and the dataplane kernel-TLB management.
 * We don't (for example) deal with start/stop of the sched tick.
 */
	.pushsection .text.handle_nmi,"ax"
	finish_interrupt_save handle_nmi
	check_single_stepping normal, .Ldispatch_nmi
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	FEEDBACK_REENTER(handle_nmi)
	STD_ENDPROC(handle_nmi)
/*
 * Parallel code for syscalls to handle_interrupt.
 */
	.pushsection .text.handle_syscall,"ax"
	finish_interrupt_save handle_syscall

	/*
	 * Check whether we are single-stepping in user space. If so,
	 * we need to restore the PC.
	 */
	check_single_stepping syscall, .Ldispatch_syscall
	IRQ_ENABLE(r20, r21)

	/* Bump the counter for syscalls made on this tile. */
	moveli  r20, lo16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
	auli    r20, r20, ha16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
	/* Trace syscalls, if requested. */
	GET_THREAD_INFO(r31)
	addi    r31, r31, THREAD_INFO_FLAGS_OFFSET
	andi    r30, r30, _TIF_SYSCALL_TRACE
	bzt     r30, .Lrestore_syscall_regs
	jal     do_syscall_trace
	FEEDBACK_REENTER(handle_syscall)
	/*
	 * We always reload our registers from the stack at this
	 * point. They might be valid, if we didn't build with
	 * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
	 * doing syscall tracing, but there are enough cases now that it
	 * seems simplest just to do the reload unconditionally.
	 */
.Lrestore_syscall_regs:
	PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
	pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
	pop_reg TREG_SYSCALL_NR_NAME, r11
	/* Ensure that the syscall number is within the legal range. */
	moveli  r21, __NR_syscalls
	slt_u   r21, TREG_SYSCALL_NR_NAME, r21
	moveli  r20, lo16(sys_call_table)
	bbns    r21, .Linvalid_syscall
	auli    r20, r20, ha16(sys_call_table)
	s2a     r20, TREG_SYSCALL_NR_NAME, r20
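	/*
	 * In C terms the dispatch above is roughly (an illustrative
	 * sketch):
	 *
	 *   if (nr >= __NR_syscalls)
	 *           goto invalid_syscall;
	 *   handler = sys_call_table[nr];  // s2a scales nr by 4, the
	 *                                  // size of a 32-bit entry
	 */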
	/* Jump to syscall handler. */
.Lhandle_syscall_link:  /* value of "lr" after "jalr r20" above */

	/*
	 * Write our r0 onto the stack so it gets restored instead
	 * of whatever the user had there before.
	 */
	PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
.Lsyscall_sigreturn_skip:
	FEEDBACK_REENTER(handle_syscall)
	/* Do syscall trace again, if requested. */
	andi    r30, r30, _TIF_SYSCALL_TRACE
	jal     do_syscall_trace
	FEEDBACK_REENTER(handle_syscall)
1:	j       .Lresume_userspace  /* jump into middle of interrupt_return */

	/* Report an invalid syscall back to the user program */
	PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
	j       .Lresume_userspace  /* jump into middle of interrupt_return */
	STD_ENDPROC(handle_syscall)
/* Return the address for oprofile to suppress in backtraces. */
STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
	addli   r0, r0, .Lhandle_syscall_link - .
	STD_ENDPROC(handle_syscall_link_address)

STD_ENTRY(ret_from_fork)
	FEEDBACK_REENTER(ret_from_fork)
	j       .Lresume_userspace  /* jump into middle of interrupt_return */
	STD_ENDPROC(ret_from_fork)
/*
 * Code for ill interrupt.
 */
	.pushsection .text.handle_ill,"ax"
	finish_interrupt_save handle_ill

	/*
	 * Check whether we are single-stepping in user space. If so,
	 * we need to restore the PC.
	 */
	check_single_stepping ill, .Ldispatch_normal_ill
	/* See if the PC is the 1st bundle in the buffer */

	/* Point to the 2nd bundle in the buffer */

	/* Point to the original pc */
	addi    r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET

	/* Branch if the PC is the 1st bundle in the buffer */

	/* See if the PC is the 2nd bundle of the buffer */

	/* Set PC to next instruction */
	addi    r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET

	/* Point to flags */
	addi    r25, r29, SINGLESTEP_STATE_FLAGS_OFFSET

	/* Branch if PC is in the second bundle */
	/*
	 * Get the offset for the register to restore
	 * Note: the lower bound is 2, so we have implicit scaling by 4.
	 * No multiplication of the register number by the size of a register
	 * is needed.
	 */
	mm      r27, r25, zero, SINGLESTEP_STATE_TARGET_LB, \
		SINGLESTEP_STATE_TARGET_UB
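	/*
	 * Illustrative reading: the target-register field starts at bit 2,
	 * so extracting bits [LB, UB] in place with 'mm' yields the
	 * register number already multiplied by 4, i.e. a ready-made byte
	 * offset into the saved-register area; roughly
	 *
	 *   offset = flags & (field_mask << 2);  // == regno * 4
	 */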
	/* Mask Rewrite_LR */
	andi    r25, r25, SINGLESTEP_STATE_MASK_UPDATE
	addi    r29, r29, SINGLESTEP_STATE_UPDATE_VALUE_OFFSET

	/* Don't rewrite temp register */

	/* Get the temp value */

	/* Point to where the register is stored */

	/* Add in the C ABI save area size to the register offset */
	addi    r27, r27, C_ABI_SAVE_AREA_SIZE

	/* Restore the user's register with the temp value */
	/* Must be in the third bundle */
	addi    r24, r29, SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET

	/* set PC and continue */
	/*
	 * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
	 * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
	 * need to clear it here and can't really impose on all other arches.
	 * So what's another write between friends?
	 */
	addi    r1, r0, THREAD_INFO_FLAGS_OFFSET
	addi    r0, r0, THREAD_INFO_TASK_OFFSET  /* currently a no-op */
	andi    r2, r2, ~_TIF_SINGLESTEP
	/* Issue a sigtrap */
	lw      r0, r0  /* indirect thru thread_info to get task_struct */
	addi    r1, sp, C_ABI_SAVE_AREA_SIZE  /* put ptregs pointer into r1 */
	move    r2, zero  /* load error code into r2 */
	jal     send_sigtrap  /* issue a SIGTRAP */
	FEEDBACK_REENTER(handle_ill)
	j       .Lresume_userspace  /* jump into middle of interrupt_return */
.Ldispatch_normal_ill:
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	FEEDBACK_REENTER(handle_ill)
	movei   r30, 0  /* not an NMI */
	STD_ENDPROC(handle_ill)
/* Various stub interrupt handlers and syscall handlers */

STD_ENTRY_LOCAL(_kernel_double_fault)
	mfspr   r1, SPR_EX_CONTEXT_K_0
	addi    sp, sp, -C_ABI_SAVE_AREA_SIZE
	j       kernel_double_fault
	STD_ENDPROC(_kernel_double_fault)

STD_ENTRY_LOCAL(bad_intr)
	mfspr   r2, SPR_EX_CONTEXT_K_0
	panic   "Unhandled interrupt %#x: PC %#lx"
	STD_ENDPROC(bad_intr)
/* Put address of pt_regs in reg and jump. */
#define PTREGS_SYSCALL(x, reg)                          \
	PTREGS_PTR(reg, PTREGS_OFFSET_BASE);            \

/*
 * Special-case sigreturn to not write r0 to the stack on return.
 * This is technically more efficient, but it also avoids difficulties
 * in the 64-bit OS when handling 32-bit compat code, since we must not
 * sign-extend r0 for the sigreturn return-value case.
 */
#define PTREGS_SYSCALL_SIGRETURN(x, reg)                \
	addli   lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
	PTREGS_PTR(reg, PTREGS_OFFSET_BASE);            \

PTREGS_SYSCALL(sys_execve, r3)
PTREGS_SYSCALL(sys_sigaltstack, r2)
PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)
/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
STD_ENTRY(_sys_clone)
	push_extra_callee_saves r4
	STD_ENDPROC(_sys_clone)
/*
 * This entrypoint is taken for the cmpxchg and atomic_update fast
 * swints. We may wish to generalize it to other fast swints at some
 * point, but for now there are just two very similar ones, which
 * makes it OK.
 *
 * The fast swint code is designed to have a small footprint. It does
 * not save or restore any GPRs, counting on the caller-save registers
 * to be available to it on entry. It does not modify any callee-save
 * registers (including "lr"). It does not check what PL it is being
 * called at, so you'd better not call it other than at PL0.
 * The <atomic.h> wrapper assumes it only clobbers r20-r29, so if
 * it ever is necessary to use more registers, be aware.
 *
 * It does not use the stack, but since it might be re-interrupted by
 * a page fault which would assume the stack was valid, it does
 * save/restore the stack pointer and zero it out to make sure it gets reset.
 * Since we always keep interrupts disabled, the hypervisor won't
 * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
 * (other than to advance the PC on return).
 *
 * We have to manually validate the user vs kernel address range
 * (since at PL1 we can read/write both), and for performance reasons
 * we don't allow cmpxchg on the fc000000 memory region, since we only
 * validate that the user address is below PAGE_OFFSET.
 *
 * We place it in the __HEAD section to ensure it is relatively
 * near to the intvec_SWINT_1 code (reachable by a conditional branch).
 *
 * Must match register usage in do_page_fault().
 */
	/* Align much later jump on the start of a cache line. */
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
#if PAGE_SIZE >= 0x10000
	/*
	 * Save "sp" and set it zero for any possible page fault.
	 *
	 * HACK: We want to both zero sp and check r0's alignment,
	 * so we do both at once. If "sp" becomes nonzero we
	 * know r0 is unaligned and branch to the error handler that
	 * restores sp, so this is OK.
	 *
	 * ICS is disabled right now so having a garbage but nonzero
	 * sp is OK, since we won't execute any faulting instructions
	 * when it is nonzero.
	 */
	/*
	 * Get the lock address in ATOMIC_LOCK_REG, and also validate that the
	 * address is less than PAGE_OFFSET, since that won't trap at PL1.
	 * We only use bits less than PAGE_SHIFT to avoid having to worry
	 * about aliasing among multiple mappings of the same physical page,
	 * and we ignore the low 3 bits so we have one lock that covers
	 * both a cmpxchg64() and a cmpxchg() on either its low or high word.
	 * NOTE: this must match __atomic_hashed_lock() in lib/atomic_32.c.
	 */
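	/*
	 * Illustrative sketch of the hashed-lock computation this code and
	 * __atomic_hashed_lock() must agree on (non-table flavor; see the
	 * C-pointer-arithmetic comment further below):
	 *
	 *   lock = (int *)((char *)atomic_locks +
	 *                  (((addr >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1))
	 *                   << 2));
	 */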
#if (PAGE_OFFSET & 0xffff) != 0
# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
#endif
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

	/* Check for unaligned input. */
	bnz     sp, .Lcmpxchg_badaddr
	mm      r25, r0, zero, 3, PAGE_SHIFT-1
	crc32_32 r25, zero, r25
	moveli  r21, lo16(atomic_lock_ptr)
	auli    r21, r21, ha16(atomic_lock_ptr)
	auli    r23, zero, hi16(PAGE_OFFSET)  /* hugepage-aligned */
	shri    r20, r25, 32 - ATOMIC_HASH_L1_SHIFT
	/*
	 * Ensure that the TLB is loaded before we take out the lock.
	 * On TILEPro, this will start fetching the value all the way
	 * into our L1 as well (and if it gets modified before we
	 * grab the lock, it will be invalidated from our cache
	 * before we reload it). On tile64, we'll start fetching it
	 * into our L1 if we're the home, and if we're not, we'll
	 * still at least start fetching it into the home's L2.
	 */
	bbns    r23, .Lcmpxchg_badaddr
	seqi    r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
	andi    r25, r25, ATOMIC_HASH_L2_SIZE - 1

	/* Branch away at this point if we're doing a 64-bit cmpxchg. */
	bbs     r23, .Lcmpxchg64
	andi    r23, r0, 7  /* Precompute alignment for cmpxchg64. */
	/*
	 * We very carefully align the code that actually runs with
	 * the lock held (nine bundles) so that we know it is all in
	 * the icache when we start. This instruction (the jump) is
	 * at the start of the first cache line, address zero mod 64;
	 * we jump to somewhere in the second cache line to issue the
	 * tns, then jump back to finish up.
	 */
	s2a     ATOMIC_LOCK_REG_NAME, r25, r21
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/* Check for unaligned input. */
	bnz     sp, .Lcmpxchg_badaddr
	auli    r23, zero, hi16(PAGE_OFFSET)  /* hugepage-aligned */
	/*
	 * Slide bits into position for 'mm'. We want to ignore
	 * the low 3 bits of r0, and consider only the next
	 * ATOMIC_HASH_SHIFT bits.
	 * Because of C pointer arithmetic, we want to compute this:
	 *
	 * ((char*)atomic_locks +
	 *  (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2))
	 *
	 * Instead of two shifts we just ">> 1", and use 'mm'
	 * to ignore the low and high bits we don't want.
	 */
	/*
	 * Ensure that the TLB is loaded before we take out the lock.
	 * On tilepro, this will start fetching the value all the way
	 * into our L1 as well (and if it gets modified before we
	 * grab the lock, it will be invalidated from our cache
	 * before we reload it). On tile64, we'll start fetching it
	 * into our L1 if we're the home, and if we're not, we'll
	 * still at least start fetching it into the home's L2.
	 */
	auli    r21, zero, ha16(atomic_locks)
	bbns    r23, .Lcmpxchg_badaddr
#if PAGE_SIZE < 0x10000
	/* atomic_locks is page-aligned so for big pages we don't need this. */
	addli   r21, r21, lo16(atomic_locks)
	/*
	 * Insert the hash bits into the page-aligned pointer.
	 * ATOMIC_HASH_SHIFT is so big that we don't actually hash
	 * the unmasked address bits, as that may cause unnecessary
	 * collisions.
	 */
	mm      ATOMIC_LOCK_REG_NAME, r25, r21, 2, (ATOMIC_HASH_SHIFT + 2) - 1
	seqi    r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64

	/* Branch away at this point if we're doing a 64-bit cmpxchg. */
	bbs     r23, .Lcmpxchg64
	andi    r23, r0, 7  /* Precompute alignment for cmpxchg64. */
	/*
	 * We very carefully align the code that actually runs with
	 * the lock held (nine bundles) so that we know it is all in
	 * the icache when we start. This instruction (the jump) is
	 * at the start of the first cache line, address zero mod 64;
	 * we jump to somewhere in the second cache line to issue the
	 * tns, then jump back to finish up.
	 */

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
ENTRY(__sys_cmpxchg_grab_lock)

	/*
	 * Perform the actual cmpxchg or atomic_update.
	 * Note that the system <arch/atomic.h> header relies on
	 * atomic_update() to always perform an "mf", so don't make
	 * it optional or conditional without modifying that code.
	 */
	seqi    r23, TREG_SYSCALL_NR_NAME, __NR_FAST_atomic_update
	seq     r22, r21, r1    /* See if cmpxchg matches. */
	and     r25, r21, r1    /* If atomic_update, compute (*mem & mask) */
	or      r22, r22, r23   /* Skip compare branch for atomic_update. */
	add     r25, r25, r2    /* Compute (*mem & mask) + addend. */
	mvnz    r24, r23, r25   /* Use atomic_update value if appropriate. */
	bbns    r22, .Lcmpxchg32_mismatch
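	/*
	 * Illustrative C semantics of the two fast paths, with the old
	 * value already loaded into r21 (a sketch, not the literal code):
	 *
	 *   cmpxchg:        if (*mem == oldval) *mem = newval;  return old;
	 *   atomic_update:  *mem = (*mem & mask) + addend;      return old;
	 */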
	/* Do slow mtspr here so the following "mf" waits less. */
	mtspr   SPR_EX_CONTEXT_K_0, r28

	/* The following instruction is the start of the second cache line. */
	sw      ATOMIC_LOCK_REG_NAME, zero

	/* Duplicated code here in the case where we don't overlap "mf" */
.Lcmpxchg32_mismatch:
	sw      ATOMIC_LOCK_REG_NAME, zero
	mtspr   SPR_EX_CONTEXT_K_0, r28
	/*
	 * The locking code is the same for 32-bit cmpxchg/atomic_update,
	 * and for 64-bit cmpxchg. We provide it as a macro and put
	 * it into both versions. We can't share the code literally
	 * since it depends on having the right branch-back address.
	 * Note that the first few instructions should share the cache
	 * line with the second half of the actual locked code.
	 */
	.macro  cmpxchg_lock, bitwidth

	/* Lock; if we succeed, jump back up to the read-modify-write. */
	tns     r21, ATOMIC_LOCK_REG_NAME
	/*
	 * Non-SMP preserves all the lock infrastructure, to keep the
	 * code simpler for the interesting (SMP) case. However, we do
	 * one small optimization here and in atomic_asm.S, which is
	 * to fake out acquiring the actual lock in the atomic_lock table.
	 */

	/* Issue the slow SPR here while the tns result is in flight. */
	mfspr   r28, SPR_EX_CONTEXT_K_0
	addi    r28, r28, 8  /* return to the instruction after the swint1 */
	bzt     r21, .Ldo_cmpxchg\bitwidth
	/*
	 * The preceding instruction is the last thing that must be
	 * on the second cache line.
	 */

	/*
	 * We failed to acquire the tns lock on our first try. Now use
	 * bounded exponential backoff to retry, like __atomic_spinlock().
	 */
	moveli  r23, 2048  /* maximum backoff time in cycles */
	moveli  r25, 32    /* starting backoff time in cycles */
1:	mfspr   r26, CYCLE_LOW  /* get start point for this backoff */
2:	mfspr   r22, CYCLE_LOW  /* test to see if we've backed off enough */
	shli    r25, r25, 1  /* double the backoff; retry the tns */
	tns     r21, ATOMIC_LOCK_REG_NAME
	slt     r26, r23, r25  /* is the proposed backoff too big? */
	bzt     r21, .Ldo_cmpxchg\bitwidth
#endif /* CONFIG_SMP */
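	/*
	 * Illustrative C shape of the bounded exponential backoff above
	 * (a sketch; try_tns_lock()/get_cycle_low() are hypothetical
	 * stand-ins for the tns and the CYCLE_LOW SPR read, and the
	 * unsigned subtraction absorbs CYCLE_LOW wraparound):
	 *
	 *   for (delay = 32; !try_tns_lock(); ) {
	 *           start = get_cycle_low();
	 *           while ((unsigned)(get_cycle_low() - start) < delay)
	 *                   ;  // spin for the backoff interval
	 *           delay *= 2;
	 *           if (delay > 2048)
	 *                   delay = 2048;
	 *   }
	 */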
	/*
	 * This code is invoked from sys_cmpxchg after most of the
	 * preconditions have been checked. We still need to check
	 * that r0 is 8-byte aligned, since if it's not we won't
	 * actually be atomic. However, ATOMIC_LOCK_REG has the atomic
	 * lock pointer and r27/r28 have the saved SP/PC.
	 * r23 is holding "r0 & 7" so we can test for alignment.
	 * The compare value is in r2/r3; the new value is in r4/r5.
	 * On return, we must put the old value in r0/r1.
	 */
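	/*
	 * Illustrative semantics, treating the r2/r3 and r4/r5 pairs as
	 * the two halves of 64-bit values (a sketch only; the code below
	 * works word-by-word under the lock):
	 *
	 *   u64 old = *(u64 *)r0;
	 *   if (old == compare)          // compare built from r2/r3
	 *           *(u64 *)r0 = newval; // new value built from r4/r5
	 *   return old;                  // delivered in r0/r1
	 */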
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	s2a     ATOMIC_LOCK_REG_NAME, r25, r21
	bzt     r23, .Lcmpxchg64_tns
	bz      r26, .Lcmpxchg64_mismatch
	bz      r26, .Lcmpxchg64_mismatch
	/*
	 * The 32-bit path provides optimized "match" and "mismatch"
	 * iret paths, but we don't have enough bundles in this cache line
	 * to do that, so we just make even the "mismatch" path do an "mf".
	 */
.Lcmpxchg64_mismatch:
	mtspr   SPR_EX_CONTEXT_K_0, r28
	sw      ATOMIC_LOCK_REG_NAME, zero
	/*
	 * Reset sp and revector to sys_cmpxchg_badaddr(), which will
	 * just raise the appropriate signal and exit. Doing it this
	 * way means we don't have to duplicate the code in intvec.S's
	 * int_hand macro that locates the top of the stack.
	 */
	moveli  TREG_SYSCALL_NR_NAME, __NR_cmpxchg_badaddr
	ENDPROC(sys_cmpxchg)
ENTRY(__sys_cmpxchg_end)
	/* The single-step support may need to read all the registers. */
	push_extra_callee_saves r0
/* Include .intrpt1 array of interrupt vectors */
	.section ".intrpt1", "ax"

#define op_handle_perf_interrupt bad_intr
#define op_handle_aux_perf_interrupt bad_intr

#ifndef CONFIG_HARDWALL
#define do_hardwall_trap bad_intr
#endif
	int_hand     INT_ITLB_MISS, ITLB_MISS, \
		     do_page_fault, handle_interrupt_no_single_step
	int_hand     INT_MEM_ERROR, MEM_ERROR, bad_intr
	int_hand     INT_ILL, ILL, do_trap, handle_ill
	int_hand     INT_GPV, GPV, do_trap
	int_hand     INT_SN_ACCESS, SN_ACCESS, do_trap
	int_hand     INT_IDN_ACCESS, IDN_ACCESS, do_trap
	int_hand     INT_UDN_ACCESS, UDN_ACCESS, do_trap
	int_hand     INT_IDN_REFILL, IDN_REFILL, bad_intr
	int_hand     INT_UDN_REFILL, UDN_REFILL, bad_intr
	int_hand     INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
	int_hand     INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
	int_hand     INT_SWINT_3, SWINT_3, do_trap
	int_hand     INT_SWINT_2, SWINT_2, do_trap
	int_hand     INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
	int_hand     INT_SWINT_0, SWINT_0, do_trap
	int_hand     INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
	int_hand     INT_DTLB_MISS, DTLB_MISS, do_page_fault
	int_hand     INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
	int_hand     INT_DMATLB_MISS, DMATLB_MISS, do_page_fault
	int_hand     INT_DMATLB_ACCESS, DMATLB_ACCESS, do_page_fault
	int_hand     INT_SNITLB_MISS, SNITLB_MISS, do_page_fault
	int_hand     INT_SN_NOTIFY, SN_NOTIFY, bad_intr
	int_hand     INT_SN_FIREWALL, SN_FIREWALL, do_hardwall_trap
	int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
	int_hand     INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
	int_hand     INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
	int_hand     INT_IDN_TIMER, IDN_TIMER, bad_intr
	int_hand     INT_UDN_TIMER, UDN_TIMER, bad_intr
	int_hand     INT_DMA_NOTIFY, DMA_NOTIFY, bad_intr
	int_hand     INT_IDN_CA, IDN_CA, bad_intr
	int_hand     INT_UDN_CA, UDN_CA, bad_intr
	int_hand     INT_IDN_AVAIL, IDN_AVAIL, bad_intr
	int_hand     INT_UDN_AVAIL, UDN_AVAIL, bad_intr
	int_hand     INT_PERF_COUNT, PERF_COUNT, \
		     op_handle_perf_interrupt, handle_nmi
	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr
#if CONFIG_KERNEL_PL == 2
	dc_dispatch  INT_INTCTRL_2, INTCTRL_2
	int_hand     INT_INTCTRL_1, INTCTRL_1, bad_intr
#else
	int_hand     INT_INTCTRL_2, INTCTRL_2, bad_intr
	dc_dispatch  INT_INTCTRL_1, INTCTRL_1
#endif
	int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
	int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
		     hv_message_intr
	int_hand     INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \
		     tile_dev_intr
	int_hand     INT_I_ASID, I_ASID, bad_intr
	int_hand     INT_D_ASID, D_ASID, bad_intr
	int_hand     INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \
		     do_page_fault
	int_hand     INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \
		     do_page_fault
	int_hand     INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \
		     do_page_fault
	int_hand     INT_SN_CPL, SN_CPL, bad_intr
	int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
#if CHIP_HAS_AUX_PERF_COUNTERS()
	int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
		     op_handle_aux_perf_interrupt, handle_nmi
#endif

	/* Synthetic interrupt delivered only by the simulator */
	int_hand     INT_BREAKPOINT, BREAKPOINT, do_breakpoint