x86/efi: Enforce CONFIG_RELOCATABLE for EFI boot stub
[linux/fpc-iii.git] / arch / sparc / kernel / rtrap_64.S
blobafa2a9e3d0a0c6734785dfaafc68538d7e393b51
1 /*
2  * rtrap.S: Preparing for return from trap on Sparc V9.
3  *
4  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6  */
9 #include <asm/asi.h>
10 #include <asm/pstate.h>
11 #include <asm/ptrace.h>
12 #include <asm/spitfire.h>
13 #include <asm/head.h>
14 #include <asm/visasm.h>
15 #include <asm/processor.h>
/* %pstate images used on the return-from-trap paths:
 *   RTRAP_PSTATE           - IRQs enabled (PSTATE_IE set)
 *   RTRAP_PSTATE_IRQOFF    - same, with IRQs disabled (no PSTATE_IE)
 *   RTRAP_PSTATE_AG_IRQOFF - IRQs disabled with alternate globals selected
 *                            (pre-sun4v only; see the .sun4v_2insn_patch use below)
 */
17 #define         RTRAP_PSTATE            (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
18 #define         RTRAP_PSTATE_IRQOFF     (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
19 #define         RTRAP_PSTATE_AG_IRQOFF  (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
21                 .text
22                 .align                  32
/* A reschedule is pending on return to user: enable IRQs in the call's
 * delay slot, run schedule(), then disable IRQs again (delay slot of the
 * branch) and redo all of the work-flag tests from the top.
 */
23 __handle_preemption:
24                 call                    schedule
25                  wrpr                   %g0, RTRAP_PSTATE, %pstate
26                 ba,pt                   %xcc, __handle_preemption_continue
27                  wrpr                   %g0, RTRAP_PSTATE_IRQOFF, %pstate
/* The thread has stashed user register windows (TI_WSAVED != 0, tested
 * at the dispatch site): push them out to the user stack with IRQs on,
 * then recheck every work flag with IRQs off again.
 */
29 __handle_user_windows:
30                 call                    fault_in_user_windows
31                  wrpr                   %g0, RTRAP_PSTATE, %pstate
32                 ba,pt                   %xcc, __handle_preemption_continue
33                  wrpr                   %g0, RTRAP_PSTATE_IRQOFF, %pstate
/* Reached when the saved tstate (%l1) has TSTATE_PEF set.  If the live
 * %fprs no longer has FPRS_FEF, clear TSTATE_PEF from %l1 via the
 * annulled delay slot (executed only when the branch is taken), so the
 * FPU stays disabled for the user — presumably forcing a lazy reload on
 * next FPU use (NOTE(review): confirm against the fp_disabled trap path).
 */
35 __handle_userfpu:
36                 rd                      %fprs, %l5
37                 andcc                   %l5, FPRS_FEF, %g0
38                 sethi                   %hi(TSTATE_PEF), %o0
39                 be,a,pn                 %icc, __handle_userfpu_continue
40                  andn                   %l1, %o0, %l1
41                 ba,a,pt                 %xcc, __handle_userfpu_continue
/* Notify-resume work is pending: call
 *   do_notify_resume(%o0 = pt_regs, %o1 = %l5, %o2 = %l0 thread flags)
 * with IRQs enabled (call delay slot), then disable IRQs again.
 * %l5 is set up by the callers of rtrap — presumably orig_i0 for
 * syscall restart handling; confirm against the do_notify_resume
 * prototype.
 */
43 __handle_signal:
44                 mov                     %l5, %o1
45                 add                     %sp, PTREGS_OFF, %o0
46                 mov                     %l0, %o2
47                 call                    do_notify_resume
48                  wrpr                   %g0, RTRAP_PSTATE, %pstate
49                 wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
51                 /* Signal delivery can modify pt_regs tstate, so we must
52                  * reload it.
53                  */
54                 ldx                     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
/* Re-split tstate: %l4 = PIL field (bits 20-23, left unshifted here),
 * %l1 = tstate with the PIL bits cleared; then redo the work checks.
 */
55                 sethi                   %hi(0xf << 20), %l4
56                 and                     %l1, %l4, %l4
57                 ba,pt                   %xcc, __handle_preemption_continue
58                  andn                   %l1, %l4, %l1
60                 /* When returning from a NMI (%pil==15) interrupt we want to
61                  * avoid running softirqs, doing IRQ tracing, preempting, etc.
62                  */
63                 .globl                  rtrap_nmi
/* %l1 = saved tstate minus its PIL field, %l4 = that PIL value shifted
 * down to 0..15.  %pil is restored in the branch delay slot and the
 * irq-enable/tracing code at rtrap is bypassed entirely.
 */
64 rtrap_nmi:      ldx                     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
65                 sethi                   %hi(0xf << 20), %l4
66                 and                     %l1, %l4, %l4
67                 andn                    %l1, %l4, %l1
68                 srl                     %l4, 20, %l4
69                 ba,pt                   %xcc, rtrap_no_irq_enable
70                  wrpr                   %l4, %pil
72                 .align                  64
73                 .globl                  rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
74 rtrap_irq:
75 rtrap:
76                 /* mm/ultra.S:xcall_report_regs KNOWS about this load. */
77                 ldx                     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
78 rtrap_xcall:
/* Split the saved tstate: %l4 = PIL field shifted to 0..15 (written to
 * %pil much later, at rt_continue time), %l1 = tstate minus those bits.
 */
79                 sethi                   %hi(0xf << 20), %l4
80                 and                     %l1, %l4, %l4
81                 andn                    %l1, %l4, %l1
82                 srl                     %l4, 20, %l4
83 #ifdef CONFIG_TRACE_IRQFLAGS
/* Only trace hardirqs-on when the interrupted context ran with
 * %pil == 0; otherwise skip the tracing call entirely.
 */
84                 brnz,pn                 %l4, rtrap_no_irq_enable
85                  nop
86                 call                    trace_hardirqs_on
87                  nop
88                 /* Do not actually set the %pil here.  We will do that
89                  * below after we clear PSTATE_IE in the %pstate register.
90                  * If we re-enable interrupts here, we can recurse down
91                  * the hardirq stack potentially endlessly, causing a
92                  * stack overflow.
93                  *
94                  * It is tempting to put this test and trace_hardirqs_on
95                  * call at the 'rt_continue' label, but that will not work
96                  * as that path hits unconditionally and we do not want to
97                  * execute this in NMI return paths, for example.
98                  */
99 #endif
100 rtrap_no_irq_enable:
/* %l3 = TSTATE_PRIV bit of the saved tstate (also tested again at
 * rt_continue): nonzero means we are returning to kernel mode.
 */
101                 andcc                   %l1, TSTATE_PRIV, %l3
102                 bne,pn                  %icc, to_kernel
103                  nop
105                 /* We must hold IRQs off and atomically test schedule+signal
106                  * state, then hold them off all the way back to userspace.
107                  * If we are returning to kernel, none of this matters.  Note
108                  * that we are disabling interrupts via PSTATE_IE, not using
109                  * %pil.
110                  *
111                  * If we do not do this, there is a window where we would do
112                  * the tests, later the signal/resched event arrives but we do
113                  * not process it since we are still in kernel mode.  It would
114                  * take until the next local IRQ before the signal/resched
115                  * event would be handled.
116                  *
117                  * This also means that if we have to deal with user
118                  * windows, we have to redo all of these sched+signal checks
119                  * with IRQs disabled.
120                  */
121 to_user:        wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
122                 wrpr                    0, %pil
123 __handle_preemption_continue:
/* %l0 = current thread flags; fast path: nothing in
 * _TIF_USER_WORK_MASK set.  The delay slot of the be,pt pre-tests
 * TSTATE_PEF in the saved tstate for the user_nowork FPU check.
 */
124                 ldx                     [%g6 + TI_FLAGS], %l0
125                 sethi                   %hi(_TIF_USER_WORK_MASK), %o0
126                 or                      %o0, %lo(_TIF_USER_WORK_MASK), %o0
127                 andcc                   %l0, %o0, %g0
128                 sethi                   %hi(TSTATE_PEF), %o0
129                 be,pt                   %xcc, user_nowork
130                  andcc                  %l1, %o0, %g0
/* Dispatch pending work in priority order; each handler jumps back to
 * __handle_preemption_continue so all flags are rechecked:
 * reschedule, then signals/notify-resume, then saved user windows.
 */
131                 andcc                   %l0, _TIF_NEED_RESCHED, %g0
132                 bne,pn                  %xcc, __handle_preemption
133                  andcc                  %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
134                 bne,pn                  %xcc, __handle_signal
135                  ldub                   [%g6 + TI_WSAVED], %o2
136                 brnz,pn                 %o2, __handle_user_windows
137                  nop
138                 sethi                   %hi(TSTATE_PEF), %o0
139                 andcc                   %l1, %o0, %g0
141                 /* This fpdepth clear is necessary for non-syscall rtraps only */
142 user_nowork:
143                 bne,pn                  %xcc, __handle_userfpu
144                  stb                    %g0, [%g6 + TI_FPDEPTH]
145 __handle_userfpu_continue:
/* Common exit: reload the normal global registers from pt_regs.
 * %l3 is nonzero (TSTATE_PRIV set) when returning to kernel mode; a
 * user return skips LOAD_PER_CPU_BASE via the "1:" label below.
 *
 * FIX: the local label "1:" (original line 158) had been lost; without
 * it the "brz,pt %l3, 1f" would bind to the next "1:" far below in the
 * kern_fpucheck FPU-restore code, skipping the %g6/%g7 restore on
 * every return to userspace.  Restored here.
 */
147 rt_continue:    ldx                     [%sp + PTREGS_OFF + PT_V9_G1], %g1
148                 ldx                     [%sp + PTREGS_OFF + PT_V9_G2], %g2
150                 ldx                     [%sp + PTREGS_OFF + PT_V9_G3], %g3
151                 ldx                     [%sp + PTREGS_OFF + PT_V9_G4], %g4
152                 ldx                     [%sp + PTREGS_OFF + PT_V9_G5], %g5
/* Delay slot: preserve the thread pointer in %l2 before %g6 is
 * overwritten below; it is moved back into %g6 after the trap-globals
 * switch.
 */
153                 brz,pt                  %l3, 1f
154                 mov                     %g6, %l2
156                 /* Must do this before thread reg is clobbered below.  */
157                 LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
158 1:
159                 ldx                     [%sp + PTREGS_OFF + PT_V9_G6], %g6
160                 ldx                     [%sp + PTREGS_OFF + PT_V9_G7], %g7
162                 /* Normal globals are restored, go to trap globals.  */
/* Pre-sun4v: switch to the alternate-global register set (PSTATE_AG).
 * sun4v has no alternate globals, so the patch section rewrites these
 * two instructions to stay on normal pstate and select global level 1.
 */
163 661:            wrpr                    %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
164                 nop
165                 .section                .sun4v_2insn_patch, "ax"
166                 .word                   661b
167                 wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
168                 SET_GL(1)
169                 .previous
/* %l2 holds the thread pointer saved at rt_continue; reinstall it as
 * the trap-global %g6.
 */
171                 mov                     %l2, %g6
173                 ldx                     [%sp + PTREGS_OFF + PT_V9_I0], %i0
174                 ldx                     [%sp + PTREGS_OFF + PT_V9_I1], %i1
176                 ldx                     [%sp + PTREGS_OFF + PT_V9_I2], %i2
177                 ldx                     [%sp + PTREGS_OFF + PT_V9_I3], %i3
178                 ldx                     [%sp + PTREGS_OFF + PT_V9_I4], %i4
179                 ldx                     [%sp + PTREGS_OFF + PT_V9_I5], %i5
180                 ldx                     [%sp + PTREGS_OFF + PT_V9_I6], %i6
181                 ldx                     [%sp + PTREGS_OFF + PT_V9_I7], %i7
182                 ldx                     [%sp + PTREGS_OFF + PT_V9_TPC], %l2
183                 ldx                     [%sp + PTREGS_OFF + PT_V9_TNPC], %o2
/* Restore %y, the interrupted %pil (%l4, computed at rtrap entry), and
 * set TL=1 so %tstate/%tpc/%tnpc address trap level 1.  TSTATE_SYSCALL
 * is stripped before writing tstate back.
 */
185                 ld                      [%sp + PTREGS_OFF + PT_V9_Y], %o3
186                 wr                      %o3, %g0, %y
187                 wrpr                    %l4, 0x0, %pil
188                 wrpr                    %g0, 0x1, %tl
189                 andn                    %l1, TSTATE_SYSCALL, %l1
190                 wrpr                    %l1, %g0, %tstate
191                 wrpr                    %l2, %g0, %tpc
192                 wrpr                    %o2, %g0, %tnpc
/* Kernel returns (TSTATE_PRIV set in %l3) take the simpler kern_rtt
 * path; delay slot preloads PRIMARY_CONTEXT for the user path.
 */
194                 brnz,pn                 %l3, kern_rtt
195                  mov                    PRIMARY_CONTEXT, %l7
/* Read the MMU context register at offset %l7 + %l7 (i.e. twice
 * PRIMARY_CONTEXT — presumably the secondary context register, which
 * holds the user's context while the kernel runs; confirm against
 * spitfire.h register layout).  sun4v uses ASI_MMU instead of ASI_DMMU.
 */
197 661:            ldxa                    [%l7 + %l7] ASI_DMMU, %l0
198                 .section                .sun4v_1insn_patch, "ax"
199                 .word                   661b
200                 ldxa                    [%l7 + %l7] ASI_MMU, %l0
201                 .previous
/* OR in the kernel nucleus page-size bits and install the result as the
 * primary context, then flush the instruction pipe via a kernel address.
 */
203                 sethi                   %hi(sparc64_kern_pri_nuc_bits), %l1
204                 ldx                     [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
205                 or                      %l0, %l1, %l0
207 661:            stxa                    %l0, [%l7] ASI_DMMU
208                 .section                .sun4v_1insn_patch, "ax"
209                 .word                   661b
210                 stxa                    %l0, [%l7] ASI_MMU
211                 .previous
213                 sethi                   %hi(KERNBASE), %l7
214                 flush                   %l7
/* Window bookkeeping: %canrestore = old %otherwin, user %wstate
 * (= kernel wstate >> 3), %otherwin cleared in the delay slot.  If
 * there is a restorable window, go restore it directly.
 */
215                 rdpr                    %wstate, %l1
216                 rdpr                    %otherwin, %l2
217                 srl                     %l1, 3, %l1
219                 wrpr                    %l2, %g0, %canrestore
220                 wrpr                    %l1, %g0, %wstate
221                 brnz,pt                 %l2, user_rtt_restore
222                  wrpr                   %g0, %g0, %otherwin
/* No restorable window: fill one from the user stack (ASI_AIUP, CWP
 * rotated back by one in the delay slot), choosing the 32-bit or
 * 64-bit fill routine from _TIF_32BIT.
 */
224                 ldx                     [%g6 + TI_FLAGS], %g3
225                 wr                      %g0, ASI_AIUP, %asi
226                 rdpr                    %cwp, %g1
227                 andcc                   %g3, _TIF_32BIT, %g0
228                 sub                     %g1, 1, %g1
229                 bne,pt                  %xcc, user_rtt_fill_32bit
230                  wrpr                   %g1, %cwp
231                 ba,a,pt                 %xcc, user_rtt_fill_64bit
/* The window fill from the user stack faulted.  Undo the CWP rotation,
 * go back to kernel window state (wstate << 3), reinstall the kernel
 * primary context, record a WINFIXUP fault in the thread (fault address
 * in %g5 — presumably set by the fill handler; confirm), then call
 * do_sparc64_fault() and retry the whole return via rtrap.
 */
233 user_rtt_fill_fixup:
234                 rdpr    %cwp, %g1
235                 add     %g1, 1, %g1
236                 wrpr    %g1, 0x0, %cwp
238                 rdpr    %wstate, %g2
239                 sll     %g2, 3, %g2
240                 wrpr    %g2, 0x0, %wstate
242                 /* We know %canrestore and %otherwin are both zero.  */
244                 sethi   %hi(sparc64_kern_pri_context), %g2
245                 ldx     [%g2 + %lo(sparc64_kern_pri_context)], %g2
246                 mov     PRIMARY_CONTEXT, %g1
248 661:            stxa    %g2, [%g1] ASI_DMMU
249                 .section .sun4v_1insn_patch, "ax"
250                 .word   661b
251                 stxa    %g2, [%g1] ASI_MMU
252                 .previous
254                 sethi   %hi(KERNBASE), %g1
255                 flush   %g1
257                 or      %g4, FAULT_CODE_WINFIXUP, %g4
258                 stb     %g4, [%g6 + TI_FAULT_CODE]
259                 stx     %g5, [%g6 + TI_FAULT_ADDR]
/* Drop back to trap level 0; sun4v additionally needs the global
 * register level reset to 0 (patched-in SET_GL).
 */
261                 mov     %g6, %l1
262                 wrpr    %g0, 0x0, %tl
264 661:            nop
265                 .section                .sun4v_1insn_patch, "ax"
266                 .word                   661b
267                 SET_GL(0)
268                 .previous
270                 wrpr    %g0, RTRAP_PSTATE, %pstate
/* Re-establish thread (%g6), task (%g4) and per-cpu base, then take
 * the fault with pt_regs as argument and restart the return path.
 */
272                 mov     %l1, %g6
273                 ldx     [%g6 + TI_TASK], %g4
274                 LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
275                 call    do_sparc64_fault
276                  add    %sp, PTREGS_OFF, %o0
277                 ba,pt   %xcc, rtrap
278                  nop
/* pre_restore entry: advance CWP by one first (caller left %g1 = cwp).
 * Then restore a register window, mark every restorable window clean
 * (%cleanwin = %canrestore), and return to userspace with retry.
 */
280 user_rtt_pre_restore:
281                 add                     %g1, 1, %g1
282                 wrpr                    %g1, 0x0, %cwp
284 user_rtt_restore:
285                 restore
286                 rdpr                    %canrestore, %g1
287                 wrpr                    %g1, 0x0, %cleanwin
288                 retry
289                 nop
/* Return to kernel mode.  If no window is restorable, branch to the
 * kernel fill path (kern_rtt_fill, defined elsewhere); otherwise clear
 * the pt_regs magic word, pop the window and retry the trapped
 * instruction.
 */
291 kern_rtt:       rdpr                    %canrestore, %g1
292                 brz,pn                  %g1, kern_rtt_fill
293                  nop
294 kern_rtt_restore:
295                 stw                     %g0, [%sp + PTREGS_OFF + PT_V9_MAGIC]
296                 restore
297                 retry
/* Kernel preemption on return to kernel mode.  Preempt only when all
 * three hold: preempt_count (TI_PRE_COUNT) is zero, _TIF_NEED_RESCHED
 * is set, and the interrupted context ran with %pil == 0 (%l4, saved
 * at rtrap entry).  PREEMPT_ACTIVE marks the preemption in the count
 * around the schedule() call, and is cleared in the delay slot when
 * restarting rtrap.
 */
299 to_kernel:
300 #ifdef CONFIG_PREEMPT
301                 ldsw                    [%g6 + TI_PRE_COUNT], %l5
302                 brnz                    %l5, kern_fpucheck
303                  ldx                    [%g6 + TI_FLAGS], %l5
304                 andcc                   %l5, _TIF_NEED_RESCHED, %g0
305                 be,pt                   %xcc, kern_fpucheck
306                  nop
307                 cmp                     %l4, 0
308                 bne,pn                  %xcc, kern_fpucheck
309                  sethi                  %hi(PREEMPT_ACTIVE), %l6
310                 stw                     %l6, [%g6 + TI_PRE_COUNT]
311                 call                    schedule
312                  nop
313                 ba,pt                   %xcc, rtrap
314                  stw                    %g0, [%g6 + TI_PRE_COUNT]
315 #endif
/* Pop one level of saved FPU state on kernel return.
 *   %l5 = TI_FPDEPTH (0 => nothing to do, go straight to rt_continue);
 *   %o0 = %l5 >> 1 indexes the per-level save slots;
 *   %l2 = saved %fprs byte for this level (TI_FPSAVED[%o0]);
 *   %l5 is decremented by 2 and written back at the end.
 * FPRS_DL/FPRS_DU in %l2 select which 128-byte halves of the register
 * file were saved; block loads (ASI_BLK_P) bracketed by membar #Sync
 * reload them, along with GSR (TI_GSR) and FSR (TI_XFSR).
 */
316 kern_fpucheck:  ldub                    [%g6 + TI_FPDEPTH], %l5
317                 brz,pt                  %l5, rt_continue
318                  srl                    %l5, 1, %o0
319                 add                     %g6, TI_FPSAVED, %l6
320                 ldub                    [%l6 + %o0], %l2
321                 sub                     %l5, 2, %l5
/* Neither half dirty => just store the new depth (2:).  FEF clear but
 * DU set => restore only the upper half via label 5:.
 */
323                 add                     %g6, TI_GSR, %o1
324                 andcc                   %l2, (FPRS_FEF|FPRS_DU), %g0
325                 be,pt                   %icc, 2f
326                  and                    %l2, FPRS_DL, %l6
327                 andcc                   %l2, FPRS_FEF, %g0
328                 be,pn                   %icc, 5f
329                  sll                    %o0, 3, %o5
330                 rd                      %fprs, %g1
/* Full restore path: enable FEF, fetch this level's GSR into %g1, and
 * point %o1 at the saved FSR.  %o2 = byte offset of this level's
 * register save area (%o0 << 8).
 */
332                 wr                      %g1, FPRS_FEF, %fprs
333                 ldx                     [%o1 + %o5], %g1
334                 add                     %g6, TI_XFSR, %o1
335                 sll                     %o0, 8, %o2
336                 add                     %g6, TI_FPREGS, %o3
/* Skip the lower-half reload when FPRS_DL was clear (%l6 from above). */
337                 brz,pn                  %l6, 1f
338                  add                    %g6, TI_FPREGS+0x40, %o4
340                 membar                  #Sync
341                 ldda                    [%o3 + %o2] ASI_BLK_P, %f0
342                 ldda                    [%o4 + %o2] ASI_BLK_P, %f16
343                 membar                  #Sync
/* Upper half only if FPRS_DU was set; GSR is written in the delay
 * slot either way.
 */
344 1:              andcc                   %l2, FPRS_DU, %g0
345                 be,pn                   %icc, 1f
346                  wr                     %g1, 0, %gsr
347                 add                     %o2, 0x80, %o2
348                 membar                  #Sync
349                 ldda                    [%o3 + %o2] ASI_BLK_P, %f32
350                 ldda                    [%o4 + %o2] ASI_BLK_P, %f48
351 1:              membar                  #Sync
352                 ldx                     [%o1 + %o5], %fsr
353 2:              stb                     %l5, [%g6 + TI_FPDEPTH]
354                 ba,pt                   %xcc, rt_continue
355                  nop
/* FEF clear, DU set: restore only %f32-%f62 from the second half of
 * the save area, then leave %fprs = FPRS_DU.
 */
356 5:              wr                      %g0, FPRS_FEF, %fprs
357                 sll                     %o0, 8, %o2
359                 add                     %g6, TI_FPREGS+0x80, %o3
360                 add                     %g6, TI_FPREGS+0xc0, %o4
361                 membar                  #Sync
362                 ldda                    [%o3 + %o2] ASI_BLK_P, %f32
363                 ldda                    [%o4 + %o2] ASI_BLK_P, %f48
364                 membar                  #Sync
365                 wr                      %g0, FPRS_DU, %fprs
366                 ba,pt                   %xcc, rt_continue
367                  stb                    %l5, [%g6 + TI_FPDEPTH]