/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/ia64/kernel/relocate_kernel.S
 *
 * Relocate kexec'able kernel and start it
 *
 * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
 * Copyright (C) 2005 Khalid Aziz  <khalid.aziz@hp.com>
 * Copyright (C) 2005 Intel Corp,  Zou Nan hai <nanhai.zou@intel.com>
 */
#include <asm/asmmacro.h>
#include <asm/kregs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mca_asm.h>

        /* Must be relocatable PIC code callable as a C function
         */
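        /*
         * Arguments (inferred from how they are used below and from the
         * machine_kexec() call site):
         *   in0 = kexec page_list head
         *   in1 = entry point of the new kernel (branched to via b6)
         *   in2 = boot parameter block (converted to a physical address)
         *   in3 = base of the PAL code granule whose TR entry is purged
         */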
GLOBAL_ENTRY(relocate_new_kernel)
        .prologue
        alloc r31=ar.pfs,4,0,0,0
        .body
.reloc_entry:
{
        rsm psr.i| psr.ic
        mov r2=ip
}
        ;;
{
        flushrs                         // must be first insn in group
        srlz.i
}
        ;;
        dep r2=0,r2,61,3                //to physical address
        ;;
        //first switch to physical mode
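        // (the PSR value built below leaves psr.it/psr.dt/psr.rt clear, so
        //  the rfi lands at 1: with instruction and data translation off)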
        add r3=1f-.reloc_entry, r2
        movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC
        mov ar.rsc=0                    // put RSE in enforced lazy mode
        ;;
        add sp=(memory_stack_end - 16 - .reloc_entry),r2
        add r8=(register_stack - .reloc_entry),r2
        ;;
        mov r18=ar.rnat
        mov ar.bspstore=r8
        ;;
        mov cr.ipsr=r16
        mov cr.iip=r3
        mov cr.ifs=r0
        srlz.i
        ;;
        mov ar.rnat=r18
        rfi                             // note: this unmasks MCA/INIT (psr.mc)
        ;;
1:
        //physical mode code begin
        mov b6=in1
        dep r28=0,in2,61,3      //to physical address

        // purge all TC entries
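        // Walk the ptc.e purge sequence described by the per-CPU PAL PTCE
        // info: count[0] outer iterations by stride[0], each running
        // count[1] inner iterations by stride[1] starting from ptce_base
        // (outer loop by hand at 2:, inner loop via ar.lc/br.cloop at 3:)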
#define O(member)       IA64_CPUINFO_##member##_OFFSET
        GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
        ;;
        addl r17=O(PTCE_STRIDE),r2
        addl r2=O(PTCE_BASE),r2
        ;;
        ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;     // r18=ptce_base
        ld4 r19=[r2],4                                  // r19=ptce_count[0]
        ld4 r21=[r17],4                                 // r21=ptce_stride[0]
        ;;
        ld4 r20=[r2]                                    // r20=ptce_count[1]
        ld4 r22=[r17]                                   // r22=ptce_stride[1]
        mov r24=r0
        ;;
        adds r20=-1,r20
        ;;
#undef O
2:
        cmp.ltu p6,p7=r24,r19
(p7)    br.cond.dpnt.few 4f
        mov ar.lc=r20
3:
        ptc.e r18
        ;;
        add r18=r22,r18
        br.cloop.sptk.few 3b
        ;;
        add r18=r21,r18
        add r24=1,r24
        ;;
        br.sptk.few 2b
4:
        srlz.i
        ;;
        // purge TR entry for kernel text and data
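        // (drop the pinned ITR/DTR pair that maps the kernel image at
        //  KERNEL_START so the new kernel starts with a clean TLB)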
        movl r16=KERNEL_START
        mov r18=KERNEL_TR_PAGE_SHIFT<<2
        ;;
        ptr.i r16, r18
        ptr.d r16, r18
        ;;
        srlz.i
        ;;

        // purge TR entry for pal code
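        // in3 carries the granule-aligned PAL code address; only the
        // instruction side is purged, since PAL code is mapped with an ITR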
        mov r16=in3
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        ptr.i r16,r18
        ;;
        srlz.i
        ;;

        // purge TR entry for stack
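        // IA64_KR(CURRENT_STACK) holds the granule number of the current
        // task's kernel stack; rebuild its virtual address (granule number
        // << IA64_GRANULE_SHIFT, plus PAGE_OFFSET) to purge the matching DTR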
        mov r16=IA64_KR(CURRENT_STACK)
        ;;
        shl r16=r16,IA64_GRANULE_SHIFT
        movl r19=PAGE_OFFSET
        ;;
        add r16=r19,r16
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        ptr.d r16,r18
        ;;
        srlz.i
        ;;

        //copy segments
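        // The page_list is the usual kexec tagged-pointer list: each entry
        // is a page address with flag bits in its low bits, tested below.
        // 0x1 marks a destination page, 0x2 an indirection page, 0x4 the
        // end of the list, and 0x8 a source page to copy to the current
        // destination.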
        movl r16=PAGE_MASK
        mov  r30=in0                    // in0 is page_list
        br.sptk.few .dest_page
        ;;
.loop:
        ld8  r30=[in0], 8;;
.dest_page:
        tbit.z p0, p6=r30, 0;;          // 0x1 dest page
(p6)    and r17=r30, r16
(p6)    br.cond.sptk.few .loop;;

        tbit.z p0, p6=r30, 1;;          // 0x2 indirect page
(p6)    and in0=r30, r16
(p6)    br.cond.sptk.few .loop;;

        tbit.z p0, p6=r30, 2;;          // 0x4 end flag
(p6)    br.cond.sptk.few .end_loop;;

        tbit.z p6, p0=r30, 3;;          // 0x8 source page
(p6)    br.cond.sptk.few .loop

        and r18=r30, r16

        // simple copy page, may optimize later
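        // copy PAGE_SIZE/8 doublewords from the source (r18) to the current
        // destination (r17) with a counted loop (ar.lc/br.ctop); fc.i
        // flushes each destination line so the copied text is coherent with
        // the i-cache before we jump into it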
        movl r14=PAGE_SIZE/8 - 1;;
        mov ar.lc=r14;;
1:
        ld8 r14=[r18], 8;;
        st8 [r17]=r14;;
        fc.i r17
        add r17=8, r17
        br.ctop.sptk.few 1b
        br.sptk.few .loop
        ;;

.end_loop:
        sync.i                  // for fc.i
        ;;
        srlz.i
        ;;
        srlz.d
        ;;
        br.call.sptk.many b0=b6;;
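        // b6 holds the new kernel's entry point (from in1); this call is
        // not expected to return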
.align  32
memory_stack:
        .fill           8192, 1, 0
memory_stack_end:
register_stack:
        .fill           8192, 1, 0
register_stack_end:
relocate_new_kernel_end:
END(relocate_new_kernel)

.global relocate_new_kernel_size
relocate_new_kernel_size:
        data8   relocate_new_kernel_end - relocate_new_kernel

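// The size symbol covers the whole position-independent blob, code plus the
// stacks above; presumably machine_kexec() uses it to copy the blob to a
// safe page before branching to it.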
GLOBAL_ENTRY(ia64_dump_cpu_regs)
        .prologue
        alloc loc0=ar.pfs,1,2,0,0
        .body
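        // in0 points to the save area: r0-r31 land at offsets 0..31*8
        // (r4/r5 are written out first, since r4/r5 are reused as scratch
        // below), and the special registers (rnat, pr, b0-b7, ip, cfm,
        // user mask, ar.*) follow from offset 32*8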
        mov     ar.rsc=0                // put RSE in enforced lazy mode
        add     loc1=4*8, in0           // save r4 and r5 first
        ;;
{
        flushrs                         // flush dirty regs to backing store
        srlz.i
}
        st8 [loc1]=r4, 8
        ;;
        st8 [loc1]=r5, 8
        ;;
        add loc1=32*8, in0
        mov r4=ar.rnat
        ;;
        st8 [in0]=r0, 8                 // r0
        st8 [loc1]=r4, 8                // rnat
        mov r5=pr
        ;;
        st8 [in0]=r1, 8                 // r1
        st8 [loc1]=r5, 8                // pr
        mov r4=b0
        ;;
        st8 [in0]=r2, 8                 // r2
        st8 [loc1]=r4, 8                // b0
        mov r5=b1
        ;;
        st8 [in0]=r3, 24                // r3
        st8 [loc1]=r5, 8                // b1
        mov r4=b2
        ;;
        st8 [in0]=r6, 8                 // r6
        st8 [loc1]=r4, 8                // b2
        mov r5=b3
        ;;
        st8 [in0]=r7, 8                 // r7
        st8 [loc1]=r5, 8                // b3
        mov r4=b4
        ;;
        st8 [in0]=r8, 8                 // r8
        st8 [loc1]=r4, 8                // b4
        mov r5=b5
        ;;
        st8 [in0]=r9, 8                 // r9
        st8 [loc1]=r5, 8                // b5
        mov r4=b6
        ;;
        st8 [in0]=r10, 8                // r10
        st8 [loc1]=r4, 8                // b6
        mov r5=b7
        ;;
        st8 [in0]=r11, 8                // r11
        st8 [loc1]=r5, 8                // b7
        mov r4=b0
        ;;
        st8 [in0]=r12, 8                // r12
        st8 [loc1]=r4, 8                // ip
        mov r5=loc0
        ;;
        st8 [in0]=r13, 8                // r13
        extr.u r5=r5, 0, 38             // ar.pfs.pfm
        mov r4=r0                       // user mask
        ;;
        st8 [in0]=r14, 8                // r14
        st8 [loc1]=r5, 8                // cfm
        ;;
        st8 [in0]=r15, 8                // r15
        st8 [loc1]=r4, 8                // user mask
        mov r5=ar.rsc
        ;;
        st8 [in0]=r16, 8                // r16
        st8 [loc1]=r5, 8                // ar.rsc
        mov r4=ar.bsp
        ;;
        st8 [in0]=r17, 8                // r17
        st8 [loc1]=r4, 8                // ar.bsp
        mov r5=ar.bspstore
        ;;
        st8 [in0]=r18, 8                // r18
        st8 [loc1]=r5, 8                // ar.bspstore
        mov r4=ar.rnat
        ;;
        st8 [in0]=r19, 8                // r19
        st8 [loc1]=r4, 8                // ar.rnat
        mov r5=ar.ccv
        ;;
        st8 [in0]=r20, 8                // r20
        st8 [loc1]=r5, 8                // ar.ccv
        mov r4=ar.unat
        ;;
        st8 [in0]=r21, 8                // r21
        st8 [loc1]=r4, 8                // ar.unat
        mov r5 = ar.fpsr
        ;;
        st8 [in0]=r22, 8                // r22
        st8 [loc1]=r5, 8                // ar.fpsr
        mov r4 = ar.unat
        ;;
        st8 [in0]=r23, 8                // r23
        st8 [loc1]=r4, 8                // unat
        mov r5 = ar.fpsr
        ;;
        st8 [in0]=r24, 8                // r24
        st8 [loc1]=r5, 8                // fpsr
        mov r4 = ar.pfs
        ;;
        st8 [in0]=r25, 8                // r25
        st8 [loc1]=r4, 8                // ar.pfs
        mov r5 = ar.lc
        ;;
        st8 [in0]=r26, 8                // r26
        st8 [loc1]=r5, 8                // ar.lc
        mov r4 = ar.ec
        ;;
        st8 [in0]=r27, 8                // r27
        st8 [loc1]=r4, 8                // ar.ec
        mov r5 = ar.csd
        ;;
        st8 [in0]=r28, 8                // r28
        st8 [loc1]=r5, 8                // ar.csd
        mov r4 = ar.ssd
        ;;
        st8 [in0]=r29, 8                // r29
        st8 [loc1]=r4, 8                // ar.ssd
        ;;
        st8 [in0]=r30, 8                // r30
        ;;
        st8 [in0]=r31, 8                // r31
        mov ar.pfs=loc0
        ;;
        br.ret.sptk.many rp
END(ia64_dump_cpu_regs)