ia64/pv_ops/xen: paravirtualize DO_SAVE_MIN for xen.
[pv_ops_mirror.git] / include / asm-ia64 / xen / inst.h
blob93bceae3c66f4d36abad69906d1a38d930b0402b
/******************************************************************************
 * include/asm-ia64/xen/inst.h
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <asm/xen/privop.h>

/* Use the Xen-paravirtualized variant of the DO_SAVE_MIN entry macro. */
#define DO_SAVE_MIN		XEN_DO_SAVE_MIN
/*
 * Paravirtualized reads of privileged control registers: instead of the
 * privileged "mov reg = cr.xxx", load the shadowed value from the Xen
 * shared mapped-register area (the XSI_* addresses).
 */
#define MOV_FROM_IFA(reg)	\
	movl reg = XSI_IFA;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_ITIR(reg)	\
	movl reg = XSI_ITIR;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_ISR(reg)	\
	movl reg = XSI_ISR;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_IHA(reg)	\
	movl reg = XSI_IHA;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_IPSR(pred, reg)	\
(pred)	movl reg = XSI_IPSR;		\
	;;				\
(pred)	ld8 reg = [reg]

#define MOV_FROM_IIM(reg)	\
	movl reg = XSI_IIM;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_IIP(reg)	\
	movl reg = XSI_IIP;	\
	;;			\
	ld8 reg = [reg]
/*
 * Read cr.ivr via the XEN_HYPER_GET_IVR hypercall, which returns its
 * result in r8.  The .ifc special cases avoid useless copies when the
 * destination or the scratch register already is r8; otherwise r8 is
 * saved in \clob and restored afterwards.
 */
.macro __MOV_FROM_IVR reg, clob
	.ifc "\reg", "r8"
	XEN_HYPER_GET_IVR
	.exitm
	.endif
	.ifc "\clob", "r8"
	XEN_HYPER_GET_IVR
	;;
	mov \reg = r8
	.exitm
	.endif
	.ifc "\reg", "\clob"
	.error "it should be reg \reg != clob \clob"
	.endif

	mov \clob = r8
	;;
	XEN_HYPER_GET_IVR
	;;
	mov \reg = r8
	;;
	mov r8 = \clob
	.endm
#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob
/*
 * Read psr via the XEN_HYPER_GET_PSR hypercall (result in r8), with the
 * same r8-preservation scheme as __MOV_FROM_IVR, predicated on \pred.
 */
.macro __MOV_FROM_PSR pred, reg, clob
	.ifc "\reg", "r8"
	(\pred)	XEN_HYPER_GET_PSR;
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	XEN_HYPER_GET_PSR
	;;
	(\pred)	mov \reg = r8
	.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	XEN_HYPER_GET_PSR
	;;
	(\pred)	mov \reg = r8
	(\pred)	mov r8 = \clob
	.endm
#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob
/*
 * Paravirtualized writes of privileged control registers: store into
 * the Xen shared mapped-register area instead of "mov cr.xxx = reg".
 * clob is a scratch register used to hold the XSI_* address.
 */
#define MOV_TO_IFA(reg, clob)	\
	movl clob = XSI_IFA;	\
	;;			\
	st8 [clob] = reg

#define MOV_TO_ITIR(pred, reg, clob)	\
(pred)	movl clob = XSI_ITIR;		\
	;;				\
(pred)	st8 [clob] = reg

#define MOV_TO_IHA(pred, reg, clob)	\
(pred)	movl clob = XSI_IHA;		\
	;;				\
(pred)	st8 [clob] = reg

#define MOV_TO_IPSR(pred, reg, clob)	\
(pred)	movl clob = XSI_IPSR;		\
	;;				\
(pred)	st8 [clob] = reg;		\
	;;

#define MOV_TO_IFS(pred, reg, clob)	\
(pred)	movl clob = XSI_IFS;		\
	;;				\
(pred)	st8 [clob] = reg;		\
	;;

#define MOV_TO_IIP(reg, clob)	\
	movl clob = XSI_IIP;	\
	;;			\
	st8 [clob] = reg
/*
 * Write kernel register \kr via the XEN_HYPER_SET_KR hypercall, which
 * takes the kr index in r8 and the value in r9.  The caller-visible
 * r8/r9 are preserved in \clob0/\clob1 around the call; the .ifnc
 * guards skip save/restore when an operand already lives in r8/r9.
 * Constraint (checked below): clob0 != r9 and clob1 != r8.
 */
.macro ____MOV_TO_KR kr, reg, clob0, clob1
	.ifc "\clob0", "r9"
	.error "clob0 \clob0 must not be r9"
	.endif
	.ifc "\clob1", "r8"
	.error "clob1 \clob1 must not be r8"
	.endif

	.ifnc "\reg", "r9"
	.ifnc "\clob1", "r9"
	mov \clob1 = r9
	.endif
	mov r9 = \reg
	.endif
	.ifnc "\clob0", "r8"
	mov \clob0 = r8
	.endif
	mov r8 = \kr
	;;
	XEN_HYPER_SET_KR
	;;
	.ifnc "\reg", "r9"
	.ifnc "\clob1", "r9"
	mov r9 = \clob1
	.endif
	.endif
	.ifnc "\clob0", "r8"
	mov r8 = \clob0
	.endif
	.endm

/* Swap the scratch registers when needed so ____MOV_TO_KR's
 * clob0 != r9 / clob1 != r8 constraint is always satisfied. */
.macro __MOV_TO_KR kr, reg, clob0, clob1
	.ifc "\clob0", "r9"
	____MOV_TO_KR \kr, \reg, \clob1, \clob0
	.exitm
	.endif
	.ifc "\clob1", "r8"
	____MOV_TO_KR \kr, \reg, \clob1, \clob0
	.exitm
	.endif

	____MOV_TO_KR \kr, \reg, \clob0, \clob1
	.endm
#define MOV_TO_KR(kr, reg, clob0, clob1) \
	__MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
/*
 * itc.i replacement: insert an instruction-TLB translation via the
 * XEN_HYPER_ITC_I hypercall, which takes the pte value in r8.
 * r8 is saved in \clob and restored unless an operand already is r8.
 */
.macro __ITC_I pred, reg, clob
	.ifc "\reg", "r8"
	(\pred)	XEN_HYPER_ITC_I
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_I
	.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_I
	;;
	(\pred)	mov r8 = \clob
	;;
	.endm
#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob
/*
 * itc.d replacement: insert a data-TLB translation via the
 * XEN_HYPER_ITC_D hypercall (pte value in r8); same r8-preservation
 * scheme as __ITC_I.
 */
.macro __ITC_D pred, reg, clob
	.ifc "\reg", "r8"
	(\pred)	XEN_HYPER_ITC_D
	;;
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_D
	;;
	.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_D
	;;
	(\pred)	mov r8 = \clob
	;;
	.endm
#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob
/*
 * Combined itc.i/itc.d replacement: each insert is individually
 * predicated (\pred_i / \pred_d) but the r8 save/restore is done
 * unconditionally, so \clob is always clobbered.
 */
.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
	.ifc "\reg", "r8"
	(\pred_i)XEN_HYPER_ITC_I
	;;
	(\pred_d)XEN_HYPER_ITC_D
	;;
	.exitm
	.endif
	.ifc "\clob", "r8"
	mov r8 = \reg
	;;
	(\pred_i)XEN_HYPER_ITC_I
	;;
	(\pred_d)XEN_HYPER_ITC_D
	;;
	.exitm
	.endif

	mov \clob = r8
	mov r8 = \reg
	;;
	(\pred_i)XEN_HYPER_ITC_I
	;;
	(\pred_d)XEN_HYPER_ITC_D
	;;
	mov r8 = \clob
	;;
	.endm
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
	__ITC_I_AND_D pred_i, pred_d, reg, clob
/*
 * thash replacement via the XEN_HYPER_THASH hypercall: input address in
 * r8, hashed-entry address returned in r8.  The branches avoid
 * redundant copies / preserve r8 depending on which operand is r8.
 * Note: was ".endc" after the first branch; ".endif" matches the
 * conditional and the convention used everywhere else in this file.
 */
.macro __THASH pred, reg0, reg1, clob
	.ifc "\reg0", "r8"
	(\pred)	mov r8 = \reg1
	(\pred)	XEN_HYPER_THASH
	.exitm
	.endif
	.ifc "\reg1", "r8"
	(\pred)	XEN_HYPER_THASH
	;;
	(\pred)	mov \reg0 = r8
	;;
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	mov r8 = \reg1
	(\pred)	XEN_HYPER_THASH
	;;
	(\pred)	mov \reg0 = r8
	;;
	.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg1
	(\pred)	XEN_HYPER_THASH
	;;
	(\pred)	mov \reg0 = r8
	(\pred)	mov r8 = \clob
	;;
	.endm
#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob
/* Set the virtual psr.ic bit kept in the shared area at XSI_PSR_IC. */
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)	\
	mov clob0 = 1;						\
	movl clob1 = XSI_PSR_IC;				\
	;;							\
	st4 [clob1] = clob0					\
	;;

#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)	\
	;;					\
	srlz.d;					\
	mov clob1 = 1;				\
	movl clob0 = XSI_PSR_IC;		\
	;;					\
	st4 [clob0] = clob1

/* Clear the virtual psr.ic bit (store of r0, i.e. zero). */
#define RSM_PSR_IC(clob)	\
	movl clob = XSI_PSR_IC;	\
	;;			\
	st4 [clob] = r0;	\
	;;
/* pred will be clobbered */
#define MASK_TO_PEND_OFS    (-1)
#define SSM_PSR_I(pred, pred_clob, clob)	\
(pred)	movl clob = XSI_PSR_I_ADDR		\
	;;					\
(pred)	ld8 clob = [clob]			\
	;;					\
	/* if (pred) vpsr.i = 1 */		\
	/* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */	\
(pred)	st1 [clob] = r0, MASK_TO_PEND_OFS	\
	;;					\
	/* if (vcpu->vcpu_info->evtchn_upcall_pending) */	\
(pred)	ld1 clob = [clob]			\
	;;					\
(pred)	cmp.ne.unc pred_clob, p0 = clob, r0	\
	;;					\
(pred_clob)XEN_HYPER_SSM_I	/* do a real ssm psr.i */
/*
 * Mask event delivery: set vcpu->vcpu_info->evtchn_upcall_mask.
 * clob0/clob1 are written unconditionally; only the final store is
 * predicated on pred.
 */
#define RSM_PSR_I(pred, clob0, clob1)	\
	movl clob0 = XSI_PSR_I_ADDR;	\
	mov clob1 = 1;			\
	;;				\
	ld8 clob0 = [clob0];		\
	;;				\
(pred)	st1 [clob0] = clob1
#define RSM_PSR_I_IC(clob0, clob1, clob2)		\
	movl clob0 = XSI_PSR_I_ADDR;			\
	movl clob1 = XSI_PSR_IC;			\
	;;						\
	ld8 clob0 = [clob0];				\
	mov clob2 = 1;					\
	;;						\
	/* note: clears both vpsr.i and vpsr.ic! */	\
	st1 [clob0] = clob2;				\
	st4 [clob1] = r0;				\
	;;

/* psr.dt manipulation goes through dedicated hypercalls. */
#define RSM_PSR_DT		\
	XEN_HYPER_RSM_PSR_DT

#define SSM_PSR_DT_AND_SRLZ_I	\
	XEN_HYPER_SSM_PSR_DT
/*
 * bsw.0 replacement: spill bank-1 registers r16-r31 (plus their NaT
 * bits, via ar.unat) into the XSI_BANK1_R16.. save area, then select
 * bank 0 by storing 0 to XSI_BANKNUM.  Two pointers (clob0/clob1) are
 * interleaved so the stores dual-issue; clob2 preserves the caller's
 * ar.unat across the spills.
 */
#define BSW_0(clob0, clob1, clob2)				\
	;;							\
	/* r16-r31 all now hold bank1 values */			\
	mov clob2 = ar.unat;					\
	movl clob0 = XSI_BANK1_R16;				\
	movl clob1 = XSI_BANK1_R16 + 8;				\
	;;							\
	.mem.offset 0, 0; st8.spill [clob0] = r16, 16;		\
	.mem.offset 8, 0; st8.spill [clob1] = r17, 16;		\
	;;							\
	.mem.offset 0, 0; st8.spill [clob0] = r18, 16;		\
	.mem.offset 8, 0; st8.spill [clob1] = r19, 16;		\
	;;							\
	.mem.offset 0, 0; st8.spill [clob0] = r20, 16;		\
	.mem.offset 8, 0; st8.spill [clob1] = r21, 16;		\
	;;							\
	.mem.offset 0, 0; st8.spill [clob0] = r22, 16;		\
	.mem.offset 8, 0; st8.spill [clob1] = r23, 16;		\
	;;							\
	.mem.offset 0, 0; st8.spill [clob0] = r24, 16;		\
	.mem.offset 8, 0; st8.spill [clob1] = r25, 16;		\
	;;							\
	.mem.offset 0, 0; st8.spill [clob0] = r26, 16;		\
	.mem.offset 8, 0; st8.spill [clob1] = r27, 16;		\
	;;							\
	.mem.offset 0, 0; st8.spill [clob0] = r28, 16;		\
	.mem.offset 8, 0; st8.spill [clob1] = r29, 16;		\
	;;							\
	.mem.offset 0, 0; st8.spill [clob0] = r30, 16;		\
	.mem.offset 8, 0; st8.spill [clob1] = r31, 16;		\
	;;							\
	/* save the accumulated NaT bits of r16-r31 */		\
	mov clob1 = ar.unat;					\
	movl clob0 = XSI_B1NAT;					\
	;;							\
	st8 [clob0] = clob1;					\
	mov ar.unat = clob2;					\
	movl clob0 = XSI_BANKNUM;				\
	;;							\
	st4 [clob0] = r0
/* FIXME: THIS CODE IS NOT NaT SAFE! */
/*
 * bsw.1 replacement: select bank 1 (store 1 to XSI_BANKNUM) and refill
 * r16-r31 from the XSI_BANK1_R16.. save area, restoring their NaT bits
 * from XSI_B1NAT via ar.unat.  r30/r31 double as the fill pointers and
 * are loaded last; clob preserves the caller's ar.unat.
 */
#define XEN_BSW_1(clob)			\
	mov clob = ar.unat;		\
	movl r30 = XSI_B1NAT;		\
	;;				\
	ld8 r30 = [r30];		\
	mov r31 = 1;			\
	;;				\
	mov ar.unat = r30;		\
	movl r30 = XSI_BANKNUM;		\
	;;				\
	st4 [r30] = r31;		\
	movl r30 = XSI_BANK1_R16;	\
	movl r31 = XSI_BANK1_R16+8;	\
	;;				\
	ld8.fill r16 = [r30], 16;	\
	ld8.fill r17 = [r31], 16;	\
	;;				\
	ld8.fill r18 = [r30], 16;	\
	ld8.fill r19 = [r31], 16;	\
	;;				\
	ld8.fill r20 = [r30], 16;	\
	ld8.fill r21 = [r31], 16;	\
	;;				\
	ld8.fill r22 = [r30], 16;	\
	ld8.fill r23 = [r31], 16;	\
	;;				\
	ld8.fill r24 = [r30], 16;	\
	ld8.fill r25 = [r31], 16;	\
	;;				\
	ld8.fill r26 = [r30], 16;	\
	ld8.fill r27 = [r31], 16;	\
	;;				\
	ld8.fill r28 = [r30], 16;	\
	ld8.fill r29 = [r31], 16;	\
	;;				\
	ld8.fill r30 = [r30];		\
	ld8.fill r31 = [r31];		\
	;;				\
	mov ar.unat = clob
/*
 * Out-of-line bsw.1: call the shared xen_bsw1 routine instead of
 * expanding XEN_BSW_1 at every site (in-place expansion costs too much
 * code space — see the comment above BSW_1).  xen_bsw1 clobbers
 * clob1 = r14; clob0 preserves b0 across the call.
 */
/* xen_bsw1 clobbers clob1 = r14 */
.macro ____BSW_1 clob0, clob1
	.ifc "\clob0", "r14"
	.error "clob0 \clob0 must not be r14"
	.endif
	.ifnc "\clob1", "r14"
	.error "clob1 \clob1 must be r14"
	.endif
	.ifc "\clob0", "\clob1"
	.error "it must be clob0 \clob0 != clob1 \clob1"
	.endif

	mov \clob0 = b0
	br.call.sptk b0 = xen_bsw1
	;;
	mov b0 = \clob0
	;;
	.endm

/* Normalize argument order so that r14, if present, is clob1. */
.macro __BSW_1 clob0, clob1
	.ifc "\clob0", "r14"
	____BSW_1 \clob1, \clob0
	.exitm
	.endif
	.ifc "\clob1", "r14"
	____BSW_1 \clob0, \clob1
	.exitm
	.endif
	.ifc "\clob0", "\clob1"
	.error "it must be clob0 \clob0 != clob1 \clob1"
	.endif

	.warning "use r14 as second argument \clob0 \clob1"
	mov \clob1 = r14
	____BSW_1 \clob0, r14
	mov r14 = \clob1
	.endm

/* in place code generating causes lack of space */
/* #define BSW_1(clob0, clob1)	XEN_BSW_1(clob1) */
#define BSW_1(clob0, clob1)	__BSW_1 clob0, clob1
#define COVER	\
	XEN_HYPER_COVER

#define RFI			\
	XEN_HYPER_RFI;		\
	dv_serialize_data