/******************************************************************************
 * include/asm-ia64/xen/inst.h
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
23 #include <asm/xen/privop.h>
/* Route the generic SAVE_MIN entry-path macro to the Xen pv variant. */
#define DO_SAVE_MIN		XEN_DO_SAVE_MIN
/*
 * Read a virtualized control register.  Under Xen the privileged
 * "mov reg = cr.*" is replaced by loading the value the hypervisor
 * mirrors into the XSI (shared-info) area: load the XSI slot address,
 * stop bit, then ld8 the saved value.
 */
#define MOV_FROM_IFA(reg)	\
	movl reg = XSI_IFA;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_ITIR(reg)	\
	movl reg = XSI_ITIR;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_ISR(reg)	\
	movl reg = XSI_ISR;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_IHA(reg)	\
	movl reg = XSI_IHA;	\
	;;			\
	ld8 reg = [reg]

/* Predicated variant: only executes when qualifying predicate is set. */
#define MOV_FROM_IPSR(pred, reg)	\
(pred)	movl reg = XSI_IPSR;		\
	;;				\
(pred)	ld8 reg = [reg]

#define MOV_FROM_IIM(reg)	\
	movl reg = XSI_IIM;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_IIP(reg)	\
	movl reg = XSI_IIP;	\
	;;			\
	ld8 reg = [reg]
/*
 * Read cr.ivr via the XEN_HYPER_GET_IVR hypercall, which returns its
 * result in r8.  \clob names a caller-sacrificed register used to
 * preserve r8 when the result is wanted elsewhere.
 * NOTE(review): interior reconstructed from the visible macro pattern
 * (extraction dropped lines) — confirm against the original header.
 */
.macro __MOV_FROM_IVR reg, clob
	.ifc "\reg", "r8"
		/* result is wanted in r8 anyway; nothing to save */
		XEN_HYPER_GET_IVR
		.exitm
	.endif
	.ifc "\clob", "r8"
		/* r8 may be clobbered, so no save/restore dance */
		XEN_HYPER_GET_IVR
		;;
		mov \reg = r8
		.exitm
	.endif

	.ifc "\reg", "\clob"
		.error "it should be reg \reg != clob \clob"
	.endif

	/* general case: stash r8 in \clob around the hypercall */
	mov \clob = r8
	;;
	XEN_HYPER_GET_IVR
	;;
	mov \reg = r8
	;;
	mov r8 = \clob
.endm
#define MOV_FROM_IVR(reg, clob)	__MOV_FROM_IVR reg, clob
/*
 * Read psr via the XEN_HYPER_GET_PSR hypercall (result in r8),
 * predicated on \pred.  \clob preserves r8 when \reg != r8.
 */
.macro __MOV_FROM_PSR pred, reg, clob
	.ifc "\reg", "r8"
		/* result goes straight to r8 */
		(\pred)	XEN_HYPER_GET_PSR;
		.exitm
	.endif
	.ifc "\clob", "r8"
		/* r8 is expendable: copy the result out, no save needed */
		(\pred)	XEN_HYPER_GET_PSR
		;;
		(\pred)	mov \reg = r8
		.exitm
	.endif

	/* general case: save r8 in \clob, hypercall, copy out, restore */
	(\pred)	mov \clob = r8
	(\pred)	XEN_HYPER_GET_PSR
	;;
	(\pred)	mov \reg = r8
	(\pred)	mov r8 = \clob
.endm
#define MOV_FROM_PSR(pred, reg, clob)	__MOV_FROM_PSR pred, reg, clob
/*
 * Write a virtualized control register: store the value into its XSI
 * slot, from which the hypervisor picks it up.  "clob" is a scratch
 * register used to hold the XSI slot address.
 */
#define MOV_TO_IFA(reg, clob)	\
	movl clob = XSI_IFA;	\
	;;			\
	st8 [clob] = reg

#define MOV_TO_ITIR(pred, reg, clob)	\
(pred)	movl clob = XSI_ITIR;		\
	;;				\
(pred)	st8 [clob] = reg

#define MOV_TO_IHA(pred, reg, clob)	\
(pred)	movl clob = XSI_IHA;		\
	;;				\
(pred)	st8 [clob] = reg

#define MOV_TO_IPSR(pred, reg, clob)	\
(pred)	movl clob = XSI_IPSR;		\
	;;				\
(pred)	st8 [clob] = reg;		\
	;;

#define MOV_TO_IFS(pred, reg, clob)	\
(pred)	movl clob = XSI_IFS;		\
	;;				\
(pred)	st8 [clob] = reg;		\
	;;

#define MOV_TO_IIP(reg, clob)	\
	movl clob = XSI_IIP;	\
	;;			\
	st8 [clob] = reg
/*
 * Write a kernel register via the XEN_HYPER_SET_KR hypercall.  The
 * hypercall consumes r8 (kr index) and r9 (value), so both must be
 * preserved in \clob0/\clob1 unless the caller is passing them in.
 * __MOV_TO_KR reorders the clobbers so ____MOV_TO_KR's constraints
 * (clob0 != r9, clob1 != r8) always hold.
 * NOTE(review): the save/restore interior was reconstructed — the
 * extraction dropped these lines; verify against the original header.
 */
.macro ____MOV_TO_KR kr, reg, clob0, clob1
	.ifc "\clob0", "r9"
		.error "clob0 \clob0 must not be r9"
	.endif
	.ifc "\clob1", "r8"
		.error "clob1 \clob1 must not be r8"
	.endif

	/* save r9/r8 unless they carry the argument or are expendable */
	.ifnc "\reg", "r9"
		.ifnc "\clob1", "r9"
			mov \clob1 = r9
		.endif
	.endif
	.ifnc "\reg", "r8"
		.ifnc "\clob0", "r8"
			mov \clob0 = r8
		.endif
	.endif

	mov r9 = \kr
	mov r8 = \reg
	;;
	XEN_HYPER_SET_KR

	/* restore whatever was saved above */
	.ifnc "\reg", "r9"
		.ifnc "\clob1", "r9"
			mov r9 = \clob1
		.endif
	.endif
	.ifnc "\reg", "r8"
		.ifnc "\clob0", "r8"
			mov r8 = \clob0
		.endif
	.endif
.endm

.macro __MOV_TO_KR kr, reg, clob0, clob1
	.ifc "\clob0", "r9"
		____MOV_TO_KR \kr, \reg, \clob1, \clob0
		.exitm
	.endif
	.ifc "\clob1", "r8"
		____MOV_TO_KR \kr, \reg, \clob1, \clob0
		.exitm
	.endif

	____MOV_TO_KR \kr, \reg, \clob0, \clob1
.endm

#define MOV_TO_KR(kr, reg, clob0, clob1) \
	__MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
/*
 * itc.i replacement: XEN_HYPER_ITC_I inserts an instruction
 * translation whose pte value is passed in r8.  \clob preserves r8
 * when the caller's value lives elsewhere.
 */
.macro __ITC_I pred, reg, clob
	.ifc "\reg", "r8"
	(\pred)	XEN_HYPER_ITC_I
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_I
	.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_I
	;;
	(\pred)	mov r8 = \clob
	;;
.endm
#define ITC_I(pred, reg, clob)	__ITC_I pred, reg, clob
/*
 * itc.d replacement: XEN_HYPER_ITC_D inserts a data translation
 * whose pte value is passed in r8.  \clob preserves r8 when needed.
 */
.macro __ITC_D pred, reg, clob
	.ifc "\reg", "r8"
	(\pred)	XEN_HYPER_ITC_D
	;;
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_D
	;;
	.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_D
	;;
	(\pred)	mov r8 = \clob
	;;
.endm
#define ITC_D(pred, reg, clob)	__ITC_D pred, reg, clob
/*
 * Combined itc.i/itc.d insertion of the same pte (in r8), each under
 * its own predicate.  The r8 shuffling is unpredicated because either
 * hypercall may fire.
 */
.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
	.ifc "\reg", "r8"
	(\pred_i)XEN_HYPER_ITC_I
	;;
	(\pred_d)XEN_HYPER_ITC_D
	;;
	.exitm
	.endif
	.ifc "\clob", "r8"
	mov r8 = \reg
	;;
	(\pred_i)XEN_HYPER_ITC_I
	;;
	(\pred_d)XEN_HYPER_ITC_D
	;;
	.exitm
	.endif

	mov \clob = r8
	mov r8 = \reg
	;;
	(\pred_i)XEN_HYPER_ITC_I
	;;
	(\pred_d)XEN_HYPER_ITC_D
	;;
	mov r8 = \clob
	;;
.endm
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
	__ITC_I_AND_D pred_i, pred_d, reg, clob
/*
 * thash replacement: XEN_HYPER_THASH takes the address in r8 and
 * returns the VHPT hash address in r8.  \reg0 receives the result,
 * \reg1 supplies the address, \clob preserves r8 when necessary.
 */
.macro __THASH pred, reg0, reg1, clob
	.ifc "\reg0", "r8"
	(\pred)	mov r8 = \reg1
	(\pred)	XEN_HYPER_THASH
	.exitm
	.endif
	.ifc "\reg1", "r8"
	(\pred)	XEN_HYPER_THASH
	;;
	(\pred)	mov \reg0 = r8
	;;
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	mov r8 = \reg1
	(\pred)	XEN_HYPER_THASH
	;;
	(\pred)	mov \reg0 = r8
	;;
	.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg1
	(\pred)	XEN_HYPER_THASH
	;;
	(\pred)	mov \reg0 = r8
	(\pred)	mov r8 = \clob
	;;
.endm
#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob
/*
 * Set/clear the virtual psr.ic flag, which Xen keeps as a 4-byte word
 * at XSI_PSR_IC.  Non-zero = interruption collection enabled.
 */
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)	\
	mov clob0 = 1;						\
	movl clob1 = XSI_PSR_IC;				\
	;;							\
	st4 [clob1] = clob0					\
	;;

#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)	\
	;;					\
	srlz.d;					\
	mov clob1 = 1;				\
	movl clob0 = XSI_PSR_IC;		\
	;;					\
	st4 [clob0] = clob1

#define RSM_PSR_IC(clob)	\
	movl clob = XSI_PSR_IC;	\
	;;			\
	st4 [clob] = r0;	\
	;;
/* pred will be clobbered */
/* evtchn_upcall_pending sits one byte below evtchn_upcall_mask */
#define MASK_TO_PEND_OFS    (-1)
/*
 * Virtual "ssm psr.i": clear the event-channel upcall mask, then, if
 * an upcall was already pending, make the SSM_I hypercall so the
 * pending event is actually delivered.
 */
#define SSM_PSR_I(pred, pred_clob, clob)				\
(pred)	movl clob = XSI_PSR_I_ADDR					\
	;;								\
(pred)	ld8 clob = [clob]						\
	;;								\
	/* if (pred) vpsr.i = 1 */					\
	/* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */		\
(pred)	st1 [clob] = r0, MASK_TO_PEND_OFS				\
	;;								\
	/* if (vcpu->vcpu_info->evtchn_upcall_pending) */		\
(pred)	ld1 clob = [clob]						\
	;;								\
(pred)	cmp.ne.unc pred_clob, p0 = clob, r0				\
	;;								\
(pred_clob)XEN_HYPER_SSM_I	/* do a real ssm psr.i */
/*
 * Virtual "rsm psr.i": set the event-channel upcall mask (non-zero =
 * interrupts masked) at the address published in XSI_PSR_I_ADDR.
 */
#define RSM_PSR_I(pred, clob0, clob1)	\
	movl clob0 = XSI_PSR_I_ADDR;	\
	mov clob1 = 1;			\
	;;				\
	ld8 clob0 = [clob0];		\
	;;				\
(pred)	st1 [clob0] = clob1

/*
 * Mask events AND disable interruption collection in one sequence.
 */
#define RSM_PSR_I_IC(clob0, clob1, clob2)		\
	movl clob0 = XSI_PSR_I_ADDR;			\
	movl clob1 = XSI_PSR_IC;			\
	;;						\
	ld8 clob0 = [clob0];				\
	mov clob2 = 1;					\
	;;						\
	/* note: clears both vpsr.i and vpsr.ic! */	\
	st1 [clob0] = clob2;				\
	st4 [clob1] = r0;				\
	;;
/* NOTE(review): body was lost in extraction; reconstructed as the
 * matching hypercall stub — confirm against the original header. */
#define SSM_PSR_DT_AND_SRLZ_I	\
	XEN_HYPER_SSM_PSR_DT
/*
 * bsw.0 replacement: spill the bank-1 registers r16-r31 (and their
 * NaT bits via ar.unat) into the XSI save area, then select bank 0 by
 * storing 0 to XSI_BANKNUM.  Two pointers are interleaved so the
 * spills dual-issue.
 */
#define BSW_0(clob0, clob1, clob2)			\
	;;						\
	/* r16-r31 all now hold bank1 values */		\
	mov clob2 = ar.unat;				\
	movl clob0 = XSI_BANK1_R16;			\
	movl clob1 = XSI_BANK1_R16 + 8;			\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r16, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r17, 16;	\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r18, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r19, 16;	\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r20, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r21, 16;	\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r22, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r23, 16;	\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r24, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r25, 16;	\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r26, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r27, 16;	\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r28, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r29, 16;	\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r30, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r31, 16;	\
	;;						\
	/* save the collected NaT bits, restore caller's ar.unat */	\
	mov clob1 = ar.unat;				\
	movl clob0 = XSI_B1NAT;				\
	;;						\
	st8 [clob0] = clob1;				\
	mov ar.unat = clob2;				\
	movl clob0 = XSI_BANKNUM;			\
	;;						\
	st4 [clob0] = r0
/* FIXME: THIS CODE IS NOT NaT SAFE! */
/*
 * bsw.1 replacement: load ar.unat from the saved bank-1 NaT word,
 * select bank 1 by storing 1 to XSI_BANKNUM, then refill r16-r31
 * from the XSI save area.  "clob" preserves the caller's ar.unat.
 * NOTE(review): the BANKNUM store sequence was partially lost in
 * extraction and reconstructed — confirm against the original header.
 */
#define XEN_BSW_1(clob)			\
	mov clob = ar.unat;		\
	movl r30 = XSI_B1NAT;		\
	;;				\
	ld8 r30 = [r30];		\
	;;				\
	mov ar.unat = r30;		\
	;;				\
	movl r30 = XSI_BANKNUM;		\
	mov r31 = 1;			\
	;;				\
	st4 [r30] = r31;		\
	movl r30 = XSI_BANK1_R16;	\
	movl r31 = XSI_BANK1_R16+8;	\
	;;				\
	ld8.fill r16 = [r30], 16;	\
	ld8.fill r17 = [r31], 16;	\
	;;				\
	ld8.fill r18 = [r30], 16;	\
	ld8.fill r19 = [r31], 16;	\
	;;				\
	ld8.fill r20 = [r30], 16;	\
	ld8.fill r21 = [r31], 16;	\
	;;				\
	ld8.fill r22 = [r30], 16;	\
	ld8.fill r23 = [r31], 16;	\
	;;				\
	ld8.fill r24 = [r30], 16;	\
	ld8.fill r25 = [r31], 16;	\
	;;				\
	ld8.fill r26 = [r30], 16;	\
	ld8.fill r27 = [r31], 16;	\
	;;				\
	ld8.fill r28 = [r30], 16;	\
	ld8.fill r29 = [r31], 16;	\
	;;				\
	ld8.fill r30 = [r30];		\
	ld8.fill r31 = [r31];		\
	;;				\
	mov ar.unat = clob
/* xen_bsw1 clobbers clob1 = r14 */
/*
 * Out-of-line bank switch: call the shared xen_bsw1 routine instead
 * of expanding XEN_BSW_1 inline at every site.  \clob0 preserves b0
 * across the call; \clob1 must be r14 because xen_bsw1 clobbers it.
 */
.macro ____BSW_1 clob0, clob1
	.ifc "\clob0", "r14"
		.error "clob0 \clob0 must not be r14"
	.endif
	.ifnc "\clob1", "r14"
		.error "clob1 \clob1 must be r14"
	.endif
	.ifc "\clob0", "\clob1"
		.error "it must be clob0 \clob0 != clob1 \clob1"
	.endif

	mov \clob0 = b0
	br.call.sptk b0 = xen_bsw1
	;;
	mov b0 = \clob0
	;;
.endm

/*
 * Front end that reorders arguments so ____BSW_1's constraints hold,
 * warning when neither clobber is r14.
 */
.macro __BSW_1 clob0, clob1
	.ifc "\clob0", "r14"
		____BSW_1 \clob1, \clob0
		.exitm
	.endif
	.ifc "\clob1", "r14"
		____BSW_1 \clob0, \clob1
		.exitm
	.endif
	.ifc "\clob0", "\clob1"
		.error "it must be clob0 \clob0 != clob1 \clob1"
		.exitm
	.endif
	.warning "use r14 as second argument \clob0 \clob1"
	____BSW_1 \clob0, r14
.endm
/* in place code generating causes lack of space */
/* #define BSW_1(clob0, clob1)	XEN_BSW_1(clob1) */
#define BSW_1(clob0, clob1)	__BSW_1 clob0, clob1