/******************************************************************************
 * include/asm-ia64/xen/inst.h
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <asm/xen/privop.h>

#define ia64_ivt				xen_ivt
#define DO_SAVE_MIN				XEN_DO_SAVE_MIN

#define __paravirt_switch_to			xen_switch_to
#define __paravirt_leave_syscall		xen_leave_syscall
#define __paravirt_work_processed_syscall	xen_work_processed_syscall
#define __paravirt_leave_kernel			xen_leave_kernel
#define __paravirt_pending_syscall_end		xen_work_pending_syscall_end
#define __paravirt_work_processed_syscall_target \
						xen_work_processed_syscall
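
/*
 * The MOV_FROM_* macros below read the paravirtualized control registers.
 * Instead of issuing a privileged "mov reg = cr.*", which would have to be
 * intercepted by the hypervisor, the value is loaded from the Xen shared
 * state area; the XSI_* symbols are the addresses of the corresponding
 * fields in that area.
 */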
#define MOV_FROM_IFA(reg)	\
	movl reg = XSI_IFA;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_ITIR(reg)	\
	movl reg = XSI_ITIR;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_ISR(reg)	\
	movl reg = XSI_ISR;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_IHA(reg)	\
	movl reg = XSI_IHA;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_IPSR(pred, reg)	\
(pred)	movl reg = XSI_IPSR;		\
	;;				\
(pred)	ld8 reg = [reg]

#define MOV_FROM_IIM(reg)	\
	movl reg = XSI_IIM;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_IIP(reg)	\
	movl reg = XSI_IIP;	\
	;;			\
	ld8 reg = [reg]
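
/*
 * Reading cr.ivr has side effects and psr is dynamic state, so the next two
 * accessors cannot simply load a shadow field; they use the
 * XEN_HYPER_GET_IVR/XEN_HYPER_GET_PSR hyperprivops instead.  The result
 * comes back in r8, which is why a clobber register is needed to preserve
 * the caller's r8 unless r8 itself is one of the operands.
 */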
.macro __MOV_FROM_IVR reg, clob
	.ifc "\reg", "r8"
		XEN_HYPER_GET_IVR
		.exitm
	.endif
	.ifc "\clob", "r8"
		XEN_HYPER_GET_IVR
		;;
		mov \reg = r8
		.exitm
	.endif

	.ifc "\reg", "\clob"
		.error "it should be reg \reg != clob \clob"
	.endif

	mov \clob = r8
	;;
	XEN_HYPER_GET_IVR
	;;
	mov \reg = r8
	;;
	mov r8 = \clob
.endm
#define MOV_FROM_IVR(reg, clob)	__MOV_FROM_IVR reg, clob

.macro __MOV_FROM_PSR pred, reg, clob
	.ifc "\reg", "r8"
		(\pred)	XEN_HYPER_GET_PSR;
		.exitm
	.endif
	.ifc "\clob", "r8"
		(\pred)	XEN_HYPER_GET_PSR
		;;
		(\pred)	mov \reg = r8
		.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	XEN_HYPER_GET_PSR
	;;
	(\pred)	mov \reg = r8
	(\pred)	mov r8 = \clob
.endm
#define MOV_FROM_PSR(pred, reg, clob)	__MOV_FROM_PSR pred, reg, clob
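
/*
 * The MOV_TO_* macros mirror the accessors above in the other direction:
 * a write to a paravirtualized control register becomes a store into the
 * corresponding XSI_* field of the shared state area.
 */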
#define MOV_TO_IFA(reg, clob)	\
	movl clob = XSI_IFA;	\
	;;			\
	st8 [clob] = reg

#define MOV_TO_ITIR(pred, reg, clob)	\
(pred)	movl clob = XSI_ITIR;		\
	;;				\
(pred)	st8 [clob] = reg

#define MOV_TO_IHA(pred, reg, clob)	\
(pred)	movl clob = XSI_IHA;		\
	;;				\
(pred)	st8 [clob] = reg

#define MOV_TO_IPSR(pred, reg, clob)	\
(pred)	movl clob = XSI_IPSR;		\
	;;				\
(pred)	st8 [clob] = reg;		\
	;;

#define MOV_TO_IFS(pred, reg, clob)	\
(pred)	movl clob = XSI_IFS;		\
	;;				\
(pred)	st8 [clob] = reg;		\
	;;

#define MOV_TO_IIP(reg, clob)	\
	movl clob = XSI_IIP;	\
	;;			\
	st8 [clob] = reg

.macro ____MOV_TO_KR kr, reg, clob0, clob1
	.ifc "\clob0", "r9"
		.error "clob0 \clob0 must not be r9"
	.endif
	.ifc "\clob1", "r8"
		.error "clob1 \clob1 must not be r8"
	.endif

	.ifnc "\reg", "r9"
		.ifnc "\clob1", "r9"
			mov \clob1 = r9
		.endif
		mov r9 = \reg
	.endif
	.ifnc "\clob0", "r8"
		mov \clob0 = r8
	.endif
	mov r8 = \kr
	;;
	XEN_HYPER_SET_KR

	.ifnc "\reg", "r9"
		.ifnc "\clob1", "r9"
			mov r9 = \clob1
		.endif
	.endif
	.ifnc "\clob0", "r8"
		mov r8 = \clob0
	.endif
.endm

.macro __MOV_TO_KR kr, reg, clob0, clob1
	.ifc "\clob0", "r9"
		____MOV_TO_KR \kr, \reg, \clob1, \clob0
		.exitm
	.endif
	.ifc "\clob1", "r8"
		____MOV_TO_KR \kr, \reg, \clob1, \clob0
		.exitm
	.endif

	____MOV_TO_KR \kr, \reg, \clob0, \clob1
.endm

#define MOV_TO_KR(kr, reg, clob0, clob1) \
	__MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
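
/*
 * TLB insertion (itc.i/itc.d) is privileged, so it is replaced by the
 * XEN_HYPER_ITC_I/XEN_HYPER_ITC_D hyperprivops.  As the code below shows,
 * the pte value is passed in r8; r8 is therefore saved in the clobber
 * register and restored afterwards unless it is one of the operands.
 */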
.macro __ITC_I pred, reg, clob
	.ifc "\reg", "r8"
		(\pred)	XEN_HYPER_ITC_I
		.exitm
	.endif
	.ifc "\clob", "r8"
		(\pred)	mov r8 = \reg
		;;
		(\pred)	XEN_HYPER_ITC_I
		.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_I
	;;
	(\pred)	mov r8 = \clob
	;;
.endm
#define ITC_I(pred, reg, clob)	__ITC_I pred, reg, clob

.macro __ITC_D pred, reg, clob
	.ifc "\reg", "r8"
		(\pred)	XEN_HYPER_ITC_D
		;;
		.exitm
	.endif
	.ifc "\clob", "r8"
		(\pred)	mov r8 = \reg
		;;
		(\pred)	XEN_HYPER_ITC_D
		;;
		.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_D
	;;
	(\pred)	mov r8 = \clob
	;;
.endm
#define ITC_D(pred, reg, clob)	__ITC_D pred, reg, clob

.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
	.ifc "\reg", "r8"
		(\pred_i)XEN_HYPER_ITC_I
		;;
		(\pred_d)XEN_HYPER_ITC_D
		;;
		.exitm
	.endif
	.ifc "\clob", "r8"
		mov r8 = \reg
		;;
		(\pred_i)XEN_HYPER_ITC_I
		;;
		(\pred_d)XEN_HYPER_ITC_D
		;;
		.exitm
	.endif

	mov \clob = r8
	mov r8 = \reg
	;;
	(\pred_i)XEN_HYPER_ITC_I
	;;
	(\pred_d)XEN_HYPER_ITC_D
	;;
	mov r8 = \clob
	;;
.endm
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
	__ITC_I_AND_D pred_i, pred_d, reg, clob
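
/*
 * thash is likewise handled by a hyperprivop: XEN_HYPER_THASH takes the
 * address to hash in r8 and returns the resulting VHPT entry address in
 * r8, hence the same save/restore dance around r8 as in the ITC macros.
 */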
.macro __THASH pred, reg0, reg1, clob
	.ifc "\reg0", "r8"
		(\pred)	mov r8 = \reg1
		(\pred)	XEN_HYPER_THASH
		.exitm
	.endif
	.ifc "\reg1", "r8"
		(\pred)	XEN_HYPER_THASH
		;;
		(\pred)	mov \reg0 = r8
		.exitm
	.endif
	.ifc "\clob", "r8"
		(\pred)	mov r8 = \reg1
		(\pred)	XEN_HYPER_THASH
		;;
		(\pred)	mov \reg0 = r8
		.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg1
	(\pred)	XEN_HYPER_THASH
	;;
	(\pred)	mov \reg0 = r8
	(\pred)	mov r8 = \clob
	;;
.endm
#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob

#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)	\
	mov clob0 = 1;						\
	movl clob1 = XSI_PSR_IC;				\
	;;							\
	st4 [clob1] = clob0					\
	;;

#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)	\
	;;					\
	mov clob1 = 1;				\
	movl clob0 = XSI_PSR_IC;		\
	;;					\
	st4 [clob0] = clob1

#define RSM_PSR_IC(clob)	\
	movl clob = XSI_PSR_IC;	\
	;;			\
	st4 [clob] = r0;	\
	;;

/* pred will be clobbered */
#define MASK_TO_PEND_OFS    (-1)
#define SSM_PSR_I(pred, pred_clob, clob)				\
(pred)	movl clob = XSI_PSR_I_ADDR					\
	;;								\
(pred)	ld8 clob = [clob]						\
	;;								\
	/* if (pred) vpsr.i = 1 */					\
	/* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */	\
(pred)	st1 [clob] = r0, MASK_TO_PEND_OFS				\
	;;								\
	/* if (vcpu->vcpu_info->evtchn_upcall_pending) */		\
(pred)	ld1 clob = [clob]						\
	;;								\
(pred)	cmp.ne.unc pred_clob, p0 = clob, r0				\
	;;								\
(pred_clob)XEN_HYPER_SSM_I	/* do a real ssm psr.i */
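
/*
 * Interrupt masking works the other way round: RSM_PSR_I stores 1 to
 * vcpu_info->evtchn_upcall_mask (whose address is read from
 * XSI_PSR_I_ADDR), and RSM_PSR_I_IC additionally clears the virtual
 * psr.ic flag in the shared state area.
 */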
#define RSM_PSR_I(pred, clob0, clob1)	\
	movl clob0 = XSI_PSR_I_ADDR;	\
	mov clob1 = 1;			\
	;;				\
	ld8 clob0 = [clob0];		\
	;;				\
(pred)	st1 [clob0] = clob1

#define RSM_PSR_I_IC(clob0, clob1, clob2)		\
	movl clob0 = XSI_PSR_I_ADDR;			\
	movl clob1 = XSI_PSR_IC;			\
	;;						\
	ld8 clob0 = [clob0];				\
	mov clob2 = 1;					\
	;;						\
	/* note: clears both vpsr.i and vpsr.ic! */	\
	st1 [clob0] = clob2;				\
	st4 [clob1] = r0;				\
	;;

#define SSM_PSR_DT_AND_SRLZ_I \

#define BSW_0(clob0, clob1, clob2)			\
	;;						\
	/* r16-r31 all now hold bank1 values */		\
	mov clob2 = ar.unat;				\
	movl clob0 = XSI_BANK1_R16;			\
	movl clob1 = XSI_BANK1_R16 + 8;			\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r16, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r17, 16;	\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r18, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r19, 16;	\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r20, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r21, 16;	\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r22, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r23, 16;	\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r24, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r25, 16;	\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r26, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r27, 16;	\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r28, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r29, 16;	\
	;;						\
	.mem.offset 0, 0; st8.spill [clob0] = r30, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r31, 16;	\
	;;						\
	mov clob1 = ar.unat;				\
	movl clob0 = XSI_B1NAT;				\
	;;						\
	st8 [clob0] = clob1;				\
	mov ar.unat = clob2;				\
	movl clob0 = XSI_BANKNUM;			\
	;;						\
	st4 [clob0] = r0
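
/*
 * Register-bank switching (bsw.0/bsw.1) is also privileged.  BSW_0 above
 * spills the bank 1 copies of r16-r31 (plus their NaT bits) into the
 * XSI_BANK1_R16 save area and writes 0 to XSI_BANKNUM; the BSW_1 variants
 * below do the reverse, refilling r16-r31 from that area.
 */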
/* FIXME: THIS CODE IS NOT NaT SAFE! */
#define XEN_BSW_1(clob)			\
	mov clob = ar.unat;		\
	movl r30 = XSI_B1NAT;		\
	;;				\
	ld8 r30 = [r30];		\
	mov r31 = 1;			\
	;;				\
	mov ar.unat = r30;		\
	movl r30 = XSI_BANKNUM;		\
	;;				\
	st4 [r30] = r31;		\
	movl r30 = XSI_BANK1_R16;	\
	movl r31 = XSI_BANK1_R16+8;	\
	;;				\
	ld8.fill r16 = [r30], 16;	\
	ld8.fill r17 = [r31], 16;	\
	;;				\
	ld8.fill r18 = [r30], 16;	\
	ld8.fill r19 = [r31], 16;	\
	;;				\
	ld8.fill r20 = [r30], 16;	\
	ld8.fill r21 = [r31], 16;	\
	;;				\
	ld8.fill r22 = [r30], 16;	\
	ld8.fill r23 = [r31], 16;	\
	;;				\
	ld8.fill r24 = [r30], 16;	\
	ld8.fill r25 = [r31], 16;	\
	;;				\
	ld8.fill r26 = [r30], 16;	\
	ld8.fill r27 = [r31], 16;	\
	;;				\
	ld8.fill r28 = [r30], 16;	\
	ld8.fill r29 = [r31], 16;	\
	;;				\
	ld8.fill r30 = [r30];		\
	ld8.fill r31 = [r31];		\
	;;				\
	mov ar.unat = clob

/* xen_bsw1 clobbers clob1 = r14 */
.macro ____BSW_1 clob0, clob1
	.ifc "\clob0", "r14"
		.error "clob0 \clob0 must not be r14"
	.endif
	.ifnc "\clob1", "r14"
		.error "clob1 \clob1 must be r14"
	.endif
	.ifc "\clob0", "\clob1"
		.error "it must be clob0 \clob0 != clob1 \clob1"
	.endif

	mov \clob0 = b0
	br.call.sptk b0 = xen_bsw1
	;;
	mov b0 = \clob0
	;;
.endm

.macro __BSW_1 clob0, clob1
	.ifc "\clob0", "r14"
		____BSW_1 \clob1, \clob0
		.exitm
	.endif
	.ifc "\clob1", "r14"
		____BSW_1 \clob0, \clob1
		.exitm
	.endif
	.ifc "\clob0", "\clob1"
		.error "it must be clob0 \clob0 != clob1 \clob1"
		.exitm
	.endif

	.warning "use r14 as second argument \clob0 \clob1"
	____BSW_1 \clob0, r14
.endm

/* expanding the code in place (XEN_BSW_1) would take too much space */
/* #define BSW_1(clob0, clob1)	XEN_BSW_1(clob1) */
#define BSW_1(clob0, clob1)	__BSW_1 clob0, clob1