/*
 * Provenance: pv_ops_mirror.git -- include/asm-ia64/xen/inst.h
 * blob 9015bd389dc9fb8c1fad47fac355ca4f5e23b447
 * (tree imported with commit "ia64/kvm: compilation fix. export
 * account_system_vtime.")
 */
/******************************************************************************
 * include/asm-ia64/xen/inst.h
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <asm/xen/privop.h>	/* provides the XSI_* addresses and XEN_HYPER_* stubs used below */

/* Route the generic ivt / SAVE_MIN names to their Xen variants. */
#define ia64_ivt			xen_ivt
#define DO_SAVE_MIN			XEN_DO_SAVE_MIN

/*
 * Redirect the generic paravirt entry-point names to the Xen
 * implementations, so assembly built with this header references the
 * xen_* symbols instead of the native ones.
 */
#define __paravirt_switch_to			xen_switch_to
#define __paravirt_leave_syscall		xen_leave_syscall
#define __paravirt_work_processed_syscall	xen_work_processed_syscall
#define __paravirt_leave_kernel			xen_leave_kernel
#define __paravirt_pending_syscall_end		xen_work_pending_syscall_end
#define __paravirt_work_processed_syscall_target \
						xen_work_processed_syscall
/*
 * Paravirtualized reads of privileged control registers: instead of
 * "mov reg = cr.xxx", load the register image Xen keeps at a fixed
 * XSI_* address (constants come from <asm/xen/privop.h>).  Each macro
 * clobbers only its destination: "reg" first holds the slot address,
 * then is overwritten by the ld8.  MOV_FROM_IPSR additionally takes a
 * qualifying predicate.
 */
#define MOV_FROM_IFA(reg)	\
	movl reg = XSI_IFA;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_ITIR(reg)	\
	movl reg = XSI_ITIR;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_ISR(reg)	\
	movl reg = XSI_ISR;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_IHA(reg)	\
	movl reg = XSI_IHA;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_IPSR(pred, reg)	\
(pred)	movl reg = XSI_IPSR;		\
	;;				\
(pred)	ld8 reg = [reg]

#define MOV_FROM_IIM(reg)	\
	movl reg = XSI_IIM;	\
	;;			\
	ld8 reg = [reg]

#define MOV_FROM_IIP(reg)	\
	movl reg = XSI_IIP;	\
	;;			\
	ld8 reg = [reg]
/*
 * MOV_FROM_IVR(reg, clob): paravirtualized "mov reg = cr.ivr".
 * Reading cr.ivr has side effects, so it goes through the
 * XEN_HYPER_GET_IVR hypercall, which delivers its result in r8:
 *   - reg == r8:  the result is already in place;
 *   - clob == r8: r8 is free to clobber, just copy the result out;
 *   - otherwise:  save r8 in clob, make the call, restore r8.
 * reg and clob must differ (enforced with .error).
 *
 * NOTE(review): this extract had dropped the bare ";;" stop-bit lines
 * (RAW dependencies on r8 around the hypercall require them); they are
 * restored below -- confirm placement against the upstream file.
 */
.macro __MOV_FROM_IVR reg, clob
	.ifc "\reg", "r8"
	XEN_HYPER_GET_IVR
	.exitm
	.endif
	.ifc "\clob", "r8"
	XEN_HYPER_GET_IVR
	;;
	mov \reg = r8
	.exitm
	.endif
	.ifc "\reg", "\clob"
	.error "it should be reg \reg != clob \clob"
	.endif

	mov \clob = r8
	;;
	XEN_HYPER_GET_IVR
	;;
	mov \reg = r8
	;;
	mov r8 = \clob
.endm
#define MOV_FROM_IVR(reg, clob)	__MOV_FROM_IVR reg, clob
/*
 * MOV_FROM_PSR(pred, reg, clob): paravirtualized predicated
 * "mov reg = psr" via the XEN_HYPER_GET_PSR hypercall (result in r8).
 * Same r8 shuffling scheme as __MOV_FROM_IVR.
 *
 * Fixed: stray trailing ";" after the first GET_PSR (inconsistent with
 * every sibling macro).  NOTE(review): the bare ";;" stop bits were
 * dropped in this extract and are restored below -- confirm against
 * the upstream file.
 */
.macro __MOV_FROM_PSR pred, reg, clob
	.ifc "\reg", "r8"
	(\pred)	XEN_HYPER_GET_PSR
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	XEN_HYPER_GET_PSR
	;;
	(\pred)	mov \reg = r8
	.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	XEN_HYPER_GET_PSR
	;;
	(\pred)	mov \reg = r8
	(\pred)	mov r8 = \clob
.endm
#define MOV_FROM_PSR(pred, reg, clob)	__MOV_FROM_PSR pred, reg, clob
/*
 * Paravirtualized "mov cr.ifa = reg": store into the XSI_IFA slot of
 * the Xen register image.  "clob" is clobbered (holds the slot
 * address).
 * NOTE(review): the last visible line ended with a continuation "\";
 * the dropped following line was the trailing bare ";;", restored here.
 */
#define MOV_TO_IFA(reg, clob)	\
	movl clob = XSI_IFA;	\
	;;			\
	st8 [clob] = reg	\
	;;
/*
 * Predicated paravirtualized "mov cr.itir = reg" via the XSI_ITIR
 * slot.  "clob" is clobbered with the slot address.
 */
#define MOV_TO_ITIR(pred, reg, clob)	\
(pred)	movl clob = XSI_ITIR;		\
	;;				\
(pred)	st8 [clob] = reg
/*
 * Predicated paravirtualized "mov cr.iha = reg" via the XSI_IHA slot.
 * "clob" is clobbered with the slot address.
 */
#define MOV_TO_IHA(pred, reg, clob)	\
(pred)	movl clob = XSI_IHA;		\
	;;				\
(pred)	st8 [clob] = reg
/*
 * Predicated paravirtualized "mov cr.ipsr = reg" via the XSI_IPSR
 * slot.  "clob" is clobbered with the slot address.
 * NOTE(review): the last visible line ended with "; \"; the dropped
 * continuation line was the trailing bare ";;", restored here.
 */
#define MOV_TO_IPSR(pred, reg, clob)	\
(pred)	movl clob = XSI_IPSR;		\
	;;				\
(pred)	st8 [clob] = reg;		\
	;;
/*
 * Predicated paravirtualized "mov cr.ifs = reg" via the XSI_IFS slot.
 * "clob" is clobbered with the slot address.
 * NOTE(review): the last visible line ended with "; \"; the dropped
 * continuation line was the trailing bare ";;", restored here.
 */
#define MOV_TO_IFS(pred, reg, clob)	\
(pred)	movl clob = XSI_IFS;		\
	;;				\
(pred)	st8 [clob] = reg;		\
	;;
/*
 * Paravirtualized "mov cr.iip = reg" via the XSI_IIP slot.
 * "clob" is clobbered with the slot address.
 */
#define MOV_TO_IIP(reg, clob)	\
	movl clob = XSI_IIP;	\
	;;			\
	st8 [clob] = reg
/*
 * ____MOV_TO_KR: write "reg" into kernel register \kr through the
 * XEN_HYPER_SET_KR hypercall.  The sequence loads the kr selector into
 * r8 and the value into r9, saving the caller's r8/r9 in clob0/clob1
 * and restoring them afterwards (moves are skipped with .ifnc when an
 * operand already is r8/r9).  Constraints clob0 != r9 and clob1 != r8
 * are enforced with .error; __MOV_TO_KR below reorders the clobbers so
 * callers need not care.
 *
 * NOTE(review): the bare ";;" stop bits around XEN_HYPER_SET_KR were
 * dropped in this extract and are restored -- confirm against the
 * upstream file.
 */
.macro ____MOV_TO_KR kr, reg, clob0, clob1
	.ifc "\clob0", "r9"
	.error "clob0 \clob0 must not be r9"
	.endif
	.ifc "\clob1", "r8"
	.error "clob1 \clob1 must not be r8"
	.endif

	.ifnc "\reg", "r9"
	.ifnc "\clob1", "r9"
	mov \clob1 = r9
	.endif
	mov r9 = \reg
	.endif
	.ifnc "\clob0", "r8"
	mov \clob0 = r8
	.endif
	mov r8 = \kr
	;;
	XEN_HYPER_SET_KR
	;;
	.ifnc "\reg", "r9"
	.ifnc "\clob1", "r9"
	mov r9 = \clob1
	.endif
	.endif
	.ifnc "\clob0", "r8"
	mov r8 = \clob0
	.endif
.endm
/*
 * __MOV_TO_KR: front end for ____MOV_TO_KR.  If the caller's clobber
 * choice would violate the clob0 != r9 / clob1 != r8 constraints
 * checked there, swap clob0 and clob1 before delegating.
 */
.macro __MOV_TO_KR kr, reg, clob0, clob1
	.ifc "\clob0", "r9"
	____MOV_TO_KR \kr, \reg, \clob1, \clob0
	.exitm
	.endif
	.ifc "\clob1", "r8"
	____MOV_TO_KR \kr, \reg, \clob1, \clob0
	.exitm
	.endif

	____MOV_TO_KR \kr, \reg, \clob0, \clob1
.endm

/* MOV_TO_KR(PER_CPU_DATA, ...) token-pastes into IA64_KR_PER_CPU_DATA etc. */
#define MOV_TO_KR(kr, reg, clob0, clob1) \
	__MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
/*
 * ITC_I(pred, reg, clob): paravirtualized predicated "itc.i reg"
 * (insert instruction-TLB entry).  XEN_HYPER_ITC_I takes its operand
 * in r8, so the macro routes "reg" into r8, preserving the caller's r8
 * via "clob" unless reg or clob already is r8.
 *
 * NOTE(review): the bare ";;" stop bits were dropped in this extract
 * and are restored -- confirm against the upstream file.
 */
.macro __ITC_I pred, reg, clob
	.ifc "\reg", "r8"
	(\pred)	XEN_HYPER_ITC_I
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_I
	.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_I
	;;
	(\pred)	mov r8 = \clob
	;;
.endm
#define ITC_I(pred, reg, clob)	__ITC_I pred, reg, clob
/*
 * ITC_D(pred, reg, clob): paravirtualized predicated "itc.d reg"
 * (insert data-TLB entry).  Same r8 routing scheme as __ITC_I.
 *
 * NOTE(review): the bare ";;" stop bits were dropped in this extract
 * and are restored -- confirm against the upstream file.
 */
.macro __ITC_D pred, reg, clob
	.ifc "\reg", "r8"
	(\pred)	XEN_HYPER_ITC_D
	;;
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_D
	;;
	.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_D
	;;
	(\pred)	mov r8 = \clob
	;;
.endm
#define ITC_D(pred, reg, clob)	__ITC_D pred, reg, clob
/*
 * ITC_I_AND_D(pred_i, pred_d, reg, clob): combined insert of the same
 * translation into the instruction and/or data TLB, each hypercall
 * under its own predicate.  Note the r8 save/restore in the general
 * case is unpredicated (both hypercalls share the staged r8 value).
 *
 * NOTE(review): the bare ";;" stop bits were dropped in this extract
 * and are restored -- confirm against the upstream file.
 */
.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
	.ifc "\reg", "r8"
	(\pred_i) XEN_HYPER_ITC_I
	;;
	(\pred_d) XEN_HYPER_ITC_D
	;;
	.exitm
	.endif
	.ifc "\clob", "r8"
	mov r8 = \reg
	;;
	(\pred_i) XEN_HYPER_ITC_I
	;;
	(\pred_d) XEN_HYPER_ITC_D
	;;
	.exitm
	.endif

	mov \clob = r8
	mov r8 = \reg
	;;
	(\pred_i) XEN_HYPER_ITC_I
	;;
	(\pred_d) XEN_HYPER_ITC_D
	;;
	mov r8 = \clob
	;;
.endm
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
	__ITC_I_AND_D pred_i, pred_d, reg, clob
/*
 * THASH(pred, reg0, reg1, clob): paravirtualized predicated
 * "thash reg0 = reg1".  XEN_HYPER_THASH takes its argument in r8 and
 * returns the hash in r8; the macro routes reg1 in and reg0 out,
 * preserving the caller's r8 via "clob" when neither operand is r8.
 *
 * Fixed: ".endc" (an MRI-compatibility alias) replaced by ".endif" for
 * consistency with every other conditional in this file.
 * NOTE(review): the bare ";;" stop bits were dropped in this extract
 * and are restored -- confirm against the upstream file.
 */
.macro __THASH pred, reg0, reg1, clob
	.ifc "\reg0", "r8"
	(\pred)	mov r8 = \reg1
	(\pred)	XEN_HYPER_THASH
	.exitm
	.endif
	.ifc "\reg1", "r8"
	(\pred)	XEN_HYPER_THASH
	;;
	(\pred)	mov \reg0 = r8
	;;
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	mov r8 = \reg1
	(\pred)	XEN_HYPER_THASH
	;;
	(\pred)	mov \reg0 = r8
	;;
	.exitm
	.endif

	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg1
	(\pred)	XEN_HYPER_THASH
	;;
	(\pred)	mov \reg0 = r8
	(\pred)	mov r8 = \clob
	;;
.endm
#define THASH(pred, reg0, reg1, clob)	__THASH pred, reg0, reg1, clob
/*
 * Set the virtual psr.ic flag kept at XSI_PSR_IC (st4 of 1).  The
 * srlz.i implied by the name is presumably performed by Xen -- the
 * native variant issues it explicitly; confirm.  clob0/clob1 are
 * clobbered.
 * NOTE(review): the last visible line ended with a continuation "\";
 * the dropped following line was the trailing bare ";;", restored here.
 */
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)	\
	mov clob0 = 1;						\
	movl clob1 = XSI_PSR_IC;				\
	;;							\
	st4 [clob1] = clob0					\
	;;
/*
 * Serialize the data stream (srlz.d), then set the virtual psr.ic
 * flag at XSI_PSR_IC.  clob0/clob1 are clobbered.
 */
#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)	\
	;;					\
	srlz.d;					\
	mov clob1 = 1;				\
	movl clob0 = XSI_PSR_IC;		\
	;;					\
	st4 [clob0] = clob1
/*
 * Clear the virtual psr.ic flag at XSI_PSR_IC (st4 of r0).  "clob" is
 * clobbered with the slot address.
 * NOTE(review): the last visible line ended with "; \"; the dropped
 * continuation line was the trailing bare ";;", restored here.
 */
#define RSM_PSR_IC(clob)	\
	movl clob = XSI_PSR_IC;	\
	;;			\
	st4 [clob] = r0;	\
	;;
/*
 * SSM_PSR_I: virtual "ssm psr.i" -- unmask event delivery.  Reads the
 * per-vcpu mask-byte address from XSI_PSR_I_ADDR, clears
 * evtchn_upcall_mask, then checks the adjacent evtchn_upcall_pending
 * byte (MASK_TO_PEND_OFS away) and, if an event is already pending,
 * traps to Xen with XEN_HYPER_SSM_I to deliver it.
 * "pred" guards the whole sequence; "pred_clob" (i.e. the predicate
 * named below -- it will be clobbered) receives the pending-test
 * result; "clob" is clobbered.
 */
/* pred will be clobbered */
#define MASK_TO_PEND_OFS	(-1)	/* pending byte sits just below the mask byte */
#define SSM_PSR_I(pred, pred_clob, clob)	\
(pred)	movl clob = XSI_PSR_I_ADDR		\
	;;					\
(pred)	ld8 clob = [clob]			\
	;;					\
	/* if (pred) vpsr.i = 1 */		\
	/* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */	\
(pred)	st1 [clob] = r0, MASK_TO_PEND_OFS	\
	;;					\
	/* if (vcpu->vcpu_info->evtchn_upcall_pending) */	\
(pred)	ld1 clob = [clob]			\
	;;					\
(pred)	cmp.ne.unc pred_clob, p0 = clob, r0	\
	;;					\
(pred_clob)	XEN_HYPER_SSM_I		/* do a real ssm psr.i */
/*
 * RSM_PSR_I: virtual "rsm psr.i" -- mask event delivery by storing 1
 * into the vcpu's evtchn_upcall_mask byte.  The address load from
 * XSI_PSR_I_ADDR is unconditional; only the final store honours
 * "pred".  clob0/clob1 are clobbered.
 */
#define RSM_PSR_I(pred, clob0, clob1)	\
	movl clob0 = XSI_PSR_I_ADDR;	\
	mov clob1 = 1;			\
	;;				\
	ld8 clob0 = [clob0];		\
	;;				\
(pred)	st1 [clob0] = clob1
/*
 * RSM_PSR_I_IC: mask event delivery (evtchn_upcall_mask = 1) and clear
 * the virtual psr.ic flag in one sequence.  clob0..clob2 are
 * clobbered.
 * NOTE(review): the last visible line ended with "; \"; the dropped
 * continuation line was the trailing bare ";;", restored here.
 */
#define RSM_PSR_I_IC(clob0, clob1, clob2)	\
	movl clob0 = XSI_PSR_I_ADDR;		\
	movl clob1 = XSI_PSR_IC;		\
	;;					\
	ld8 clob0 = [clob0];			\
	mov clob2 = 1;				\
	;;					\
	/* note: clears both vpsr.i and vpsr.ic! */	\
	st1 [clob0] = clob2;			\
	st4 [clob1] = r0;			\
	;;
/* "rsm psr.dt" is delegated entirely to the hypervisor. */
#define RSM_PSR_DT		\
	XEN_HYPER_RSM_PSR_DT

/*
 * "ssm psr.dt" via hypercall; the srlz.i implied by the name is
 * presumably handled inside Xen -- confirm against the native variant.
 */
#define SSM_PSR_DT_AND_SRLZ_I	\
	XEN_HYPER_SSM_PSR_DT
/*
 * BSW_0: paravirtualized "bsw.0" (switch to register bank 0).  The
 * bank-1 values of r16-r31 are spilled to the XSI_BANK1_R16 save area
 * using two interleaved st8.spill pipes (clob0 = even slots, clob1 =
 * odd slots, both advancing by 16), the NaT collection gathered in
 * ar.unat by the spills is saved to XSI_B1NAT, and finally
 * XSI_BANKNUM is set to 0.  The caller's ar.unat is preserved via
 * clob2; clob0/clob1 are clobbered.
 */
#define BSW_0(clob0, clob1, clob2)	\
	;;				\
	/* r16-r31 all now hold bank1 values */	\
	mov clob2 = ar.unat;		\
	movl clob0 = XSI_BANK1_R16;	\
	movl clob1 = XSI_BANK1_R16 + 8;	\
	;;				\
	.mem.offset 0, 0; st8.spill [clob0] = r16, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r17, 16;	\
	;;				\
	.mem.offset 0, 0; st8.spill [clob0] = r18, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r19, 16;	\
	;;				\
	.mem.offset 0, 0; st8.spill [clob0] = r20, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r21, 16;	\
	;;				\
	.mem.offset 0, 0; st8.spill [clob0] = r22, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r23, 16;	\
	;;				\
	.mem.offset 0, 0; st8.spill [clob0] = r24, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r25, 16;	\
	;;				\
	.mem.offset 0, 0; st8.spill [clob0] = r26, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r27, 16;	\
	;;				\
	.mem.offset 0, 0; st8.spill [clob0] = r28, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r29, 16;	\
	;;				\
	.mem.offset 0, 0; st8.spill [clob0] = r30, 16;	\
	.mem.offset 8, 0; st8.spill [clob1] = r31, 16;	\
	;;				\
	mov clob1 = ar.unat;		\
	movl clob0 = XSI_B1NAT;		\
	;;				\
	st8 [clob0] = clob1;		\
	mov ar.unat = clob2;		\
	movl clob0 = XSI_BANKNUM;	\
	;;				\
	st4 [clob0] = r0
/* FIXME: THIS CODE IS NOT NaT SAFE! */
/*
 * XEN_BSW_1: paravirtualized "bsw.1" (switch to register bank 1).
 * Sets XSI_BANKNUM = 1 and refills r16-r31 from the XSI_BANK1_R16
 * save area via two ld8.fill pipes; the saved NaT collection is
 * loaded from XSI_B1NAT into ar.unat first so ld8.fill can restore
 * NaT bits.  r30/r31 serve as the fill pointers and are refilled
 * last; "clob" preserves the caller's ar.unat (the FIXME above refers
 * to NaT state of the clobbered registers during this sequence).
 */
#define XEN_BSW_1(clob)			\
	mov clob = ar.unat;		\
	movl r30 = XSI_B1NAT;		\
	;;				\
	ld8 r30 = [r30];		\
	mov r31 = 1;			\
	;;				\
	mov ar.unat = r30;		\
	movl r30 = XSI_BANKNUM;		\
	;;				\
	st4 [r30] = r31;		\
	movl r30 = XSI_BANK1_R16;	\
	movl r31 = XSI_BANK1_R16+8;	\
	;;				\
	ld8.fill r16 = [r30], 16;	\
	ld8.fill r17 = [r31], 16;	\
	;;				\
	ld8.fill r18 = [r30], 16;	\
	ld8.fill r19 = [r31], 16;	\
	;;				\
	ld8.fill r20 = [r30], 16;	\
	ld8.fill r21 = [r31], 16;	\
	;;				\
	ld8.fill r22 = [r30], 16;	\
	ld8.fill r23 = [r31], 16;	\
	;;				\
	ld8.fill r24 = [r30], 16;	\
	ld8.fill r25 = [r31], 16;	\
	;;				\
	ld8.fill r26 = [r30], 16;	\
	ld8.fill r27 = [r31], 16;	\
	;;				\
	ld8.fill r28 = [r30], 16;	\
	ld8.fill r29 = [r31], 16;	\
	;;				\
	ld8.fill r30 = [r30];		\
	ld8.fill r31 = [r31];		\
	;;				\
	mov ar.unat = clob
/* xen_bsw1 clobbers clob1 = r14 */
/*
 * ____BSW_1: out-of-line bank-1 switch.  Calls the shared xen_bsw1
 * routine (the XEN_BSW_1 sequence) instead of expanding it inline, to
 * save code space.  xen_bsw1 clobbers r14, so clob1 must be r14 and
 * clob0 must not be (both checked with .error); clob0 preserves b0
 * around the br.call.
 * NOTE(review): the bare ";;" stop bits around the call return were
 * dropped in this extract and are restored -- confirm against the
 * upstream file.
 */
.macro ____BSW_1 clob0, clob1
	.ifc "\clob0", "r14"
	.error "clob0 \clob0 must not be r14"
	.endif
	.ifnc "\clob1", "r14"
	.error "clob1 \clob1 must be r14"
	.endif
	.ifc "\clob0", "\clob1"
	.error "it must be clob0 \clob0 != clob1 \clob1"
	.endif

	mov \clob0 = b0
	br.call.sptk b0 = xen_bsw1
	;;
	mov b0 = \clob0
	;;
.endm
/*
 * __BSW_1: front end for ____BSW_1, which requires its second clobber
 * to be r14.  If either caller-supplied clobber is r14, the arguments
 * are ordered so r14 lands in the clob1 slot; otherwise r14 is saved
 * in clob1, used as the scratch register, and restored (with a
 * .warning nudging the caller to just pass r14).
 */
.macro __BSW_1 clob0, clob1
	.ifc "\clob0", "r14"
	____BSW_1 \clob1, \clob0
	.exitm
	.endif
	.ifc "\clob1", "r14"
	____BSW_1 \clob0, \clob1
	.exitm
	.endif
	.ifc "\clob0", "\clob1"
	.error "it must be clob0 \clob0 != clob1 \clob1"
	.endif

	.warning "use r14 as second argument \clob0 \clob1"
	mov \clob1 = r14
	____BSW_1 \clob0, r14
	mov r14 = \clob1
.endm

/* in place code generating causes lack of space */
/* #define BSW_1(clob0, clob1)	XEN_BSW_1(clob1) */
#define BSW_1(clob0, clob1)	__BSW_1 clob0, clob1
/* "cover" is privilege-sensitive under Xen: delegate to the hypervisor. */
#define COVER	\
	XEN_HYPER_COVER

/*
 * Paravirtualized "rfi".  dv_serialize_data tells the assembler that
 * data dependencies are serialized at this point -- presumably because
 * the hypercall does not fall through; confirm.
 */
#define RFI	\
	XEN_HYPER_RFI;	\
	dv_serialize_data