/*
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */
6 #ifndef _UAPI_ASM_IA64_GCC_INTRIN_H
7 #define _UAPI_ASM_IA64_GCC_INTRIN_H
9 #include <linux/types.h>
10 #include <linux/compiler.h>
/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier()	asm volatile ("":::"memory")

/* Architectural stop bit: ends the current instruction group. */
#define ia64_stop()	asm volatile (";;"::)

/* Invalidate a single ALAT entry for the given general/FP register. */
#define ia64_invala_gr(regnum)	asm volatile ("invala.e r%0" :: "i"(regnum))

#define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))

/* Flush / load the register stack engine's dirty registers. */
#define ia64_flushrs()	asm volatile ("flushrs;;":::"memory")

#define ia64_loadrs()	asm volatile ("loadrs;;":::"memory")

/*
 * Deliberately undefined: referencing either one with a non-constant or
 * out-of-range register number turns into a link-time error.
 */
extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);
/*
 * Write a control/application/special register selected by the constant
 * regnum.  Unsupported regnum values fall through to
 * ia64_bad_param_for_setreg(), producing a link error.
 */
#define ia64_native_setreg(regnum, val)						\
({										\
	switch (regnum) {							\
	case _IA64_REG_PSR_L:							\
		asm volatile ("mov psr.l=%0" :: "r"(val) : "memory");		\
		break;								\
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
		asm volatile ("mov ar%0=%1" ::					\
			      "i" (regnum - _IA64_REG_AR_KR0),			\
			      "r"(val): "memory");				\
		break;								\
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:				\
		asm volatile ("mov cr%0=%1" ::					\
			      "i" (regnum - _IA64_REG_CR_DCR),			\
			      "r"(val): "memory" );				\
		break;								\
	case _IA64_REG_SP:							\
		asm volatile ("mov r12=%0" ::					\
			      "r"(val): "memory");				\
		break;								\
	case _IA64_REG_GP:							\
		asm volatile ("mov gp=%0" :: "r"(val) : "memory");		\
		break;								\
	default:								\
		ia64_bad_param_for_setreg();					\
		break;								\
	}									\
})
/*
 * Read a register selected by the constant regnum; evaluates to its
 * 64-bit value.  _IA64_REG_TP reads r13 (thread pointer, for current()).
 * Unsupported regnum values link-fail via ia64_bad_param_for_getreg().
 */
#define ia64_native_getreg(regnum)					\
({									\
	__u64 ia64_intri_res;						\
									\
	switch (regnum) {						\
	case _IA64_REG_GP:						\
		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res));	\
		break;							\
	case _IA64_REG_IP:						\
		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res));	\
		break;							\
	case _IA64_REG_PSR:						\
		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res));	\
		break;							\
	case _IA64_REG_TP:	/* for current() */			\
		ia64_intri_res = ia64_r13;				\
		break;							\
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:			\
		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res)	\
			      : "i"(regnum - _IA64_REG_AR_KR0));	\
		break;							\
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:			\
		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res)	\
			      : "i" (regnum - _IA64_REG_CR_DCR));	\
		break;							\
	case _IA64_REG_SP:						\
		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res));	\
		break;							\
	default:							\
		ia64_bad_param_for_getreg();				\
		break;							\
	}								\
	ia64_intri_res;							\
})
#define ia64_hint_pause 0

/* Emit a performance hint; only the @pause hint is supported. */
#define ia64_hint(mode)						\
({								\
	switch (mode) {						\
	case ia64_hint_pause:					\
		asm volatile ("hint @pause" ::: "memory");	\
		break;						\
	}							\
})
/* Integer values for mux1 instruction */
#define ia64_mux1_brcst 0
#define ia64_mux1_mix   8
#define ia64_mux1_shuf  9
#define ia64_mux1_alt  10
#define ia64_mux1_rev  11
/* Byte permute of x according to the constant mux1 mode; yields the result. */
#define ia64_mux1(x, mode)							\
({										\
	__u64 ia64_intri_res;							\
										\
	switch (mode) {								\
	case ia64_mux1_brcst:							\
		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_mix:							\
		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_shuf:							\
		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_alt:							\
		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_rev:							\
		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	}									\
	ia64_intri_res;								\
})
/* Population count: use the compiler builtin when available (gcc >= 3.4). */
#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
# define ia64_popcnt(x)		__builtin_popcountl(x)
#else
# define ia64_popcnt(x)						\
({								\
	__u64 ia64_intri_res;					\
	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x));	\
								\
	ia64_intri_res;						\
})
#endif
/* Extract the exponent field of FP value x into an integer register. */
#define ia64_getf_exp(x)					\
({								\
	long ia64_intri_res;					\
								\
	asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x));	\
								\
	ia64_intri_res;						\
})
/* Shift-right-pair: concatenate a:b and shift right by the constant count. */
#define ia64_shrp(a, b, count)								\
({											\
	__u64 ia64_intri_res;								\
	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count));	\
	ia64_intri_res;									\
})
/*
 * FP loads into an explicitly named register f<regnum>:
 * single, double, extended, 8-byte integer form, and spill-fill format.
 */
#define ia64_ldfs(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfd(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfe(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf8(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf_fill(regnum, x)				\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x));	\
})
/* 4-byte store with release semantics and non-temporal-all hint. */
#define ia64_st4_rel_nta(m, val)					\
({									\
	asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val));	\
})
/*
 * FP stores from an explicitly named register f<regnum>:
 * single, double, extended, 8-byte integer form, and spill format.
 */
#define ia64_stfs(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfd(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfe(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf8(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf_spill(x, regnum)					\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})
/*
 * Atomic fetch-and-add of the constant inc to *p (4- and 8-byte forms,
 * acquire and release variants); each evaluates to the OLD value.
 */
#define ia64_fetchadd4_acq(p, inc)						\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd4.acq %0=[%1],%2"				\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc)		\
		      : "memory");						\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd4_rel(p, inc)						\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd4.rel %0=[%1],%2"				\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc)		\
		      : "memory");						\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd8_acq(p, inc)						\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd8.acq %0=[%1],%2"				\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc)		\
		      : "memory");						\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd8_rel(p, inc)						\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd8.rel %0=[%1],%2"				\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc)		\
		      : "memory");						\
										\
	ia64_intri_res;								\
})
/*
 * Atomic exchange of x with *ptr (1/2/4/8-byte forms); each evaluates
 * to the OLD value.
 */
#define ia64_xchg1(ptr,x)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("xchg1 %0=[%1],%2"					\
		      : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_xchg2(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg4(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg8(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})
/*
 * Atomic compare-and-exchange (1/2/4/8-byte, acquire and release forms):
 * the comparand is staged in ar.ccv first, then *ptr is swapped with new
 * iff *ptr == old.  Each evaluates to the OLD value of *ptr.
 */
#define ia64_cmpxchg1_acq(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_cmpxchg1_rel(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_cmpxchg2_acq(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_cmpxchg2_rel(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
										\
	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_cmpxchg4_acq(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_cmpxchg4_rel(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_cmpxchg8_acq(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_cmpxchg8_rel(ptr, new, old)					\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));				\
										\
	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv":				\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;								\
})
/* Memory fence / memory fence with acceptance, and full ALAT invalidate. */
#define ia64_mf()	asm volatile ("mf" ::: "memory")
#define ia64_mfa()	asm volatile ("mf.a" ::: "memory")

#define ia64_invala()	asm volatile ("invala" ::: "memory")
/* Translation hash: VHPT entry address for the given virtual address. */
#define ia64_native_thash(addr)							\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
	ia64_intri_res;								\
})
/* Instruction / data serialization. */
#define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory");

/* Data-value serialization directives: no-ops on assemblers without them. */
#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data()		asm volatile (".serialize.data");
# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction");
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif

#define ia64_nop(x)	asm volatile ("nop %0"::"i"(x));
/* Insert translation cache entries (instruction / data). */
#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")

#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")

/* Insert translation register entries (instruction / data). */
#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"		\
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"		\
					     :: "r"(trnum), "r"(addr) : "memory")

/* Translate virtual address to physical; evaluates to the physical address. */
#define ia64_tpa(addr)								\
({										\
	unsigned long ia64_pa;							\
	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
	ia64_pa;								\
})
/* Write the indexed debug / performance-monitor / protection-key registers. */
#define __ia64_set_dbr(index, val)					\
	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_ibr(index, val)					\
	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pkr(index, val)					\
	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmc(index, val)					\
	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmd(index, val)					\
	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_native_set_rr(index, val)					\
	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
/*
 * Read the indexed cpuid / debug / performance-monitor / protection-key /
 * region registers; each evaluates to the register's value.
 */
#define ia64_native_get_cpuid(index)						\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
	ia64_intri_res;								\
})

#define __ia64_get_dbr(index)							\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_ibr(index)							\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_pkr(index)							\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_pmc(index)							\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_native_get_pmd(index)						\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_native_get_rr(index)						\
({										\
	unsigned long ia64_intri_res;						\
	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
	ia64_intri_res;								\
})
/* Flush cache line containing addr. */
#define ia64_native_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")

/* Instruction-stream synchronize. */
#define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")

/* Set/reset system mask bits and user mask bits (mask is a constant). */
#define ia64_native_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
#define ia64_native_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
#define ia64_sum(mask)	asm volatile ("sum %0":: "i"((mask)) : "memory")
#define ia64_rum(mask)	asm volatile ("rum %0":: "i"((mask)) : "memory")

/* Purge translation cache entry. */
#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))
/*
 * Purge translation cache: global (ptc.ga) and local (ptc.l) forms,
 * followed by a data-value serialization; plus purge-translation-register
 * forms for the instruction and data TLBs.
 */
#define ia64_native_ptcga(addr, size)						\
do {										\
	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
	ia64_dv_serialize_data();						\
} while (0)

#define ia64_ptcl(addr, size)							\
do {										\
	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
	ia64_dv_serialize_data();						\
} while (0)

#define ia64_ptri(addr, size)						\
	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptrd(addr, size)						\
	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
/* Translation hashed tag for addr; evaluates to the tag value. */
#define ia64_ttag(addr)							\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
	ia64_intri_res;							\
})
/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
#define ia64_lfhint_none   0
#define ia64_lfhint_nt1    1
#define ia64_lfhint_nt2    2
#define ia64_lfhint_nta    3
/*
 * Line prefetch with a constant temporal-locality hint; the .excl forms
 * prefetch for exclusive (write) access, the .fault forms may raise
 * faults that a plain lfetch would suppress.
 */
#define ia64_lfetch(lfhint, y)					\
({								\
        switch (lfhint) {					\
        case ia64_lfhint_none:					\
                asm volatile ("lfetch [%0]" : : "r"(y));	\
                break;						\
        case ia64_lfhint_nt1:					\
                asm volatile ("lfetch.nt1 [%0]" : : "r"(y));	\
                break;						\
        case ia64_lfhint_nt2:					\
                asm volatile ("lfetch.nt2 [%0]" : : "r"(y));	\
                break;						\
        case ia64_lfhint_nta:					\
                asm volatile ("lfetch.nta [%0]" : : "r"(y));	\
                break;						\
        }							\
})

#define ia64_lfetch_excl(lfhint, y)					\
({									\
        switch (lfhint) {						\
        case ia64_lfhint_none:						\
                asm volatile ("lfetch.excl [%0]" :: "r"(y));		\
                break;							\
        case ia64_lfhint_nt1:						\
                asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nt2:						\
                asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nta:						\
                asm volatile ("lfetch.excl.nta [%0]" :: "r"(y));	\
                break;							\
        }								\
})

#define ia64_lfetch_fault(lfhint, y)					\
({									\
        switch (lfhint) {						\
        case ia64_lfhint_none:						\
                asm volatile ("lfetch.fault [%0]" : : "r"(y));		\
                break;							\
        case ia64_lfhint_nt1:						\
                asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y));	\
                break;							\
        case ia64_lfhint_nt2:						\
                asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y));	\
                break;							\
        case ia64_lfhint_nta:						\
                asm volatile ("lfetch.fault.nta [%0]" : : "r"(y));	\
                break;							\
        }								\
})

#define ia64_lfetch_fault_excl(lfhint, y)				\
({									\
        switch (lfhint) {						\
        case ia64_lfhint_none:						\
                asm volatile ("lfetch.fault.excl [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nt1:						\
                asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nt2:						\
                asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nta:						\
                asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y));	\
                break;							\
        }								\
})
609 #define ia64_native_intrin_local_irq_restore(x) \
611 asm volatile (";; cmp.ne p6,p7=%0,r0;;" \
615 :: "r"((x)) : "p6", "p7", "memory"); \
618 #endif /* _UAPI_ASM_IA64_GCC_INTRIN_H */