#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */

#include <linux/compiler.h>

/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier()	asm volatile ("":::"memory")

#define ia64_stop()	asm volatile (";;"::)

#define ia64_invala_gr(regnum)	asm volatile ("invala.e r%0" :: "i"(regnum))

#define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))

extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);

register unsigned long ia64_r13 asm ("r13") __used;
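
/*
 * Note: on ia64 the kernel keeps the current task pointer in r13 (the
 * thread pointer register); this is what ia64_getreg(_IA64_REG_TP)
 * below reads.
 */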

#define ia64_setreg(regnum, val)					\
({									\
	switch (regnum) {						\
	case _IA64_REG_PSR_L:						\
		asm volatile ("mov psr.l=%0" :: "r"(val) : "memory");	\
		break;							\
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:			\
		asm volatile ("mov ar%0=%1" ::				\
			      "i" (regnum - _IA64_REG_AR_KR0),		\
			      "r"(val) : "memory");			\
		break;							\
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:			\
		asm volatile ("mov cr%0=%1" ::				\
			      "i" (regnum - _IA64_REG_CR_DCR),		\
			      "r"(val) : "memory");			\
		break;							\
	case _IA64_REG_SP:						\
		asm volatile ("mov r12=%0" ::				\
			      "r"(val) : "memory");			\
		break;							\
	case _IA64_REG_GP:						\
		asm volatile ("mov gp=%0" :: "r"(val) : "memory");	\
		break;							\
	default:							\
		ia64_bad_param_for_setreg();				\
		break;							\
	}								\
})

#define ia64_getreg(regnum)						\
({									\
	__u64 ia64_intri_res;						\
									\
	switch (regnum) {						\
	case _IA64_REG_GP:						\
		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res));	\
		break;							\
	case _IA64_REG_IP:						\
		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res));	\
		break;							\
	case _IA64_REG_PSR:						\
		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res));	\
		break;							\
	case _IA64_REG_TP:	/* for current() */			\
		ia64_intri_res = ia64_r13;				\
		break;							\
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:			\
		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res)	\
			      : "i"(regnum - _IA64_REG_AR_KR0));	\
		break;							\
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:			\
		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res)	\
			      : "i" (regnum - _IA64_REG_CR_DCR));	\
		break;							\
	case _IA64_REG_SP:						\
		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res));	\
		break;							\
	default:							\
		ia64_bad_param_for_getreg();				\
		break;							\
	}								\
	ia64_intri_res;							\
})
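
/*
 * Illustrative usage (not part of the original header): regnum must be a
 * compile-time _IA64_REG_* constant (from <asm/ia64regs.h>), so the switch
 * above folds down to a single mov at compile time.
 *
 *	unsigned long sp = ia64_getreg(_IA64_REG_SP);
 *	ia64_setreg(_IA64_REG_AR_KR0, 0UL);
 */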

#define ia64_hint_pause 0

#define ia64_hint(mode)						\
({								\
	switch (mode) {						\
	case ia64_hint_pause:					\
		asm volatile ("hint @pause" ::: "memory");	\
		break;						\
	}							\
})

/* Integer values for mux1 instruction */
#define ia64_mux1_brcst	0
#define ia64_mux1_mix	8
#define ia64_mux1_shuf	9
#define ia64_mux1_alt	10
#define ia64_mux1_rev	11

#define ia64_mux1(x, mode)							\
({										\
	__u64 ia64_intri_res;							\
										\
	switch (mode) {								\
	case ia64_mux1_brcst:							\
		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_mix:							\
		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_shuf:							\
		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_alt:							\
		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_rev:							\
		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	}									\
	ia64_intri_res;								\
})
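
/*
 * Illustrative note (not in the original header): @rev reverses the byte
 * order of the operand, e.g. ia64_mux1(0x0102030405060708UL, ia64_mux1_rev)
 * yields 0x0807060504030201; this is how ia64 implements swab64().
 */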

#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
# define ia64_popcnt(x)	__builtin_popcountl(x)
#else
# define ia64_popcnt(x)						\
({								\
	__u64 ia64_intri_res;					\
	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x));	\
	ia64_intri_res;						\
})
#endif
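
/*
 * Example (illustrative): ia64_popcnt(0xf0fUL) == 8.  On gcc >= 3.4 the
 * builtin generates the same popcnt instruction, so both branches of the
 * conditional above are equivalent.
 */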

#define ia64_getf_exp(x)					\
({								\
	long ia64_intri_res;					\
	asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x));	\
	ia64_intri_res;						\
})

#define ia64_shrp(a, b, count)								\
({											\
	__u64 ia64_intri_res;								\
	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count));	\
	ia64_intri_res;									\
})
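
/*
 * Illustrative note: shrp shifts the 128-bit concatenation a:b right by
 * count bits and returns the low 64 bits.  count must be a compile-time
 * constant (0..63) because of the "i" constraint.
 */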

#define ia64_ldfs(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfd(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfe(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf8(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf_fill(regnum, x)				\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_st4_rel_nta(m, val)					\
({									\
	asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val));	\
})

#define ia64_stfs(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfd(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfe(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf8(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf_spill(x, regnum)					\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_fetchadd4_acq(p, inc)					\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("fetchadd4.acq %0=[%1],%2"			\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
		      : "memory");					\
	ia64_intri_res;							\
})

#define ia64_fetchadd4_rel(p, inc)					\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("fetchadd4.rel %0=[%1],%2"			\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
		      : "memory");					\
	ia64_intri_res;							\
})

#define ia64_fetchadd8_acq(p, inc)					\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("fetchadd8.acq %0=[%1],%2"			\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
		      : "memory");					\
	ia64_intri_res;							\
})

#define ia64_fetchadd8_rel(p, inc)					\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("fetchadd8.rel %0=[%1],%2"			\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
		      : "memory");					\
	ia64_intri_res;							\
})
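
/*
 * Illustrative usage (hypothetical counter, not part of this header).
 * Because inc has an "i" constraint, it must be one of the immediates the
 * fetchadd instruction accepts (+/- 1, 4, 8, 16); the pre-increment value
 * is returned.
 *
 *	static volatile int example_count;
 *	int old = ia64_fetchadd4_acq(&example_count, 1);
 */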

#define ia64_xchg1(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg1 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg2(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg4(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg8(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_cmpxchg1_acq(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv" :		\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;							\
})

#define ia64_cmpxchg1_rel(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv" :		\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;							\
})

#define ia64_cmpxchg2_acq(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv" :		\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;							\
})

#define ia64_cmpxchg2_rel(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv" :		\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;							\
})

#define ia64_cmpxchg4_acq(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv" :		\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;							\
})

#define ia64_cmpxchg4_rel(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv" :		\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;							\
})

#define ia64_cmpxchg8_acq(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv" :		\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;							\
})

#define ia64_cmpxchg8_rel(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv" :		\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;							\
})
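
/*
 * Illustrative sketch (hypothetical helpers, not part of this header): a
 * test-and-set style lock built from the primitives above.  The cmpxchg
 * macros compare *ptr against old (via ar.ccv), store new on a match, and
 * always return the prior value.
 *
 *	static inline void example_lock(volatile __u32 *lock)
 *	{
 *		while (ia64_cmpxchg4_acq(lock, 1, 0) != 0)
 *			ia64_hint(ia64_hint_pause);	// spin politely
 *	}
 *
 *	static inline void example_unlock(volatile __u32 *lock)
 *	{
 *		ia64_st4_rel_nta(lock, 0);		// release store
 *	}
 */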

#define ia64_mf()	asm volatile ("mf" ::: "memory")
#define ia64_mfa()	asm volatile ("mf.a" ::: "memory")

#define ia64_invala()	asm volatile ("invala" ::: "memory")

#define ia64_thash(addr)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
	ia64_intri_res;							\
})

#define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory")

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data()		asm volatile (".serialize.data")
# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction")
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif

#define ia64_nop(x)	asm volatile ("nop %0"::"i"(x))

#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")

#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")


#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"		\
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"		\
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_tpa(addr)							\
({									\
	__u64 ia64_pa;							\
	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
	ia64_pa;							\
})
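
/*
 * Illustrative note: tpa translates a mapped virtual address to its
 * physical address, e.g. __u64 pa = ia64_tpa((__u64) some_kernel_ptr);
 * the translation must be present, otherwise the instruction may fault.
 */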

#define __ia64_set_dbr(index, val)					\
	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_ibr(index, val)					\
	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pkr(index, val)					\
	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmc(index, val)					\
	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmd(index, val)					\
	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_rr(index, val)						\
	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_get_cpuid(index)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
	ia64_intri_res;							\
})

#define __ia64_get_dbr(index)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;							\
})

#define ia64_get_ibr(index)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;							\
})

#define ia64_get_pkr(index)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;							\
})

#define ia64_get_pmc(index)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;							\
})

#define ia64_get_pmd(index)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;							\
})

#define ia64_get_rr(index)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
	ia64_intri_res;							\
})

#define ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")


#define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")

#define ia64_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
#define ia64_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
#define ia64_sum(mask)	asm volatile ("sum %0":: "i"((mask)) : "memory")
#define ia64_rum(mask)	asm volatile ("rum %0":: "i"((mask)) : "memory")

#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))

#define ia64_ptcga(addr, size)						\
do {									\
	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
	ia64_dv_serialize_data();					\
} while (0)

#define ia64_ptcl(addr, size)						\
do {									\
	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
	ia64_dv_serialize_data();					\
} while (0)

#define ia64_ptri(addr, size)						\
	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptrd(addr, size)						\
	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")

/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */

#define ia64_lfhint_none	0
#define ia64_lfhint_nt1		1
#define ia64_lfhint_nt2		2
#define ia64_lfhint_nta		3

#define ia64_lfetch(lfhint, y)					\
({								\
	switch (lfhint) {					\
	case ia64_lfhint_none:					\
		asm volatile ("lfetch [%0]" : : "r"(y));	\
		break;						\
	case ia64_lfhint_nt1:					\
		asm volatile ("lfetch.nt1 [%0]" : : "r"(y));	\
		break;						\
	case ia64_lfhint_nt2:					\
		asm volatile ("lfetch.nt2 [%0]" : : "r"(y));	\
		break;						\
	case ia64_lfhint_nta:					\
		asm volatile ("lfetch.nta [%0]" : : "r"(y));	\
		break;						\
	}							\
})

#define ia64_lfetch_excl(lfhint, y)					\
({									\
	switch (lfhint) {						\
	case ia64_lfhint_none:						\
		asm volatile ("lfetch.excl [%0]" :: "r"(y));		\
		break;							\
	case ia64_lfhint_nt1:						\
		asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nt2:						\
		asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nta:						\
		asm volatile ("lfetch.excl.nta [%0]" :: "r"(y));	\
		break;							\
	}								\
})

#define ia64_lfetch_fault(lfhint, y)					\
({									\
	switch (lfhint) {						\
	case ia64_lfhint_none:						\
		asm volatile ("lfetch.fault [%0]" : : "r"(y));		\
		break;							\
	case ia64_lfhint_nt1:						\
		asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y));	\
		break;							\
	case ia64_lfhint_nt2:						\
		asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y));	\
		break;							\
	case ia64_lfhint_nta:						\
		asm volatile ("lfetch.fault.nta [%0]" : : "r"(y));	\
		break;							\
	}								\
})

#define ia64_lfetch_fault_excl(lfhint, y)				\
({									\
	switch (lfhint) {						\
	case ia64_lfhint_none:						\
		asm volatile ("lfetch.fault.excl [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nt1:						\
		asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nt2:						\
		asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nta:						\
		asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y));	\
		break;							\
	}								\
})
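
/*
 * Illustrative usage (hypothetical loop, not part of this header): prefetch
 * ahead with a non-temporal hint while walking a large array.  The non-fault
 * forms silently ignore bad addresses, so prefetching past the end of the
 * array is harmless.
 *
 *	for (i = 0; i < n; i++) {
 *		ia64_lfetch(ia64_lfhint_nta, &data[i + 8]);
 *		process(&data[i]);
 *	}
 */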

#define ia64_intrin_local_irq_restore(x)			\
do {								\
	asm volatile (";;   cmp.ne p6,p7=%0,r0;;"		\
		      "(p6) ssm psr.i;"				\
		      "(p7) rsm psr.i;;"			\
		      "(p6) srlz.d"				\
		      :: "r"((x)) : "p6", "p7", "memory");	\
} while (0)
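
/*
 * Note (illustrative): the predicated sequence above re-enables interrupts
 * (ssm psr.i) when x is non-zero and masks them (rsm psr.i) when x is zero,
 * i.e. x is the flags word saved by the matching local_irq_save().
 */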

#endif /* _ASM_IA64_GCC_INTRIN_H */