#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>

#ifdef __KERNEL__

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushfq ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popfq\n\t"

#define __EXTRA_CLOBBER \
	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT					\
		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	\
		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	\
		     "call __switch_to\n\t"				\
		     ".globl thread_return\n"				\
		     "thread_return:\n\t"				\
		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"		\
		     "movq %P[thread_info](%%rsi),%%r8\n\t"		\
		     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
		     "movq %%rax,%%rdi\n\t"				\
		     "jc ret_from_fork\n\t"				\
		     RESTORE_CONTEXT					\
		     : "=a" (last)					\
		     : [next] "S" (next), [prev] "D" (prev),		\
		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
		       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
		       [tif_fork] "i" (TIF_FORK),			\
		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
		     : "memory", "cc" __EXTRA_CLOBBER)
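
/*
 * Illustrative sketch (not part of this header): the scheduler's
 * context-switch path invokes the macro roughly like this, with the
 * third argument telling the resumed task which task it came from;
 * pick_next_task_somehow() is a hypothetical stand-in.
 *
 *	struct task_struct *prev = current;
 *	struct task_struct *next = pick_next_task_somehow();
 *
 *	switch_to(prev, next, prev);
 *	// when "prev" runs again, execution resumes at thread_return
 */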
extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg,value)	\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %1,%%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 8\n\t"			\
		".quad 1b,3b\n"			\
		".previous"			\
		: :"r" (value), "r" (0))
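
/*
 * Illustrative use (a sketch, with a hypothetical selector variable):
 *
 *	loadsegment(fs, fsindex);
 *
 * If the selector is stale or invalid, the fixup path above loads the
 * zero segment instead of killing the kernel with a fault.
 */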
#define set_debug(value,register) \
	__asm__("movq %0,%%db" #register \
		: /* no output */ \
		:"r" ((unsigned long) value))
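
/*
 * Illustrative use (hypothetical values): program a breakpoint address
 * into %db0 and the matching control bits into %db7.
 *
 *	set_debug(bp_addr, 0);
 *	set_debug(dr7_bits, 7);
 */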
#ifdef __KERNEL__
struct alt_instr {
	__u8 *instr;		/* original instruction */
	__u8 *replacement;
	__u8  cpuid;		/* cpuid bit set for replacement */
	__u8  instrlen;		/* length of original instruction */
	__u8  replacementlen;	/* length of new instruction, <= instrlen */
	__u8  pad[5];
};
#endif
/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length
 * of newinstr; oldinstr can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
		      ".section .altinstructions,\"a\"\n"	\
		      "  .align 8\n"				\
		      "  .quad 661b\n"		/* label */		\
		      "  .quad 663f\n"		/* new instruction */	\
		      "  .byte %c0\n"		/* feature bit */	\
		      "  .byte 662b-661b\n"	/* sourcelen */		\
		      "  .byte 664f-663f\n"	/* replacementlen */	\
		      ".previous\n"				\
		      ".section .altinstr_replacement,\"ax\"\n" \
		      "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
		      ".previous" :: "i" (feature) : "memory")
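
/*
 * Illustrative use (a sketch mirroring how the i386 code patches its
 * memory barriers; X86_FEATURE_XMM2 comes from <asm/cpufeature.h>):
 *
 *	alternative("lock; addl $0,0(%%rsp)", "mfence", X86_FEATURE_XMM2);
 *
 * CPUs with the feature bit get mfence patched in at boot; leftover
 * bytes of the longer original are filled with nops by the patcher.
 */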
/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r")
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement, make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
		      ".section .altinstructions,\"a\"\n"	\
		      "  .align 8\n"				\
		      "  .quad 661b\n"		/* label */		\
		      "  .quad 663f\n"		/* new instruction */	\
		      "  .byte %c0\n"		/* feature bit */	\
		      "  .byte 662b-661b\n"	/* sourcelen */		\
		      "  .byte 664f-663f\n"	/* replacementlen */	\
		      ".previous\n"				\
		      ".section .altinstr_replacement,\"ax\"\n" \
		      "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
		      ".previous" :: "i" (feature), ##input)
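
/*
 * Illustrative use (a sketch along the lines of the i386 prefetch()
 * helper; ASM_NOP4 and X86_FEATURE_XMM are assumed to be defined
 * elsewhere):
 *
 *	static inline void prefetch(const void *x)
 *	{
 *		alternative_input(ASM_NOP4, "prefetchnta (%1)",
 *				  X86_FEATURE_XMM, "r" (x));
 *	}
 *
 * Note the fixed-size "r" constraint, as recommended above; the input
 * is %1 because argument numbers start at 1.
 */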
/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
}

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" :: "r" (val));
}

#define stts() write_cr0(8 | read_cr0())
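
/*
 * Illustrative sketch (hypothetical helper): the lazy-FPU convention
 * clears CR0.TS around FPU use and sets it again afterwards so the
 * next FPU instruction traps; stts() sets bit 3 (TS), hence the 8.
 *
 *	static inline void fpu_critical_section(void)
 *	{
 *		clts();		// permit FPU/SSE instructions
 *		// ... use FPU state here ...
 *		stts();		// re-arm the device-not-available trap
 *	}
 */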
#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")

#endif	/* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")
#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
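
/*
 * Illustrative sketch (hypothetical flag; real code should use the
 * spinlock API): tas() yields a primitive test-and-set lock.
 *
 *	static volatile long trivial_lock;
 *
 *	while (tas(&trivial_lock))
 *		;			// spin until the old value was 0
 *	// ... critical section ...
 *	trivial_lock = 0;		// release the lock
 */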
#define __xg(x) ((volatile long *)(x))
extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;
}

#define _set_64bit set_64bit
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 * strictly the constraints are wrong, since *ptr is really an output
 * argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %k0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 8:
		__asm__ __volatile__("xchgq %0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	}
	return x;
}
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 8:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
				       (unsigned long)(n),sizeof(*(ptr))))
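
/*
 * Illustrative sketch (hypothetical counter): the usual cmpxchg retry
 * loop; success is detected by comparing the return value with the
 * old value, exactly as the comment above describes.
 *
 *	static unsigned long counter;
 *
 *	unsigned long old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */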
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do {} while(0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#endif
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()	asm volatile("sfence" ::: "memory")
#else
#define wmb()	asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()	do {} while(0)
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
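
/*
 * Illustrative sketch (hypothetical shared_data/data_ready pair):
 * smp_wmb() on the producer side pairs with smp_rmb() on the consumer
 * side.
 *
 *	// producer
 *	shared_data = compute();
 *	smp_wmb();		// data store ordered before flag store
 *	data_ready = 1;
 *
 *	// consumer
 *	while (!data_ready)
 *		;
 *	smp_rmb();		// flag load ordered before data load
 *	use(shared_data);
 */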
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

/* interrupt control */
#define local_save_flags(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_restore(x)	__asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti defers interrupt delivery until after the
   following instruction, so "sti; hlt" is effectively atomic */
#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
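
/*
 * Illustrative sketch (in the style of a default_idle() loop; the
 * need_resched() check is the usual pattern, not mandated here):
 *
 *	local_irq_disable();
 *	if (!need_resched())
 *		safe_halt();	// sti;hlt: a wakeup cannot slip in between
 *	else
 *		local_irq_enable();
 */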
#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	!(flags & (1<<9));	/* IF is bit 9 of RFLAGS */	\
})
/* For spinlocks etc */
#define local_irq_save(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
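
/*
 * Illustrative sketch (hypothetical critical section): the canonical
 * pairing with local_irq_restore(), safe whether or not interrupts
 * were already disabled on entry.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	// ... touch interrupt-shared state ...
 *	local_irq_restore(flags);
 */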
void cpu_idle_wait(void);

/*
 * Disable hlt during certain critical I/O operations.
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#define HAVE_EAT_KEY
void eat_key(void);

extern unsigned long arch_align_stack(unsigned long sp);

#endif