#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif
#ifdef __ASSEMBLY__

/*
 * PER_CPU finds the address of a per-cpu variable: "var" is the variable
 * name and "reg" a 32-bit register.  The resulting address is stored in
 * the "reg" argument, e.g.:
 *
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
	lea var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
#define PER_CPU_VAR(var)	var
#endif	/* SMP */
#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	var
#endif

#else /* ...!ASSEMBLY */
#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset		this_cpu_read(this_cpu_off)
/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define arch_raw_cpu_ptr(ptr)				\
({							\
	unsigned long tcp_ptr__;			\
	asm volatile("add " __percpu_arg(1) ", %0"	\
		     : "=r" (tcp_ptr__)			\
		     : "m" (this_cpu_off), "0" (ptr));	\
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
})
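
/*
 * Usage sketch (not part of this header): arch_raw_cpu_ptr() is what the
 * generic this_cpu_ptr()/raw_cpu_ptr() wrappers expand to on x86, folding
 * the this_cpu_off load and the pointer add into a single instruction.
 * "my_stats" below is a hypothetical per-cpu variable:
 *
 *	DEFINE_PER_CPU(struct my_stats, my_stats);
 *	struct my_stats *s = raw_cpu_ptr(&my_stats);
 *
 * which compiles to roughly "add %gs:this_cpu_off, <reg>" (%fs on 32-bit).
 */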
#else
#define __percpu_prefix		""
#endif
#define __percpu_arg(x)		__percpu_prefix "%" #x
/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor need to use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There also must be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)
#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	var
#endif
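
/*
 * Usage sketch (the names here are an assumption about the usual x86_64
 * boot-path user, not definitions from this header): a per-cpu variable
 * referenced before the per-cpu areas exist, such as gdt_page, is declared
 * with
 *
 *	DECLARE_INIT_PER_CPU(gdt_page);
 *
 * and then accessed as init_per_cpu_var(gdt_page), i.e. the
 * init_per_cpu__gdt_page symbol that the linker script provides at the
 * variable's offset from __per_cpu_load.
 */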
/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);
#define percpu_to_op(op, var, val)			\
	typedef typeof(var) pto_T__;			\
	switch (sizeof(var)) {				\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "qi" ((pto_T__)(val)));		\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "ri" ((pto_T__)(val)));		\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "ri" ((pto_T__)(val)));		\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "re" ((pto_T__)(val)));		\
	default: __bad_percpu_size();			\
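
/*
 * Sketch of what percpu_to_op() expands to: the operand-size suffix is
 * picked from sizeof(var), so with a hypothetical per-cpu u16 "my_u16",
 *
 *	this_cpu_write(my_u16, 3);
 *
 * becomes the single instruction "movw $3, %gs:my_u16" (%fs on 32-bit).
 */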
/*
 * Generate a percpu add to memory instruction and optimize code
 * if one is added or subtracted.
 */
#define percpu_add_op(var, val)						\
	typedef typeof(var) pao_T__;					\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ?		\
	switch (sizeof(var)) {						\
			asm("incb "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decb "__percpu_arg(0) : "+m" (var));	\
			asm("addb %1, "__percpu_arg(0)			\
			    : "qi" ((pao_T__)(val)));			\
			asm("incw "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decw "__percpu_arg(0) : "+m" (var));	\
			asm("addw %1, "__percpu_arg(0)			\
			    : "ri" ((pao_T__)(val)));			\
			asm("incl "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decl "__percpu_arg(0) : "+m" (var));	\
			asm("addl %1, "__percpu_arg(0)			\
			    : "ri" ((pao_T__)(val)));			\
			asm("incq "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decq "__percpu_arg(0) : "+m" (var));	\
			asm("addq %1, "__percpu_arg(0)			\
			    : "re" ((pao_T__)(val)));			\
	default: __bad_percpu_size();					\
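
/*
 * Codegen sketch for the inc/dec optimization above, assuming a
 * hypothetical per-cpu int "my_counter":
 *
 *	this_cpu_add(my_counter, 1);	->  incl %gs:my_counter
 *	this_cpu_add(my_counter, -1);	->  decl %gs:my_counter
 *	this_cpu_add(my_counter, 7);	->  addl $7, %gs:my_counter
 */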
#define percpu_from_op(op, var)				\
	typeof(var) pfo_ret__;				\
	switch (sizeof(var)) {				\
		asm(op "b "__percpu_arg(1)",%0"		\
		asm(op "w "__percpu_arg(1)",%0"		\
		asm(op "l "__percpu_arg(1)",%0"		\
		asm(op "q "__percpu_arg(1)",%0"		\
	default: __bad_percpu_size();			\
#define percpu_stable_op(op, var)			\
	typeof(var) pfo_ret__;				\
	switch (sizeof(var)) {				\
		asm(op "b "__percpu_arg(P1)",%0"	\
		asm(op "w "__percpu_arg(P1)",%0"	\
		asm(op "l "__percpu_arg(P1)",%0"	\
		asm(op "q "__percpu_arg(P1)",%0"	\
	default: __bad_percpu_size();			\
#define percpu_unary_op(op, var)			\
	switch (sizeof(var)) {				\
		asm(op "b "__percpu_arg(0)		\
		asm(op "w "__percpu_arg(0)		\
		asm(op "l "__percpu_arg(0)		\
		asm(op "q "__percpu_arg(0)		\
	default: __bad_percpu_size();			\
/*
 * Add return operation
 */
#define percpu_add_return_op(var, val)			\
	typeof(var) paro_ret__ = val;			\
	switch (sizeof(var)) {				\
		asm("xaddb %0, "__percpu_arg(1)		\
			  : "+q" (paro_ret__), "+m" (var)	\
		asm("xaddw %0, "__percpu_arg(1)		\
			  : "+r" (paro_ret__), "+m" (var)	\
		asm("xaddl %0, "__percpu_arg(1)		\
			  : "+r" (paro_ret__), "+m" (var)	\
		asm("xaddq %0, "__percpu_arg(1)		\
			  : "+re" (paro_ret__), "+m" (var)	\
	default: __bad_percpu_size();			\
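
/*
 * Usage sketch: the add-return flavour is built on xadd, so the caller
 * gets the updated value back from a single read-modify-write, e.g. with
 * a hypothetical per-cpu int "my_counter":
 *
 *	int cur = this_cpu_add_return(my_counter, 1);
 */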
/*
 * xchg is implemented using cmpxchg without a lock prefix. xchg is
 * expensive due to the implied lock prefix.  The processor cannot prefetch
 * cachelines if xchg is used.
 */
#define percpu_xchg_op(var, nval)				\
	typeof(var) pxo_ret__;					\
	typeof(var) pxo_new__ = (nval);				\
	switch (sizeof(var)) {					\
		asm("\n\tmov "__percpu_arg(1)",%%al"		\
		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)	\
			  : "=&a" (pxo_ret__), "+m" (var)	\
		asm("\n\tmov "__percpu_arg(1)",%%ax"		\
		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)	\
			  : "=&a" (pxo_ret__), "+m" (var)	\
		asm("\n\tmov "__percpu_arg(1)",%%eax"		\
		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)	\
			  : "=&a" (pxo_ret__), "+m" (var)	\
		asm("\n\tmov "__percpu_arg(1)",%%rax"		\
		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)	\
			  : "=&a" (pxo_ret__), "+m" (var)	\
	default: __bad_percpu_size();				\
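
/*
 * Usage sketch: because the cmpxchg above carries no lock prefix, this is
 * only safe against the local CPU itself; the loop simply retries if an
 * interrupt on this CPU modified the variable in between.  With a
 * hypothetical per-cpu variable "my_state":
 *
 *	old = this_cpu_xchg(my_state, NEW_STATE);
 */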
/*
 * cmpxchg has no such implied lock semantics; as a result it is much
 * more efficient for cpu local operations.
 */
#define percpu_cmpxchg_op(var, oval, nval)			\
	typeof(var) pco_ret__;					\
	typeof(var) pco_old__ = (oval);				\
	typeof(var) pco_new__ = (nval);				\
	switch (sizeof(var)) {					\
		asm("cmpxchgb %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "q" (pco_new__), "0" (pco_old__)		\
		asm("cmpxchgw %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
		asm("cmpxchgl %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
		asm("cmpxchgq %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
	default: __bad_percpu_size();				\
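
/*
 * Usage sketch (hypothetical per-cpu variable "my_owner"): the old value
 * is returned, so a lock-prefix-free claim of a per-cpu slot looks like
 *
 *	if (this_cpu_cmpxchg(my_owner, 0, 1) == 0)
 *		... this CPU now owns the slot ...
 */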
/*
 * this_cpu_read() makes gcc load the percpu variable every time it is
 * accessed while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info() both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define this_cpu_read_stable(var)	percpu_stable_op("mov", var)
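
/*
 * Example of the distinction (current_task is the real x86 per-cpu
 * variable behind get_current(); "my_counter" is hypothetical):
 *
 *	struct task_struct *t = this_cpu_read_stable(current_task);
 *	long n = this_cpu_read(my_counter);
 *
 * The first read may be cached by gcc across the function, since it only
 * changes together with the running task; the second is re-read on every
 * access.
 */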
#define raw_cpu_read_1(pcp)		percpu_from_op("mov", pcp)
#define raw_cpu_read_2(pcp)		percpu_from_op("mov", pcp)
#define raw_cpu_read_4(pcp)		percpu_from_op("mov", pcp)

#define raw_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_add_1(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_add_2(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_add_4(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_and_1(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_and_2(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_and_4(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_xchg_1(pcp, val)	percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)	percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)	percpu_xchg_op(pcp, val)

#define this_cpu_read_1(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_read_2(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_read_4(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)

#define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#ifdef CONFIG_X86_CMPXCHG64
#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)		\
	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
		    : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
		    : "b" (__n1), "c" (__n2), "a" (__o1));		\

#define raw_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#define this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#endif /* CONFIG_X86_CMPXCHG64 */
/*
 * Per cpu atomic 64 bit operations are only available under 64 bit.
 * 32 bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp)			percpu_from_op("mov", pcp)
#define raw_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
#define raw_cpu_add_8(pcp, val)			percpu_add_op((pcp), val)
#define raw_cpu_and_8(pcp, val)			percpu_to_op("and", (pcp), val)
#define raw_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_read_8(pcp)			percpu_from_op("mov", pcp)
#define this_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
#define this_cpu_add_8(pcp, val)		percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val)		percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
/*
 * Pretty complex macro to generate cmpxchg16 instruction.  The instruction
 * is not supported on early AMD64 processors so we must be able to emulate
 * it in software.  The address used in the cmpxchg16 instruction must be
 * aligned to a 16 byte boundary.
 */
#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2)		\
	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
	alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
		       "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t",	\
		       ASM_OUTPUT2("=a" (__ret), "+m" (pcp1),		\
				   "+m" (pcp2), "+d" (__o2)),		\
		       "b" (__n1), "c" (__n2), "a" (__o1) : "rsi");	\

#define raw_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
#define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double

#endif /* CONFIG_X86_64 */
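
/*
 * Usage sketch for the double-word variants: both members must be
 * adjacent and suitably aligned.  The SLUB allocator's (freelist, tid)
 * pair is the typical user; schematically:
 *
 *	this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				old_freelist, old_tid,
 *				new_freelist, new_tid);
 *
 * (field and variable names above are illustrative, not definitions from
 * this header)
 */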
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)				\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (var)				\
		     : "dIr" (bit));					\
	old__;								\
})
static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
                        const unsigned long __percpu *addr)
{
	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;

#ifdef CONFIG_X86_64
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
#else
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
#endif
}
static inline int x86_this_cpu_variable_test_bit(int nr,
                        const unsigned long __percpu *addr)
{
	int oldbit;

	asm volatile("bt "__percpu_arg(2)",%1\n\t"
			"sbb %0,%0"
			: "=r" (oldbit)
			: "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}
#define x86_this_cpu_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))			\
	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
	 : x86_this_cpu_variable_test_bit((nr), (addr)))
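
/*
 * Usage sketch: the wrapper picks the constant- or variable-"nr" helper at
 * compile time; the constant form becomes a plain per-cpu read plus mask,
 * the variable form a bt against the per-cpu segment.  With a hypothetical
 * per-cpu bitmap "my_bitmap":
 *
 *	x86_this_cpu_test_bit(3, my_bitmap);
 *	x86_this_cpu_test_bit(nr, my_bitmap);
 */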
#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);		\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))
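
/*
 * Usage sketch (the variable name is an assumption about the usual x86
 * user, not something this header defines): the apic code does roughly
 *
 *	DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
 *
 * and reads it with early_per_cpu(x86_cpu_to_apicid, cpu), which uses the
 * __initdata early map until the per-cpu areas are set up and the
 * _early_ptr is cleared, after which it falls through to the normal
 * per_cpu() access.
 */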
#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */