include/asm-x86/pda.h
#ifndef X86_64_PDA_H
#define X86_64_PDA_H

#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <asm/page.h>

/* Per-processor data structure.  %gs points to it while the kernel runs. */
struct x8664_pda {
        struct task_struct *pcurrent;   /* 0  Current process */
        unsigned long data_offset;      /* 8  Per-cpu data offset from linker
                                              address */
        unsigned long kernelstack;      /* 16 top of kernel stack for current */
        unsigned long oldrsp;           /* 24 user rsp for system call */
        int irqcount;                   /* 32 IRQ nesting counter.  Starts at -1 */
        unsigned int cpunumber;         /* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
        unsigned long stack_canary;     /* 40 stack canary value */
                                        /* gcc ABI: this canary MUST be at
                                           offset 40!!! */
#endif
        char *irqstackptr;
        unsigned int __softirq_pending;
        unsigned int __nmi_count;       /* number of NMIs on this CPU */
        short mmu_state;
        short isidle;
        struct mm_struct *active_mm;
        unsigned apic_timer_irqs;
        unsigned irq0_irqs;
        unsigned irq_resched_count;
        unsigned irq_call_count;
        unsigned irq_tlb_count;
        unsigned irq_thermal_count;
        unsigned irq_threshold_count;
        unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;
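
/*
 * For reference: get_current() on x86-64 is built on this structure --
 * the running task is cached in pcurrent and fetched with
 * read_pda(pcurrent) (see the accessor macros below).
 */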

extern struct x8664_pda *_cpu_pda[];
extern struct x8664_pda boot_cpu_pda[];
extern void pda_init(int);

#define cpu_pda(i) (_cpu_pda[i])
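
/*
 * Example (illustrative): cpu_pda() indexes another CPU's PDA directly,
 * e.g. to look at the task currently running there:
 *
 *      struct task_struct *t = cpu_pda(cpu)->pcurrent;
 */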

/*
 * There is no fast way to get the base address of the PDA; all accesses
 * have to mention %fs/%gs.  So it needs to be done this Torvaldian way.
 */
extern void __bad_pda_field(void) __attribute__((noreturn));

/*
 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
 * all PDA accesses so that it gets read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;

#define pda_offset(field) offsetof(struct x8664_pda, field)

#define pda_to_op(op, field, val) \
do { \
        typedef typeof(_proxy_pda.field) T__; \
        if (0) { T__ tmp__; tmp__ = (val); }    /* type checking */ \
        switch (sizeof(_proxy_pda.field)) { \
        case 2: \
                asm(op "w %1,%%gs:%c2" : \
                    "+m" (_proxy_pda.field) : \
                    "ri" ((T__)val), \
                    "i" (pda_offset(field))); \
                break; \
        case 4: \
                asm(op "l %1,%%gs:%c2" : \
                    "+m" (_proxy_pda.field) : \
                    "ri" ((T__)val), \
                    "i" (pda_offset(field))); \
                break; \
        case 8: \
                asm(op "q %1,%%gs:%c2" : \
                    "+m" (_proxy_pda.field) : \
                    "ri" ((T__)val), \
                    "i" (pda_offset(field))); \
                break; \
        default: \
                __bad_pda_field(); \
        } \
} while (0)
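
/*
 * For example, write_pda(kernelstack, value) (defined below) selects the
 * 8-byte case above and compiles down to roughly:
 *
 *      movq %reg,%gs:16        (16 == offsetof(struct x8664_pda, kernelstack))
 *
 * i.e. the field is updated %gs-relative, without first loading the PDA's
 * base address into a register.
 */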

#define pda_from_op(op, field) \
({ \
        typeof(_proxy_pda.field) ret__; \
        switch (sizeof(_proxy_pda.field)) { \
        case 2: \
                asm(op "w %%gs:%c1,%0" : \
                    "=r" (ret__) : \
                    "i" (pda_offset(field)), \
                    "m" (_proxy_pda.field)); \
                break; \
        case 4: \
                asm(op "l %%gs:%c1,%0" : \
                    "=r" (ret__) : \
                    "i" (pda_offset(field)), \
                    "m" (_proxy_pda.field)); \
                break; \
        case 8: \
                asm(op "q %%gs:%c1,%0" : \
                    "=r" (ret__) : \
                    "i" (pda_offset(field)), \
                    "m" (_proxy_pda.field)); \
                break; \
        default: \
                __bad_pda_field(); \
        } \
        ret__; \
})

#define read_pda(field) pda_from_op("mov", field)
#define write_pda(field, val) pda_to_op("mov", field, val)
#define add_pda(field, val) pda_to_op("add", field, val)
#define sub_pda(field, val) pda_to_op("sub", field, val)
#define or_pda(field, val) pda_to_op("or", field, val)
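
/*
 * Example usage (illustrative), in the style of the x86-64 context-switch
 * and IRQ-accounting code:
 *
 *      prev->usersp = read_pda(oldrsp);
 *      write_pda(oldrsp, next->usersp);
 *      write_pda(pcurrent, next_p);
 *      add_pda(irq_call_count, 1);
 */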

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define test_and_clear_bit_pda(bit, field) \
({ \
        int old__; \
        asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
                     : "=r" (old__), "+m" (_proxy_pda.field) \
                     : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \
        old__; \
})
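
/*
 * Example (illustrative), as in __exit_idle(): consume this CPU's idle
 * flag and bail out if it was not set, with preemption already disabled:
 *
 *      if (test_and_clear_bit_pda(0, isidle) == 0)
 *              return;
 */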

#endif  /* !__ASSEMBLY__ */

#define PDA_STACKOFFSET (5*8)

#endif  /* X86_64_PDA_H */