#ifndef _LINUX_PERCPU_DEFS_H
#define _LINUX_PERCPU_DEFS_H

/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'sec' argument.  This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE!  The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due to the compiler generating the wrong code to
 * access that section.
 */
#define __PCPU_ATTRS(sec)						\
	__percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))	\
	PER_CPU_ATTRIBUTES

#define __PCPU_DUMMY_ATTRS						\
	__attribute__((section(".discard"), unused))

/*
 * Macro which verifies @ptr is a percpu pointer without evaluating
 * @ptr.  This is to be used in percpu accessors to verify that the
 * input parameter is a percpu pointer.
 */
#define __verify_pcpu_ptr(ptr)	do {					\
	const void __percpu *__vpp_verify = (typeof(ptr))NULL;		\
	(void)__vpp_verify;						\
} while (0)

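/*
 * Illustration only, not part of this header: percpu accessors are
 * expected to call __verify_pcpu_ptr() on their argument before using
 * it, so that passing a plain (non-__percpu) pointer is flagged by
 * sparse at build time while no object code is generated.  A simplified,
 * hypothetical accessor could be structured as:
 *
 *	#define my_per_cpu_ptr(ptr, cpu)				\
 *	({								\
 *		__verify_pcpu_ptr(ptr);					\
 *		SHIFT_PERCPU_PTR(ptr, per_cpu_offset(cpu));		\
 *	})
 */
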
/*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
 * references for them.  This is necessary because percpu sections
 * will be located outside of the usually addressable area.
 *
 * Weak definitions impose the following two extra restrictions on
 * defining percpu variables:
 *
 * 1. The symbol must be globally unique, even the static ones.
 * 2. Static percpu variables cannot be defined inside a function.
 *
 * Archs which need weak percpu definitions should define
 * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
 *
 * To ensure that the generic code observes the above two restrictions,
 * the weak definition is used for all cases if
 * CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set.
 */
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * __pcpu_scope_* dummy variable is used to enforce scope.  It
 * receives the static modifier when it's used in front of
 * DEFINE_PER_CPU() and will trigger build failure if
 * DECLARE_PER_CPU() is used for the same variable.
 *
 * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
 * such that hidden weak symbol collision, which will cause unrelated
 * variables to share the same address, can be detected during build.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
	__typeof__(type) name

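/*
 * Illustration only (foo is a made-up name): in this weak case, a
 * hypothetical DEFINE_PER_CPU(int, foo) expands to roughly
 *
 *	__PCPU_DUMMY_ATTRS char __pcpu_scope_foo;
 *	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_foo;
 *	__PCPU_DUMMY_ATTRS char __pcpu_unique_foo;
 *	__PCPU_ATTRS("") PER_CPU_DEF_ATTRIBUTES __weak __typeof__(int) foo;
 *
 * A prior "static" keyword attaches only to __pcpu_scope_foo, and
 * __pcpu_unique_foo stays global, so defining another percpu "foo"
 * anywhere else produces a duplicate-symbol build error instead of a
 * silent weak-symbol collision.
 */
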
#else
/*
 * Normal declaration and definition macros.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES			\
	__typeof__(type) name
#endif

/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables.
 */
#define DECLARE_PER_CPU(type, name)					\
	DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

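/*
 * Usage sketch, not part of this header (my_event_count is a made-up
 * name): a subsystem typically declares an ordinary per-CPU variable in
 * a header and defines it in exactly one .c file, then operates on the
 * local CPU's copy with the this_cpu_*() accessors:
 *
 *	DECLARE_PER_CPU(unsigned long, my_event_count);    // in a header
 *	DEFINE_PER_CPU(unsigned long, my_event_count);     // in one .c file
 *
 *	this_cpu_inc(my_event_count);                 // update local CPU's copy
 *	unsigned long n = per_cpu(my_event_count, cpu);  // read a given CPU's copy
 */
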
/*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables.
 */
#define DECLARE_PER_CPU_FIRST(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access by
 * other CPUs are reduced by preventing the data from unnecessarily spanning
 * cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DECLARE_PER_CPU_ALIGNED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

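/*
 * Illustration of the statistics pattern described above (struct and
 * field names are made up): each CPU updates only its own instance on
 * the hot path, and a reader collates the counts, e.g. from a proc
 * handler:
 *
 *	struct my_stats {
 *		unsigned long	rx;
 *		unsigned long	tx;
 *	};
 *	DEFINE_PER_CPU_SHARED_ALIGNED(struct my_stats, my_stats);
 *
 *	// hot path, local CPU only
 *	this_cpu_inc(my_stats.rx);
 *
 *	// slow path: sum across all CPUs
 *	unsigned long total = 0;
 *	int cpu;
 *	for_each_possible_cpu(cpu)
 *		total += per_cpu_ptr(&my_stats, cpu)->rx;
 */
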
/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

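/*
 * Usage sketch (hypothetical names): intended for per-CPU data that must
 * start on a page boundary, e.g. a structure that is also mapped or
 * exposed with page granularity:
 *
 *	struct my_desc_page {
 *		char data[PAGE_SIZE];
 *	};
 *	DEFINE_PER_CPU_PAGE_ALIGNED(struct my_desc_page, my_desc_page);
 */
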
/*
 * Declaration/definition used for per-CPU variables that must be read mostly.
 */
#define DECLARE_PER_CPU_READ_MOSTLY(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, "..readmostly")

#define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..readmostly")

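/*
 * Usage sketch (made-up names): suited to per-CPU data that is written
 * rarely (e.g. once during init) but read in hot paths, so it is grouped
 * away from frequently written percpu data:
 *
 *	DEFINE_PER_CPU_READ_MOSTLY(struct my_ctx *, my_ctx_ptr);
 *
 *	// init:     this_cpu_write(my_ctx_ptr, ctx);
 *	// hot path: ctx = this_cpu_read(my_ctx_ptr);
 */
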
/*
 * Intermodule exports for per-CPU variables.  sparse forgets the address
 * space across EXPORT_SYMBOL(), so turn EXPORT_PER_CPU_SYMBOL() into a
 * no-op when __CHECKER__ is defined.
 */
#ifndef __CHECKER__
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
#else
#define EXPORT_PER_CPU_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
#endif

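/*
 * Usage sketch (made-up name): a per-CPU variable that modules need to
 * access is defined and then exported with the per-CPU variant:
 *
 *	DEFINE_PER_CPU(int, my_shared_state);
 *	EXPORT_PER_CPU_SYMBOL(my_shared_state);
 */
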
#endif /* _LINUX_PERCPU_DEFS_H */