/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CACHE_H
#define __LINUX_CACHE_H

#include <uapi/linux/kernel.h>
#include <asm/cache.h>

#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif
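/*
 * Example (illustrative): assuming L1_CACHE_BYTES == 64, L1_CACHE_ALIGN()
 * rounds a byte count up to the next L1 cacheline boundary:
 *
 *	L1_CACHE_ALIGN(1)  == 64
 *	L1_CACHE_ALIGN(64) == 64
 *	L1_CACHE_ALIGN(65) == 128
 */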
#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
/**
 * SMP_CACHE_ALIGN - align a value to the L2 cacheline size
 * @x: value to align
 *
 * On some architectures, the L2 ("SMP") cacheline size is bigger than L1's,
 * and this sometimes needs to be accounted for.
 *
 * Return: aligned value.
 */
#ifndef SMP_CACHE_ALIGN
#define SMP_CACHE_ALIGN(x)	ALIGN(x, SMP_CACHE_BYTES)
#endif
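/*
 * Example (illustrative): a driver might size per-queue private areas so
 * that each queue starts on an L2/SMP cacheline boundary; struct
 * my_queue_priv is hypothetical:
 *
 *	size_t qsz = SMP_CACHE_ALIGN(sizeof(struct my_queue_priv));
 */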
/*
 * ``__aligned_largest`` aligns a field to the value most optimal for the
 * target architecture to perform memory operations. ``__LARGEST_ALIGN``
 * captures that actual value so it can be used anywhere else.
 */
#ifndef __LARGEST_ALIGN
#define __LARGEST_ALIGN		sizeof(struct { long x; } __aligned_largest)
#endif

#ifndef LARGEST_ALIGN
#define LARGEST_ALIGN(x)	ALIGN(x, __LARGEST_ALIGN)
#endif
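/*
 * Example (illustrative): reserving headroom large enough that whatever
 * follows stays suitably aligned for __aligned_largest fields; struct
 * my_hdr is hypothetical:
 *
 *	size_t headroom = LARGEST_ALIGN(sizeof(struct my_hdr));
 */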
/*
 * __read_mostly is used to keep rarely changing variables out of frequently
 * updated cachelines. Its use should be reserved for data that is used
 * frequently in hot paths. Performance traces can help decide when to use
 * this. You want __read_mostly data to be tightly packed, so that in the
 * best case multiple frequently read variables for a hot path will be next
 * to each other in order to reduce the number of cachelines needed to
 * execute a critical path. We should be mindful and selective of its use:
 * if you're going to use it, please supply a *good* justification in your
 * patch log.
 */
#ifndef __read_mostly
#define __read_mostly
#endif
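/*
 * Example (illustrative): a flag read on every packet but written only
 * from a rare control path; the variable name is hypothetical:
 *
 *	static int my_feature_enabled __read_mostly = 1;
 */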
/*
 * __ro_after_init is used to mark things that are read-only after init (i.e.
 * after mark_rodata_ro() has been called). These are effectively read-only,
 * but may get written to during init, so can't live in .rodata (via "const").
 */
#ifndef __ro_after_init
#define __ro_after_init __section(".data..ro_after_init")
#endif
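/*
 * Example (illustrative): a table filled in once during boot and never
 * written again; the name is hypothetical:
 *
 *	static unsigned long my_crc_table[256] __ro_after_init;
 */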
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif
#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif
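/*
 * Example (illustrative): the quadruple-underscore forms annotate struct
 * members, e.g. to push a contended lock onto its own cacheline on SMP
 * builds; struct my_counter is hypothetical:
 *
 *	struct my_counter {
 *		u64 value;
 *		spinlock_t lock ____cacheline_aligned_in_smp;
 *	};
 */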
#ifndef __cacheline_aligned
#define __cacheline_aligned					\
	__attribute__((__aligned__(SMP_CACHE_BYTES),		\
		       __section__(".data..cacheline_aligned")))
#endif /* __cacheline_aligned */
#ifndef __cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp __cacheline_aligned
#else
#define __cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif
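/*
 * Example (illustrative): the double-underscore forms are for variable
 * definitions and additionally place the object in the
 * .data..cacheline_aligned section; the variable and its type are
 * hypothetical:
 *
 *	static struct my_table my_global_table __cacheline_aligned_in_smp;
 */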
/*
 * The maximum alignment needed for some critical structures.
 * These could be inter-node cacheline sizes, L3 cacheline size, etc.
 * Define this in asm/cache.h for your arch.
 */
#ifndef INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
#endif
#if !defined(____cacheline_internodealigned_in_smp)
#if defined(CONFIG_SMP)
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
#else
#define ____cacheline_internodealigned_in_smp
#endif
#endif
#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
#define cache_line_size()	L1_CACHE_BYTES
#endif
#ifndef __cacheline_group_begin
#define __cacheline_group_begin(GROUP) \
	__u8 __cacheline_group_begin__##GROUP[0]
#endif

#ifndef __cacheline_group_end
#define __cacheline_group_end(GROUP) \
	__u8 __cacheline_group_end__##GROUP[0]
#endif
/**
 * __cacheline_group_begin_aligned - declare an aligned group start
 * @GROUP: name of the group
 * @...: optional group alignment
 *
 * The following block inside a struct:
 *
 *	__cacheline_group_begin_aligned(grp);
 *	field a;
 *	field b;
 *	__cacheline_group_end_aligned(grp);
 *
 * will always be aligned to either the specified alignment or
 * ``SMP_CACHE_BYTES``.
 */
#define __cacheline_group_begin_aligned(GROUP, ...)		\
	__cacheline_group_begin(GROUP)				\
	__aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
/**
 * __cacheline_group_end_aligned - declare an aligned group end
 * @GROUP: name of the group
 * @...: optional alignment (same as was in __cacheline_group_begin_aligned())
 *
 * Note that the end marker is aligned to sizeof(long) to allow more precise
 * size assertion. It also declares padding at the end to avoid the next
 * field falling into this cacheline.
 */
#define __cacheline_group_end_aligned(GROUP, ...)		\
	__cacheline_group_end(GROUP) __aligned(sizeof(long));	\
	struct { } __cacheline_group_pad__##GROUP		\
	__aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
#ifndef CACHELINE_ASSERT_GROUP_MEMBER
#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
	BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
		       offsetofend(TYPE, __cacheline_group_begin__##GROUP) && \
		       offsetofend(TYPE, MEMBER) <= \
		       offsetof(TYPE, __cacheline_group_end__##GROUP)))
#endif
#ifndef CACHELINE_ASSERT_GROUP_SIZE
#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE) \
	BUILD_BUG_ON(offsetof(TYPE, __cacheline_group_end__##GROUP) - \
		     offsetofend(TYPE, __cacheline_group_begin__##GROUP) > \
		     SIZE)
#endif
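/*
 * Example (illustrative): grouping hot fields and asserting the layout at
 * build time; struct my_dev and its fields are hypothetical:
 *
 *	struct my_dev {
 *		__cacheline_group_begin_aligned(hot);
 *		u32 rx_count;
 *		u32 tx_count;
 *		__cacheline_group_end_aligned(hot);
 *		u32 rarely_used;
 *	};
 *
 *	static void my_dev_assert_layout(void)
 *	{
 *		CACHELINE_ASSERT_GROUP_MEMBER(struct my_dev, hot, rx_count);
 *		CACHELINE_ASSERT_GROUP_SIZE(struct my_dev, hot,
 *					    SMP_CACHE_BYTES);
 *	}
 */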
/*
 * Helper to add padding within a struct, to ensure data falls into
 * separate cachelines.
 */
#if defined(CONFIG_SMP)
struct cacheline_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define CACHELINE_PADDING(name)	struct cacheline_padding name
#else
#define CACHELINE_PADDING(name)
#endif
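/*
 * Example (illustrative): keeping a writer-heavy counter away from
 * read-mostly configuration, in the style of struct zone in
 * include/linux/mmzone.h; struct my_stats is hypothetical:
 *
 *	struct my_stats {
 *		atomic_long_t writes;
 *		CACHELINE_PADDING(_pad1_);
 *		unsigned long config_flags;
 *	};
 */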
#ifdef ARCH_DMA_MINALIGN
#define ARCH_HAS_DMA_MINALIGN
#else
#define ARCH_DMA_MINALIGN __alignof__(unsigned long long)
#endif
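/*
 * Example (illustrative): keeping a buffer used for DMA from sharing a
 * cacheline with CPU-written fields; struct my_transfer is hypothetical:
 *
 *	struct my_transfer {
 *		spinlock_t lock;
 *		u8 dma_buf[64] __aligned(ARCH_DMA_MINALIGN);
 *	};
 */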
#endif /* __LINUX_CACHE_H */