/*
 * Copyright (C) 2013 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H

static inline void set_my_cpu_offset(unsigned long off)
{
	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
}

static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm("mrs %0, tpidr_el1" : "=r" (off) :
		"Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()
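
/*
 * PERCPU_OP() generates the size-switched read-modify-write helpers
 * __percpu_add(), __percpu_and() and __percpu_or(). Each case uses an
 * exclusive load/store pair: STXR reports success or failure in
 * 'loop', and the do/while retries until the update has been
 * committed atomically. The value after the operation is returned.
 */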
#define PERCPU_OP(op, asm_op)						\
static inline unsigned long __percpu_##op(void *ptr,			\
			unsigned long val, int size)			\
{									\
	unsigned long loop, ret;					\
									\
	switch (size) {							\
	case 1:								\
		do {							\
			asm ("//__per_cpu_" #op "_1\n"			\
			"ldxrb	  %w[ret], %[ptr]\n"			\
			#asm_op " %w[ret], %w[ret], %w[val]\n"		\
			"stxrb	  %w[loop], %w[ret], %[ptr]\n"		\
			: [loop] "=&r" (loop), [ret] "=&r" (ret),	\
			  [ptr] "+Q"(*(u8 *)ptr)			\
			: [val] "Ir" (val));				\
		} while (loop);						\
		break;							\
	case 2:								\
		do {							\
			asm ("//__per_cpu_" #op "_2\n"			\
			"ldxrh	  %w[ret], %[ptr]\n"			\
			#asm_op " %w[ret], %w[ret], %w[val]\n"		\
			"stxrh	  %w[loop], %w[ret], %[ptr]\n"		\
			: [loop] "=&r" (loop), [ret] "=&r" (ret),	\
			  [ptr] "+Q"(*(u16 *)ptr)			\
			: [val] "Ir" (val));				\
		} while (loop);						\
		break;							\
	case 4:								\
		do {							\
			asm ("//__per_cpu_" #op "_4\n"			\
			"ldxr	  %w[ret], %[ptr]\n"			\
			#asm_op " %w[ret], %w[ret], %w[val]\n"		\
			"stxr	  %w[loop], %w[ret], %[ptr]\n"		\
			: [loop] "=&r" (loop), [ret] "=&r" (ret),	\
			  [ptr] "+Q"(*(u32 *)ptr)			\
			: [val] "Ir" (val));				\
		} while (loop);						\
		break;							\
	case 8:								\
		do {							\
			asm ("//__per_cpu_" #op "_8\n"			\
			"ldxr	  %[ret], %[ptr]\n"			\
			#asm_op " %[ret], %[ret], %[val]\n"		\
			"stxr	  %w[loop], %[ret], %[ptr]\n"		\
			: [loop] "=&r" (loop), [ret] "=&r" (ret),	\
			  [ptr] "+Q"(*(u64 *)ptr)			\
			: [val] "Ir" (val));				\
		} while (loop);						\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	return ret;							\
}

PERCPU_OP(add, add)
PERCPU_OP(and, and)
PERCPU_OP(or, orr)
#undef PERCPU_OP
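
/*
 * As an illustration, PERCPU_OP(add, add) above expands to
 *
 *	static inline unsigned long __percpu_add(void *ptr,
 *				unsigned long val, int size);
 *
 * which atomically adds 'val' to the 1-, 2-, 4- or 8-byte object at
 * 'ptr' and returns the updated value.
 */
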
static inline unsigned long __percpu_read(void *ptr, int size)
{
	unsigned long ret;

	switch (size) {
	case 1:
		ret = ACCESS_ONCE(*(u8 *)ptr);
		break;
	case 2:
		ret = ACCESS_ONCE(*(u16 *)ptr);
		break;
	case 4:
		ret = ACCESS_ONCE(*(u32 *)ptr);
		break;
	case 8:
		ret = ACCESS_ONCE(*(u64 *)ptr);
		break;
	default:
		BUILD_BUG();
	}

	return ret;
}
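
/*
 * Naturally-aligned loads and stores of up to 64 bits are single-copy
 * atomic on arm64, so __percpu_read() above and __percpu_write()
 * below need no exclusives; ACCESS_ONCE() only stops the compiler
 * from tearing, merging or re-reading the access.
 */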
static inline void __percpu_write(void *ptr, unsigned long val, int size)
{
	switch (size) {
	case 1:
		ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
		break;
	case 2:
		ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
		break;
	case 4:
		ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
		break;
	case 8:
		ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
		break;
	default:
		BUILD_BUG();
	}
}
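
/*
 * Unlike the plain read/write helpers, an exchange must return the
 * old value and install the new one as a single atomic step, so
 * __percpu_xchg() uses an exclusive load/store retry loop again.
 */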
static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
						int size)
{
	unsigned long ret, loop;

	switch (size) {
	case 1:
		do {
			asm ("//__percpu_xchg_1\n"
			"ldxrb %w[ret], %[ptr]\n"
			"stxrb %w[loop], %w[val], %[ptr]\n"
			: [loop] "=&r"(loop), [ret] "=&r"(ret),
			  [ptr] "+Q"(*(u8 *)ptr)
			: [val] "r" (val));
		} while (loop);
		break;
	case 2:
		do {
			asm ("//__percpu_xchg_2\n"
			"ldxrh %w[ret], %[ptr]\n"
			"stxrh %w[loop], %w[val], %[ptr]\n"
			: [loop] "=&r"(loop), [ret] "=&r"(ret),
			  [ptr] "+Q"(*(u16 *)ptr)
			: [val] "r" (val));
		} while (loop);
		break;
	case 4:
		do {
			asm ("//__percpu_xchg_4\n"
			"ldxr %w[ret], %[ptr]\n"
			"stxr %w[loop], %w[val], %[ptr]\n"
			: [loop] "=&r"(loop), [ret] "=&r"(ret),
			  [ptr] "+Q"(*(u32 *)ptr)
			: [val] "r" (val));
		} while (loop);
		break;
	case 8:
		do {
			asm ("//__percpu_xchg_8\n"
			"ldxr %[ret], %[ptr]\n"
			"stxr %w[loop], %[val], %[ptr]\n"
			: [loop] "=&r"(loop), [ret] "=&r"(ret),
			  [ptr] "+Q"(*(u64 *)ptr)
			: [val] "r" (val));
		} while (loop);
		break;
	default:
		BUILD_BUG();
	}

	return ret;
}
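
/*
 * The wrappers below pin the caller to the current CPU around the raw
 * helpers: raw_cpu_ptr() is only stable while preemption is disabled,
 * so each wrapper brackets the access with preempt_disable() and
 * preempt_enable().
 */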
#define _percpu_read(pcp)						\
({									\
	typeof(pcp) __retval;						\
	preempt_disable();						\
	__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),	\
					      sizeof(pcp));		\
	preempt_enable();						\
	__retval;							\
})

#define _percpu_write(pcp, val)						\
do {									\
	preempt_disable();						\
	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),	\
				sizeof(pcp));				\
	preempt_enable();						\
} while (0)

#define _pcp_protect(operation, pcp, val)				\
({									\
	typeof(pcp) __retval;						\
	preempt_disable();						\
	__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),		\
					  (val), sizeof(pcp));		\
	preempt_enable();						\
	__retval;							\
})

#define _percpu_add(pcp, val) \
	_pcp_protect(__percpu_add, pcp, val)

#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)

#define _percpu_and(pcp, val) \
	_pcp_protect(__percpu_and, pcp, val)

#define _percpu_or(pcp, val) \
	_pcp_protect(__percpu_or, pcp, val)

#define _percpu_xchg(pcp, val) (typeof(pcp)) \
	_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
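
/*
 * The generic percpu layer dispatches on operand size, but the
 * helpers above already switch on sizeof(), so all four this_cpu_*_N
 * variants of each operation map to the same wrapper. Likewise,
 * _percpu_add_return() simply aliases _percpu_add(), because
 * PERCPU_OP() returns the post-operation value.
 */
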
#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)

#define this_cpu_add_return_1(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_2(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)

#define this_cpu_and_1(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_2(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)

#define this_cpu_or_1(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_2(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)

#define this_cpu_read_1(pcp) _percpu_read(pcp)
#define this_cpu_read_2(pcp) _percpu_read(pcp)
#define this_cpu_read_4(pcp) _percpu_read(pcp)
#define this_cpu_read_8(pcp) _percpu_read(pcp)

#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)

#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
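
/*
 * Usage sketch (hypothetical example, not part of this header).
 * this_cpu_add() resolves via the this_cpu_add_N defines to
 * _percpu_add() above, and this_cpu_read() to _percpu_read():
 *
 *	DEFINE_PER_CPU(unsigned long, hits);
 *
 *	void count_hit(void)
 *	{
 *		this_cpu_add(hits, 1);
 *	}
 *
 *	unsigned long snapshot(void)
 *	{
 *		return this_cpu_read(hits);
 *	}
 */
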
#include <asm-generic/percpu.h>

#endif /* __ASM_PERCPU_H */