arch/arm/include/asm/percpu.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2012 Calxeda, Inc.
 */
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_

register unsigned long current_stack_pointer asm ("sp");

/*
 * Same as asm-generic/percpu.h, except that we store the per cpu offset
 * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
 */
#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
static inline void set_my_cpu_offset(unsigned long off)
{
	/* Set TPIDRPRW */
	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}

static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * Read TPIDRPRW.
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
		: "Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()
#else
#define set_my_cpu_offset(x)	do {} while(0)

#endif /* CONFIG_SMP */

#include <asm-generic/percpu.h>

#endif /* _ASM_ARM_PERCPU_H_ */
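
For context, a simplified sketch of how the generic per-cpu layer consumes the __my_cpu_offset defined above: a pointer to the current CPU's copy of a per-CPU variable is formed by adding the offset read from TPIDRPRW to the variable's address. The macro and names below (prefixed with example_) are hypothetical and illustrative only; the real asm-generic/percpu.h adds further indirection and checks on top of this idea.

	/* Illustrative only; not the actual asm-generic/percpu.h code. */
	#define example_this_cpu_ptr(ptr) \
		((typeof(ptr))((unsigned long)(ptr) + __my_cpu_offset))

	/*
	 * Usage sketch, assuming a per-CPU variable declared elsewhere
	 * with DEFINE_PER_CPU(int, example_counter):
	 *
	 *	int *p = example_this_cpu_ptr(&example_counter);
	 *	(*p)++;
	 */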
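The writer side is set_my_cpu_offset(): each CPU stores its own per-CPU offset into TPIDRPRW once during bring-up, so every later __my_cpu_offset() read is a single mrc instruction. The helper below is a minimal sketch under that assumption; example_cpu_init_percpu is hypothetical and is not the actual arch/arm boot code.

	/* Hypothetical helper showing the intended call pattern. */
	static void example_cpu_init_percpu(unsigned int cpu)
	{
		/* per_cpu_offset(cpu) is the generic offset for this CPU. */
		set_my_cpu_offset(per_cpu_offset(cpu));
	}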