/*
 * Imported from Linux, merge tag 'locking-urgent-2020-12-27'
 * (git://git.kernel.org/pub/scm/linux/kernel...)
 * File: arch/arm/include/asm/proc-fns.h
 * Blob: c82f7a29ec4a6ebab3f1203028bea92765b09e21
 */
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * arch/arm/include/asm/proc-fns.h
5 * Copyright (C) 1997-1999 Russell King
6 * Copyright (C) 2000 Deep Blue Solutions Ltd
7 */
8 #ifndef __ASM_PROCFNS_H
9 #define __ASM_PROCFNS_H
11 #ifdef __KERNEL__
13 #include <asm/glue-proc.h>
14 #include <asm/page.h>
16 #ifndef __ASSEMBLY__
18 struct mm_struct;
21 * Don't change this structure - ASM code relies on it.
23 struct processor {
24 /* MISC
25 * get data abort address/flags
27 void (*_data_abort)(unsigned long pc);
29 * Retrieve prefetch fault address
31 unsigned long (*_prefetch_abort)(unsigned long lr);
33 * Set up any processor specifics
35 void (*_proc_init)(void);
37 * Check for processor bugs
39 void (*check_bugs)(void);
41 * Disable any processor specifics
43 void (*_proc_fin)(void);
45 * Special stuff for a reset
47 void (*reset)(unsigned long addr, bool hvc) __attribute__((noreturn));
49 * Idle the processor
51 int (*_do_idle)(void);
53 * Processor architecture specific
56 * clean a virtual address range from the
57 * D-cache without flushing the cache.
59 void (*dcache_clean_area)(void *addr, int size);
62 * Set the page table
64 void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
66 * Set a possibly extended PTE. Non-extended PTEs should
67 * ignore 'ext'.
69 #ifdef CONFIG_ARM_LPAE
70 void (*set_pte_ext)(pte_t *ptep, pte_t pte);
71 #else
72 void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
73 #endif
75 /* Suspend/resume */
76 unsigned int suspend_size;
77 void (*do_suspend)(void *);
78 void (*do_resume)(void *);
81 #ifndef MULTI_CPU
82 static inline void init_proc_vtable(const struct processor *p)
86 extern void cpu_proc_init(void);
87 extern void cpu_proc_fin(void);
88 extern int cpu_do_idle(void);
89 extern void cpu_dcache_clean_area(void *, int);
90 extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
91 #ifdef CONFIG_ARM_LPAE
92 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
93 #else
94 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
95 #endif
96 extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
98 /* These three are private to arch/arm/kernel/suspend.c */
99 extern void cpu_do_suspend(void *);
100 extern void cpu_do_resume(void *);
101 #else
103 extern struct processor processor;
104 #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
105 #include <linux/smp.h>
107 * This can't be a per-cpu variable because we need to access it before
108 * per-cpu has been initialised. We have a couple of functions that are
109 * called in a pre-emptible context, and so can't use smp_processor_id()
110 * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
111 * function pointers for these are identical across all CPUs.
113 extern struct processor *cpu_vtable[];
114 #define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
115 #define PROC_TABLE(f) cpu_vtable[0]->f
116 static inline void init_proc_vtable(const struct processor *p)
118 unsigned int cpu = smp_processor_id();
119 *cpu_vtable[cpu] = *p;
120 WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
121 cpu_vtable[0]->dcache_clean_area);
122 WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
123 cpu_vtable[0]->set_pte_ext);
125 #else
126 #define PROC_VTABLE(f) processor.f
127 #define PROC_TABLE(f) processor.f
128 static inline void init_proc_vtable(const struct processor *p)
130 processor = *p;
132 #endif
134 #define cpu_proc_init PROC_VTABLE(_proc_init)
135 #define cpu_check_bugs PROC_VTABLE(check_bugs)
136 #define cpu_proc_fin PROC_VTABLE(_proc_fin)
137 #define cpu_reset PROC_VTABLE(reset)
138 #define cpu_do_idle PROC_VTABLE(_do_idle)
139 #define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
140 #define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
141 #define cpu_do_switch_mm PROC_VTABLE(switch_mm)
143 /* These two are private to arch/arm/kernel/suspend.c */
144 #define cpu_do_suspend PROC_VTABLE(do_suspend)
145 #define cpu_do_resume PROC_VTABLE(do_resume)
146 #endif
148 extern void cpu_resume(void);
150 #include <asm/memory.h>
152 #ifdef CONFIG_MMU
154 #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
156 #ifdef CONFIG_ARM_LPAE
158 #define cpu_get_ttbr(nr) \
159 ({ \
160 u64 ttbr; \
161 __asm__("mrrc p15, " #nr ", %Q0, %R0, c2" \
162 : "=r" (ttbr)); \
163 ttbr; \
166 #define cpu_get_pgd() \
167 ({ \
168 u64 pg = cpu_get_ttbr(0); \
169 pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \
170 (pgd_t *)phys_to_virt(pg); \
172 #else
173 #define cpu_get_pgd() \
174 ({ \
175 unsigned long pg; \
176 __asm__("mrc p15, 0, %0, c2, c0, 0" \
177 : "=r" (pg) : : "cc"); \
178 pg &= ~0x3fff; \
179 (pgd_t *)phys_to_virt(pg); \
181 #endif
183 #else /*!CONFIG_MMU */
185 #define cpu_switch_mm(pgd,mm) { }
187 #endif
189 #endif /* __ASSEMBLY__ */
190 #endif /* __KERNEL__ */
191 #endif /* __ASM_PROCFNS_H */