x86/speculation/mds: Fix documentation typo
[linux/fpc-iii.git] / arch / arm / include / asm / proc-fns.h
blob e1b6f280ab088fb0b8ac59b6ceb3543606c97e01
/*
 * arch/arm/include/asm/proc-fns.h
 *
 * Copyright (C) 1997-1999 Russell King
 * Copyright (C) 2000 Deep Blue Solutions Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #ifndef __ASM_PROCFNS_H
12 #define __ASM_PROCFNS_H
14 #ifdef __KERNEL__
16 #include <asm/glue-proc.h>
17 #include <asm/page.h>
19 #ifndef __ASSEMBLY__
21 struct mm_struct;
24 * Don't change this structure - ASM code relies on it.
26 struct processor {
27 /* MISC
28 * get data abort address/flags
30 void (*_data_abort)(unsigned long pc);
32 * Retrieve prefetch fault address
34 unsigned long (*_prefetch_abort)(unsigned long lr);
36 * Set up any processor specifics
38 void (*_proc_init)(void);
40 * Check for processor bugs
42 void (*check_bugs)(void);
44 * Disable any processor specifics
46 void (*_proc_fin)(void);
48 * Special stuff for a reset
50 void (*reset)(unsigned long addr, bool hvc) __attribute__((noreturn));
52 * Idle the processor
54 int (*_do_idle)(void);
56 * Processor architecture specific
59 * clean a virtual address range from the
60 * D-cache without flushing the cache.
62 void (*dcache_clean_area)(void *addr, int size);
65 * Set the page table
67 void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
69 * Set a possibly extended PTE. Non-extended PTEs should
70 * ignore 'ext'.
72 #ifdef CONFIG_ARM_LPAE
73 void (*set_pte_ext)(pte_t *ptep, pte_t pte);
74 #else
75 void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
76 #endif
78 /* Suspend/resume */
79 unsigned int suspend_size;
80 void (*do_suspend)(void *);
81 void (*do_resume)(void *);
84 #ifndef MULTI_CPU
85 static inline void init_proc_vtable(const struct processor *p)
89 extern void cpu_proc_init(void);
90 extern void cpu_proc_fin(void);
91 extern int cpu_do_idle(void);
92 extern void cpu_dcache_clean_area(void *, int);
93 extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
94 #ifdef CONFIG_ARM_LPAE
95 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
96 #else
97 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
98 #endif
99 extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
101 /* These three are private to arch/arm/kernel/suspend.c */
102 extern void cpu_do_suspend(void *);
103 extern void cpu_do_resume(void *);
104 #else
106 extern struct processor processor;
107 #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
108 #include <linux/smp.h>
110 * This can't be a per-cpu variable because we need to access it before
111 * per-cpu has been initialised. We have a couple of functions that are
112 * called in a pre-emptible context, and so can't use smp_processor_id()
113 * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
114 * function pointers for these are identical across all CPUs.
116 extern struct processor *cpu_vtable[];
117 #define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
118 #define PROC_TABLE(f) cpu_vtable[0]->f
119 static inline void init_proc_vtable(const struct processor *p)
121 unsigned int cpu = smp_processor_id();
122 *cpu_vtable[cpu] = *p;
123 WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
124 cpu_vtable[0]->dcache_clean_area);
125 WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
126 cpu_vtable[0]->set_pte_ext);
128 #else
129 #define PROC_VTABLE(f) processor.f
130 #define PROC_TABLE(f) processor.f
131 static inline void init_proc_vtable(const struct processor *p)
133 processor = *p;
135 #endif
137 #define cpu_proc_init PROC_VTABLE(_proc_init)
138 #define cpu_check_bugs PROC_VTABLE(check_bugs)
139 #define cpu_proc_fin PROC_VTABLE(_proc_fin)
140 #define cpu_reset PROC_VTABLE(reset)
141 #define cpu_do_idle PROC_VTABLE(_do_idle)
142 #define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
143 #define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
144 #define cpu_do_switch_mm PROC_VTABLE(switch_mm)
146 /* These two are private to arch/arm/kernel/suspend.c */
147 #define cpu_do_suspend PROC_VTABLE(do_suspend)
148 #define cpu_do_resume PROC_VTABLE(do_resume)
149 #endif
151 extern void cpu_resume(void);
153 #include <asm/memory.h>
155 #ifdef CONFIG_MMU
157 #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
159 #ifdef CONFIG_ARM_LPAE
161 #define cpu_get_ttbr(nr) \
162 ({ \
163 u64 ttbr; \
164 __asm__("mrrc p15, " #nr ", %Q0, %R0, c2" \
165 : "=r" (ttbr)); \
166 ttbr; \
169 #define cpu_get_pgd() \
170 ({ \
171 u64 pg = cpu_get_ttbr(0); \
172 pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \
173 (pgd_t *)phys_to_virt(pg); \
175 #else
176 #define cpu_get_pgd() \
177 ({ \
178 unsigned long pg; \
179 __asm__("mrc p15, 0, %0, c2, c0, 0" \
180 : "=r" (pg) : : "cc"); \
181 pg &= ~0x3fff; \
182 (pgd_t *)phys_to_virt(pg); \
184 #endif
186 #else /*!CONFIG_MMU */
188 #define cpu_switch_mm(pgd,mm) { }
190 #endif
192 #endif /* __ASSEMBLY__ */
193 #endif /* __KERNEL__ */
194 #endif /* __ASM_PROCFNS_H */