[PATCH] i386: Allow to use GENERICARCH for UP kernels
[linux-2.6/verdex.git] arch/i386/kernel/alternative.c
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>
static int no_replacement    = 0;
static int smp_alt_once      = 0;
static int debug_alternative = 0;
static int __init noreplacement_setup(char *s)
{
	no_replacement = 1;
	return 1;
}

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}

__setup("noreplacement", noreplacement_setup);
__setup("smp-alt-boot", bootonly);
__setup("debug-alternative", debug_alt);
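/*
 * All three flags above can be set from the kernel command line:
 *   noreplacement     - don't apply any alternative instructions
 *   smp-alt-boot      - patch the SMP/UP alternatives once at boot and
 *                       free the tables instead of keeping them around
 *                       for CPU hotplug
 *   debug-alternative - enable the DPRINTK() output below
 */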
#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)
#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
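/*
 * Each table above is indexed by NOP length: noptable[k] points at a
 * k-byte NOP.  The one- to eight-byte NOPs are emitted back to back
 * into .data, so entry k simply starts 1 + 2 + ... + (k-1) bytes into
 * the blob, and ASM_NOP_MAX bounds the largest usable entry.
 */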
#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	/* Fall back to the generic Intel NOPs unless the boot CPU
	   advertises one of the vendor-specific feature flags. */
	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */
/* These symbols are defined in the architecture linker script and
   bracket the alternative-instruction tables and the lock-prefix list. */
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

extern u8 __smp_alt_begin[], __smp_alt_end[];
/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not
   handled. Tough. Make sure you disable such features by hand. */
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	unsigned char **noptable = find_nop_table();
	struct alt_instr *a;
	u8 *instr;
	int diff, i, k;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		/* Pad the rest with nops, at the (possibly remapped)
		   target address */
		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
			k = diff;
			if (k > ASM_NOP_MAX)
				k = ASM_NOP_MAX;
			memcpy(instr + i, noptable[k], k);
		}
	}
}
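/*
 * Illustrative only: the alt_instr records walked above are emitted by
 * the alternative() macro family in <asm/alternative.h>, used roughly as
 *
 *	alternative("old instructions", "new instructions", feature);
 *
 * which assembles the old code inline, places the replacement in a
 * separate section, and records { site, replacement, cpuid feature bit,
 * lengths } in .altinstructions for this function to process.  The
 * operand strings above are placeholders, not real call sites.
 */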
#ifdef CONFIG_SMP

/* At build time every SMP/UP-alternative site contains its SMP
   flavour; _save() stashes those bytes behind the UP replacement so
   that _apply() can copy them back in when a second CPU shows up. */
static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		memcpy(a->replacement + a->replacementlen,
		       a->instr,
		       a->instrlen);
	}
}

static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	for (a = start; a < end; a++) {
		memcpy(a->instr,
		       a->replacement + a->replacementlen,
		       a->instrlen);
	}
}
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = 0xf0; /* lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	unsigned char **noptable = find_nop_table();
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = noptable[1][0]; /* overwrite the lock prefix with a one-byte nop */
	}
}
struct smp_alt_module {
	/* owning module (NULL for the core kernel), matched by _del() */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
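/*
 * Bookkeeping for the lock-prefix lists of the core kernel and of every
 * loaded module: the module load/unload paths call
 * alternatives_smp_module_add()/_del() below, and
 * alternatives_smp_switch() walks this list whenever the system moves
 * between UP and SMP mode.
 */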
void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text,  void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (no_replacement)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}
void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (no_replacement || smp_alt_once)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not yet fixed binutils section handling bug prevents
	 * alternatives-replacement from working reliably, so turn
	 * it off:
	 */
	printk("lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (no_replacement || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		alternatives_smp_apply(__smp_alt_instructions,
				       __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		apply_alternatives(__smp_alt_instructions,
				   __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

#endif
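/*
 * The switch itself is driven from outside this file: the SMP boot and
 * CPU hotplug paths are expected to call alternatives_smp_switch(1)
 * when a second CPU comes online and alternatives_smp_switch(0) when
 * the machine drops back to a single CPU (the exact call sites live in
 * the smpboot code, not shown here).
 */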
void __init alternative_instructions(void)
{
	if (no_replacement) {
		printk(KERN_INFO "(SMP-)alternatives turned off\n");
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
		return;
	}

	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			apply_alternatives(__smp_alt_instructions,
					   __smp_alt_instructions_end);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
	} else {
		alternatives_smp_save(__smp_alt_instructions,
				      __smp_alt_instructions_end);
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
}