#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/kprobes.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>
#define MAX_PATCH_LEN (255-1)
#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif
static int debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);
static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#ifdef CONFIG_PARAVIRT
static int noreplace_paravirt;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif
/* wrapped in do/while so the macro is safe inside un-braced if/else */
#define DPRINTK(fmt, args...)						\
	do {								\
		if (debug_alternative)					\
			printk(KERN_DEBUG fmt, args);			\
	} while (0)
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
/* intel_nops[len] points at a NOP sequence that is exactly len bytes long */
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
84 asm("\t.data\nk8nops: "
85 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
87 extern unsigned char k8nops
[];
88 static unsigned char *k8_nops
[ASM_NOP_MAX
+1] = {
94 k8nops
+ 1 + 2 + 3 + 4,
95 k8nops
+ 1 + 2 + 3 + 4 + 5,
96 k8nops
+ 1 + 2 + 3 + 4 + 5 + 6,
97 k8nops
+ 1 + 2 + 3 + 4 + 5 + 6 + 7,
102 asm("\t.data\nk7nops: "
103 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
105 extern unsigned char k7nops
[];
106 static unsigned char *k7_nops
[ASM_NOP_MAX
+1] = {
112 k7nops
+ 1 + 2 + 3 + 4,
113 k7nops
+ 1 + 2 + 3 + 4 + 5,
114 k7nops
+ 1 + 2 + 3 + 4 + 5 + 6,
115 k7nops
+ 1 + 2 + 3 + 4 + 5 + 6 + 7,
#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void add_nops(void *insns, unsigned int len)
{
	unsigned char **noptable = find_nop_table();

	while (len > 0) {
		/* emit the longest NOP the table offers, then repeat */
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
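/*
 * Illustrative sketch of the intended usage pattern (names here are
 * hypothetical): assemble a patch in a local buffer, pad it to the full
 * slot size with add_nops(), then install it in one go:
 *
 *	char buf[MAX_PATCH_LEN];
 *
 *	memcpy(buf, replacement, replacementlen);
 *	add_nops(buf + replacementlen, instrlen - replacementlen);
 *	text_poke(instr, buf, instrlen);
 *
 * apply_alternatives() below does exactly this for each alt_instr entry.
 */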
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];
/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	char insnbuf[MAX_PATCH_LEN];

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		u8 *instr = a->instr;
		BUG_ON(a->replacementlen > a->instrlen);
		BUG_ON(a->instrlen > sizeof(insnbuf));
		if (!boot_cpu_has(a->cpuid))
			continue;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END) {
			instr = __va(instr - (u8 *)VSYSCALL_START +
				     (u8 *)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(insnbuf, a->replacement, a->replacementlen);
		add_nops(insnbuf + a->replacementlen,
			 a->instrlen - a->replacementlen);
		text_poke(instr, insnbuf, a->instrlen);
	}
}
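/*
 * For reference: the alt_instr records walked above are emitted at the
 * call sites by the alternative() macro from <asm/alternative.h>, which
 * keeps the original instruction in .text and stashes the replacement
 * plus a descriptor in separate sections. For example, the i386 memory
 * barrier is defined roughly as
 *
 *	#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", \
 *				 X86_FEATURE_XMM2)
 *
 * so CPUs with SSE2 get a real mfence patched in at boot.
 */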
static void alternatives_smp_lock(u8 **start, u8 **end,
				  u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		/* skip entries outside the text range (e.g. freed init code) */
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
	}
}
static void alternatives_smp_unlock(u8 **start, u8 **end,
				    u8 *text, u8 *text_end)
{
	u8 **ptr;
	char insn[1];

	/* replace each lock prefix with a one-byte nop */
	add_nops(insn, 1);
	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		text_poke(*ptr, insn, 1);
	}
}
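/*
 * For reference: the pointer arrays walked by the two functions above are
 * collected in the .smp_locks section. Roughly (see <asm/alternative.h>),
 * the LOCK_PREFIX macro emits the lock byte and records its address:
 *
 *	#define LOCK_PREFIX \
 *		".section .smp_locks,\"a\"\n" \
 *		"  .align 4\n" \
 *		"  .long 661f\n" \
 *		".previous\n" \
 *		"661:\n\tlock; "
 *
 * alternatives_smp_lock()/_unlock() then flip each recorded byte between
 * 0xf0 ("lock") and a one-byte nop.
 */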
struct smp_alt_module {
	/* the module owning these lock prefixes; NULL for the core kernel */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text,  void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}
void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (smp_alt_once || noreplace_smp)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not-yet-fixed binutils bug in section handling prevents
	 * alternatives replacement from working reliably, so turn
	 * it off:
	 */
	printk("lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (noreplace_smp || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = paravirt_ops.patch(p->instrtype, p->clobbers, insnbuf,
					  (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */
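/*
 * Illustrative sketch (hypothetical backend, assuming the paravirt_ops
 * interface used above): the .patch hook receives the original
 * instructions in insnbuf and returns how many bytes of the buffer it
 * wants installed; a minimal backend can keep every site unpatched by
 * claiming all of the original bytes:
 *
 *	static unsigned noop_patch(u8 type, u16 clobbers, void *insnbuf,
 *				   unsigned long addr, unsigned len)
 *	{
 *		return len;
 *	}
 *
 * A real backend replaces selected instrtype sites with native sequences
 * and returns the number of bytes written; apply_paravirt() nop-pads the
 * remainder up to p->len.
 */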
void __init alternative_instructions(void)
{
	unsigned long flags;

	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();
#ifdef CONFIG_X86_MCE
	stop_mce();
#endif

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
	apply_paravirt(__parainstructions, __parainstructions_end);
	local_irq_restore(flags);

	restart_nmi();
#ifdef CONFIG_X86_MCE
	restart_mce();
#endif
}
/*
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these instructions.
 * And on the local CPU you need to be protected against NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 */
void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
{
	memcpy(addr, opcode, len);
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
}
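/*
 * Illustrative one-byte use, mirroring alternatives_smp_lock() above
 * ("site" is a hypothetical address of a patchable lock prefix):
 *
 *	unsigned char lock = 0xf0;
 *	text_poke(site, &lock, 1);
 *
 * Single-byte patches sidestep the constraints in the comment above,
 * since an aligned one-byte store is atomic and both the old and the
 * new byte are valid instruction starts.
 */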