/* arch/x86/kernel/alternative.c */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

#define MAX_PATCH_LEN (255-1)

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
        smp_alt_once = 1;
        return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
        debug_alternative = 1;
        return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
        noreplace_smp = 1;
        return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
        noreplace_paravirt = 1;
        return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...) if (debug_alternative) \
        printk(KERN_DEBUG fmt, args)

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
        GENERIC_NOP1,
        GENERIC_NOP2,
        GENERIC_NOP3,
        GENERIC_NOP4,
        GENERIC_NOP5,
        GENERIC_NOP6,
        GENERIC_NOP7,
        GENERIC_NOP8,
        GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
        NULL,
        intelnops,
        intelnops + 1,
        intelnops + 1 + 2,
        intelnops + 1 + 2 + 3,
        intelnops + 1 + 2 + 3 + 4,
        intelnops + 1 + 2 + 3 + 4 + 5,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
        K8_NOP1,
        K8_NOP2,
        K8_NOP3,
        K8_NOP4,
        K8_NOP5,
        K8_NOP6,
        K8_NOP7,
        K8_NOP8,
        K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
        NULL,
        k8nops,
        k8nops + 1,
        k8nops + 1 + 2,
        k8nops + 1 + 2 + 3,
        k8nops + 1 + 2 + 3 + 4,
        k8nops + 1 + 2 + 3 + 4 + 5,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
        K7_NOP1,
        K7_NOP2,
        K7_NOP3,
        K7_NOP4,
        K7_NOP5,
        K7_NOP6,
        K7_NOP7,
        K7_NOP8,
        K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
        NULL,
        k7nops,
        k7nops + 1,
        k7nops + 1 + 2,
        k7nops + 1 + 2 + 3,
        k7nops + 1 + 2 + 3 + 4,
        k7nops + 1 + 2 + 3 + 4 + 5,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char __initconst_or_module p6nops[] =
{
        P6_NOP1,
        P6_NOP2,
        P6_NOP3,
        P6_NOP4,
        P6_NOP5,
        P6_NOP6,
        P6_NOP7,
        P6_NOP8,
        P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
        NULL,
        p6nops,
        p6nops + 1,
        p6nops + 1 + 2,
        p6nops + 1 + 2 + 3,
        p6nops + 1 + 2 + 3 + 4,
        p6nops + 1 + 2 + 3 + 4 + 5,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_INTEL:
                /*
                 * Due to a decoder implementation quirk, some
                 * specific Intel CPUs actually perform better with
                 * the "k8_nops" than with the SDM-recommended NOPs.
                 */
                if (boot_cpu_data.x86 == 6 &&
                    boot_cpu_data.x86_model >= 0x0f &&
                    boot_cpu_data.x86_model != 0x1c &&
                    boot_cpu_data.x86_model != 0x26 &&
                    boot_cpu_data.x86_model != 0x27 &&
                    boot_cpu_data.x86_model < 0x30) {
                        ideal_nops = k8_nops;
                } else if (boot_cpu_has(X86_FEATURE_NOPL)) {
                        ideal_nops = p6_nops;
                } else {
#ifdef CONFIG_X86_64
                        ideal_nops = k8_nops;
#else
                        ideal_nops = intel_nops;
#endif
                }
                break;
        default:
#ifdef CONFIG_X86_64
                ideal_nops = k8_nops;
#else
                if (boot_cpu_has(X86_FEATURE_K8))
                        ideal_nops = k8_nops;
                else if (boot_cpu_has(X86_FEATURE_K7))
                        ideal_nops = k7_nops;
                else
                        ideal_nops = intel_nops;
#endif
        }
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
        while (len > 0) {
                unsigned int noplen = len;
                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;
                memcpy(insns, ideal_nops[noplen], noplen);
                insns += noplen;
                len -= noplen;
        }
}
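
/*
 * Illustrative sketch (not part of the original file): a typical boot-time
 * caller builds the complete replacement in a scratch buffer, pads the tail
 * with add_nops(), and commits the whole buffer in one text_poke_early()
 * call.  The names "patch_site" and "replacement" and the lengths below are
 * hypothetical.
 */
#if 0
static void __init patch_site_example(void)
{
        u8 buf[16];

        memcpy(buf, replacement, 9);            /* 9 bytes of new code */
        add_nops(buf + 9, 16 - 9);              /* pad: one 7-byte ideal nop */
        text_poke_early(patch_site, buf, 16);   /* safe early: IRQs off,
                                                   only the boot CPU runs */
}
#endif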

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
extern char __vsyscall_0;
void *text_poke_early(void *addr, const void *opcode, size_t len);

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have less capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */
void __init_or_module apply_alternatives(struct alt_instr *start,
                                         struct alt_instr *end)
{
        struct alt_instr *a;
        u8 insnbuf[MAX_PATCH_LEN];

        DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
        /*
         * The scan order should be from start to end. A later scanned
         * alternative code can overwrite a previous scanned alternative code.
         * Some kernel functions (e.g. memcpy, memset, etc) use this order to
         * patch code.
         *
         * So be careful if you want to change the scan order to any other
         * order.
         */
        for (a = start; a < end; a++) {
                u8 *instr = a->instr;
                BUG_ON(a->replacementlen > a->instrlen);
                BUG_ON(a->instrlen > sizeof(insnbuf));
                BUG_ON(a->cpuid >= NCAPINTS*32);
                if (!boot_cpu_has(a->cpuid))
                        continue;
#ifdef CONFIG_X86_64
                /* vsyscall code is not mapped yet. resolve it manually. */
                if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
                        instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
                        DPRINTK("%s: vsyscall fixup: %p => %p\n",
                                __func__, a->instr, instr);
                }
#endif
                memcpy(insnbuf, a->replacement, a->replacementlen);
                if (*insnbuf == 0xe8 && a->replacementlen == 5)
                        *(s32 *)(insnbuf + 1) += a->replacement - a->instr;
                add_nops(insnbuf + a->replacementlen,
                         a->instrlen - a->replacementlen);
                text_poke_early(instr, insnbuf, a->instrlen);
        }
}
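
/*
 * Illustrative sketch (not part of the original file): alt_instr records
 * are normally emitted by the alternative() macro from <asm/alternative.h>
 * rather than written by hand.  This sketch is modeled on the
 * rdtsc_barrier() idiom of this era: the nops are replaced by "lfence" at
 * boot if the CPU advertises X86_FEATURE_LFENCE_RDTSC.
 */
#if 0
static inline void rdtsc_barrier_example(void)
{
        /* 3-byte nop by default; patched to the 3-byte lfence if the
           feature bit is set, via the loop in apply_alternatives(). */
        alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
#endif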

#ifdef CONFIG_SMP

static void alternatives_smp_lock(const s32 *start, const s32 *end,
                                  u8 *text, u8 *text_end)
{
        const s32 *poff;

        mutex_lock(&text_mutex);
        for (poff = start; poff < end; poff++) {
                u8 *ptr = (u8 *)poff + *poff;

                if (!*poff || ptr < text || ptr >= text_end)
                        continue;
                /* turn DS segment override prefix into lock prefix */
                if (*ptr == 0x3e)
                        text_poke(ptr, ((unsigned char []){0xf0}), 1);
        }
        mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
                                    u8 *text, u8 *text_end)
{
        const s32 *poff;

        if (noreplace_smp)
                return;

        mutex_lock(&text_mutex);
        for (poff = start; poff < end; poff++) {
                u8 *ptr = (u8 *)poff + *poff;

                if (!*poff || ptr < text || ptr >= text_end)
                        continue;
                /* turn lock prefix into DS segment override prefix */
                if (*ptr == 0xf0)
                        text_poke(ptr, ((unsigned char []){0x3E}), 1);
        }
        mutex_unlock(&text_mutex);
}
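
/*
 * For reference (not part of the original file): the 0x3e/0xf0 bytes
 * patched above come from LOCK_PREFIX in <asm/alternative.h>, which records
 * each lock-prefix location in the .smp_locks section as a 32-bit offset
 * relative to the entry itself -- hence the "(u8 *)poff + *poff" decoding
 * in the two functions above.  Roughly, from this era of the kernel:
 */
#if 0
#define LOCK_PREFIX_HERE \
                ".section .smp_locks,\"a\"\n"   \
                ".balign 4\n"                   \
                ".long 671f - .\n" /* offset */ \
                ".previous\n"                   \
                "671:"

#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
#endif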

struct smp_alt_module {
        /* what is this ??? */
        struct module   *mod;
        char            *name;

        /* ptrs to lock prefixes */
        const s32       *locks;
        const s32       *locks_end;

        /* .text segment, needed to avoid patching init code ;) */
        u8              *text;
        u8              *text_end;

        struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;        /* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
                                                  char *name,
                                                  void *locks, void *locks_end,
                                                  void *text,  void *text_end)
{
        struct smp_alt_module *smp;

        if (noreplace_smp)
                return;

        if (smp_alt_once) {
                if (boot_cpu_has(X86_FEATURE_UP))
                        alternatives_smp_unlock(locks, locks_end,
                                                text, text_end);
                return;
        }

        smp = kzalloc(sizeof(*smp), GFP_KERNEL);
        if (NULL == smp)
                return; /* we'll run the (safe but slow) SMP code then ... */

        smp->mod        = mod;
        smp->name       = name;
        smp->locks      = locks;
        smp->locks_end  = locks_end;
        smp->text       = text;
        smp->text_end   = text_end;
        DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
                __func__, smp->locks, smp->locks_end,
                smp->text, smp->text_end, smp->name);

        mutex_lock(&smp_alt);
        list_add_tail(&smp->next, &smp_alt_modules);
        if (boot_cpu_has(X86_FEATURE_UP))
                alternatives_smp_unlock(smp->locks, smp->locks_end,
                                        smp->text, smp->text_end);
        mutex_unlock(&smp_alt);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
        struct smp_alt_module *item;

        if (smp_alt_once || noreplace_smp)
                return;

        mutex_lock(&smp_alt);
        list_for_each_entry(item, &smp_alt_modules, next) {
                if (mod != item->mod)
                        continue;
                list_del(&item->next);
                mutex_unlock(&smp_alt);
                DPRINTK("%s: %s\n", __func__, item->name);
                kfree(item);
                return;
        }
        mutex_unlock(&smp_alt);
}

bool skip_smp_alternatives;
void alternatives_smp_switch(int smp)
{
        struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
        /*
         * Older binutils section handling bug prevented
         * alternatives-replacement from working reliably.
         *
         * If this still occurs then you should see a hang
         * or crash shortly after this line:
         */
        printk("lockdep: fixing up alternatives.\n");
#endif

        if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
                return;
        BUG_ON(!smp && (num_online_cpus() > 1));

        mutex_lock(&smp_alt);

        /*
         * Avoid unnecessary switches because it forces JIT based VMs to
         * throw away all cached translations, which can be quite costly.
         */
        if (smp == smp_mode) {
                /* nothing */
        } else if (smp) {
                printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_lock(mod->locks, mod->locks_end,
                                              mod->text, mod->text_end);
        } else {
                printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_unlock(mod->locks, mod->locks_end,
                                                mod->text, mod->text_end);
        }
        smp_mode = smp;
        mutex_unlock(&smp_alt);
}

/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
        struct smp_alt_module *mod;
        const s32 *poff;
        u8 *text_start = start;
        u8 *text_end = end;

        list_for_each_entry(mod, &smp_alt_modules, next) {
                if (mod->text > text_end || mod->text_end < text_start)
                        continue;
                for (poff = mod->locks; poff < mod->locks_end; poff++) {
                        const u8 *ptr = (const u8 *)poff + *poff;

                        if (text_start <= ptr && text_end > ptr)
                                return 1;
                }
        }

        return 0;
}
#endif
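
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller can use alternatives_text_reserved() to refuse to patch bytes
 * that the SMP-alternatives machinery may itself rewrite on CPU hotplug,
 * much as the kprobes code avoids probing such ranges.
 */
#if 0
static int can_patch_range_example(void *start, void *end)
{
        if (alternatives_text_reserved(start, end))
                return -EBUSY;  /* range owned by smp-alternatives */
        return 0;
}
#endif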

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
                                     struct paravirt_patch_site *end)
{
        struct paravirt_patch_site *p;
        char insnbuf[MAX_PATCH_LEN];

        if (noreplace_paravirt)
                return;

        for (p = start; p < end; p++) {
                unsigned int used;

                BUG_ON(p->len > MAX_PATCH_LEN);
                /* prep the buffer with the original instructions */
                memcpy(insnbuf, p->instr, p->len);
                used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
                                         (unsigned long)p->instr, p->len);

                BUG_ON(used > p->len);

                /* Pad the rest with nops */
                add_nops(insnbuf + used, p->len - used);
                text_poke_early(p->instr, insnbuf, p->len);
        }
}
extern struct paravirt_patch_site __start_parainstructions[],
        __stop_parainstructions[];
#endif  /* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
        /* The patching is not fully atomic, so try to avoid local
           interruptions that might execute the code being patched.
           Other CPUs are not running. */
        stop_nmi();

        /*
         * Don't stop machine check exceptions while patching.
         * MCEs only happen when something got corrupted and in this
         * case we must do something about the corruption.
         * Ignoring it is worse than an unlikely patching race.
         * Also machine checks tend to be broadcast and if one CPU
         * goes into machine check the others follow quickly, so we don't
         * expect a machine check to cause undue problems during code
         * patching.
         */

        apply_alternatives(__alt_instructions, __alt_instructions_end);

        /* switch to patch-once-at-boottime-only mode and free the
         * tables in case we know the number of CPUs will never ever
         * change */
#ifdef CONFIG_HOTPLUG_CPU
        if (num_possible_cpus() < 2)
                smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
        if (smp_alt_once) {
                if (1 == num_possible_cpus()) {
                        printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                        set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

                        alternatives_smp_unlock(__smp_locks, __smp_locks_end,
                                                _text, _etext);
                }
        } else {
                alternatives_smp_module_add(NULL, "core kernel",
                                            __smp_locks, __smp_locks_end,
                                            _text, _etext);

                /* Only switch to UP mode if we don't immediately boot others */
                if (num_present_cpus() == 1 || setup_max_cpus <= 1)
                        alternatives_smp_switch(0);
        }
#endif
        apply_paravirt(__parainstructions, __parainstructions_end);

        if (smp_alt_once)
                free_init_pages("SMP alternatives",
                                (unsigned long)__smp_locks,
                                (unsigned long)__smp_locks_end);

        restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
                                       size_t len)
{
        unsigned long flags;
        local_irq_save(flags);
        memcpy(addr, opcode, len);
        sync_core();
        local_irq_restore(flags);
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
        unsigned long flags;
        char *vaddr;
        struct page *pages[2];
        int i;

        if (!core_kernel_text((unsigned long)addr)) {
                pages[0] = vmalloc_to_page(addr);
                pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
        } else {
                pages[0] = virt_to_page(addr);
                WARN_ON(!PageReserved(pages[0]));
                pages[1] = virt_to_page(addr + PAGE_SIZE);
        }
        BUG_ON(!pages[0]);
        local_irq_save(flags);
        set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
        if (pages[1])
                set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
        vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
        memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
        clear_fixmap(FIX_TEXT_POKE0);
        if (pages[1])
                clear_fixmap(FIX_TEXT_POKE1);
        local_flush_tlb();
        sync_core();
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        for (i = 0; i < len; i++)
                BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
        local_irq_restore(flags);
        return addr;
}
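
/*
 * Illustrative sketch (not part of the original file): patching a single
 * byte at runtime, as the kprobes code does when arming a breakpoint.
 * The caller must hold text_mutex; "addr" is a hypothetical patch site.
 */
#if 0
static void poke_one_byte_example(void *addr)
{
        unsigned char int3 = 0xcc;      /* breakpoint opcode */

        mutex_lock(&text_mutex);
        text_poke(addr, &int3, 1);      /* a single-byte write is atomic */
        mutex_unlock(&text_mutex);
}
#endif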

/*
 * Cross-modifying kernel text with stop_machine().
 * This code originally comes from immediate value.
 */
static atomic_t stop_machine_first;
static int wrote_text;

struct text_poke_params {
        struct text_poke_param *params;
        int nparams;
};

static int __kprobes stop_machine_text_poke(void *data)
{
        struct text_poke_params *tpp = data;
        struct text_poke_param *p;
        int i;

        if (atomic_dec_and_test(&stop_machine_first)) {
                for (i = 0; i < tpp->nparams; i++) {
                        p = &tpp->params[i];
                        text_poke(p->addr, p->opcode, p->len);
                }
                smp_wmb();      /* Make sure other cpus see that this has run */
                wrote_text = 1;
        } else {
                while (!wrote_text)
                        cpu_relax();
                smp_mb();       /* Load wrote_text before following execution */
        }

        for (i = 0; i < tpp->nparams; i++) {
                p = &tpp->params[i];
                flush_icache_range((unsigned long)p->addr,
                                   (unsigned long)p->addr + p->len);
        }
        /*
         * Intel Architecture Software Developer's Manual section 7.1.3
         * specifies that a core serializing instruction such as "cpuid"
         * should be executed on _each_ core before the new instruction is
         * made visible.
         */
        sync_core();
        return 0;
}

/**
 * text_poke_smp - Update instructions on a live kernel on SMP
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. This allows
 * a user to poke/set multi-byte text on SMP. Only modification of
 * non-NMI/MCE code should be allowed, since stop_machine() does _not_
 * protect code against NMI and MCE.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
{
        struct text_poke_params tpp;
        struct text_poke_param p;

        p.addr = addr;
        p.opcode = opcode;
        p.len = len;
        tpp.params = &p;
        tpp.nparams = 1;
        atomic_set(&stop_machine_first, 1);
        wrote_text = 0;
        /* Use __stop_machine() because the caller already got online_cpus. */
        __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
        return addr;
}
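
/*
 * Illustrative sketch (not part of the original file): replacing a 5-byte
 * instruction on a running SMP system.  Per the note above, the caller must
 * already hold get_online_cpus() and text_mutex; "addr" and "newinsn" are
 * hypothetical.
 */
#if 0
static void poke_jump_example(void *addr, const u8 newinsn[5])
{
        get_online_cpus();
        mutex_lock(&text_mutex);
        text_poke_smp(addr, newinsn, 5);        /* all CPUs parked in
                                                   stop_machine() meanwhile */
        mutex_unlock(&text_mutex);
        put_online_cpus();
}
#endif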

/**
 * text_poke_smp_batch - Update instructions on a live kernel on SMP
 * @params: an array of text_poke parameters
 * @n: the number of elements in params.
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. Since
 * stop_machine() is a heavy operation, it is better to aggregate
 * text_poke requests and do them all at once if possible.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
{
        struct text_poke_params tpp = {.params = params, .nparams = n};

        atomic_set(&stop_machine_first, 1);
        wrote_text = 0;
        __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
}
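
/*
 * Illustrative sketch (not part of the original file): aggregating several
 * pokes into a single stop_machine() pass, as the optimized-kprobes code of
 * this era does.  The params array and count are hypothetical; the locking
 * rules are the same as for text_poke_smp().
 */
#if 0
static void poke_many_example(struct text_poke_param *params, int n)
{
        get_online_cpus();
        mutex_lock(&text_mutex);
        text_poke_smp_batch(params, n); /* one machine stop for all n sites */
        mutex_unlock(&text_mutex);
        put_online_cpus();
}
#endif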