kvm: take srcu lock around kvm_steal_time_set_preempted()
[linux/fpc-iii.git] / arch / powerpc / lib / feature-fixups.c
blob043415f0bdb1646fa85f7bb26d04f0241c68ff63
1 /*
2 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
4 * Modifications for ppc64:
5 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
7 * Copyright 2008 Michael Ellerman, IBM Corporation.
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
15 #include <linux/types.h>
16 #include <linux/jump_label.h>
17 #include <linux/kernel.h>
18 #include <linux/string.h>
19 #include <linux/init.h>
20 #include <asm/cputable.h>
21 #include <asm/code-patching.h>
22 #include <asm/page.h>
23 #include <asm/sections.h>
24 #include <asm/setup.h>
25 #include <asm/firmware.h>
/*
 * One entry in a feature-fixup table emitted by the BEGIN/END_FTR_SECTION
 * assembler macros.  All offsets are stored relative to the address of the
 * entry itself (see calc_addr()), which keeps the tables position-independent.
 */
struct fixup_entry {
	unsigned long	mask;		/* feature bits this entry tests */
	unsigned long	value;		/* value the masked bits must equal to keep the code */
	long		start_off;	/* start of the code region to patch */
	long		end_off;	/* end of the code region to patch */
	long		alt_start_off;	/* start of the alternative instructions */
	long		alt_end_off;	/* end of the alternative instructions */
};
/*
 * Convert a stored offset back into a real address.  Offsets are kept
 * relative to the fixup entry itself (and may be negative) so the same
 * machinery works for the VDSO.
 */
static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
{
	unsigned long base = (unsigned long)fcur;

	return (unsigned int *)(base + offset);
}
/*
 * Copy one instruction from the alternative section at @src into the kernel
 * text at @dest.  Relative branches whose target lies outside the
 * alternative section [@alt_start, @alt_end) are re-encoded for their new
 * location via translate_branch().
 *
 * Returns 0 on success, 1 if the branch could not be translated (e.g. the
 * retargeted displacement does not fit in the instruction encoding).
 */
static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
				 unsigned int *alt_start, unsigned int *alt_end)
{
	unsigned int instr;

	instr = *src;

	if (instr_is_relative_branch(*src)) {
		unsigned int *target = (unsigned int *)branch_target(src);

		/* Branch within the section doesn't need translating */
		if (target < alt_start || target >= alt_end) {
			instr = translate_branch(dest, src);
			if (!instr)
				return 1;
		}
	}

	patch_instruction(dest, instr);

	return 0;
}
69 static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
71 unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;
73 start = calc_addr(fcur, fcur->start_off);
74 end = calc_addr(fcur, fcur->end_off);
75 alt_start = calc_addr(fcur, fcur->alt_start_off);
76 alt_end = calc_addr(fcur, fcur->alt_end_off);
78 if ((alt_end - alt_start) > (end - start))
79 return 1;
81 if ((value & fcur->mask) == fcur->value)
82 return 0;
84 src = alt_start;
85 dest = start;
87 for (; src < alt_end; src++, dest++) {
88 if (patch_alt_instruction(src, dest, alt_start, alt_end))
89 return 1;
92 for (; dest < end; dest++)
93 patch_instruction(dest, PPC_INST_NOP);
95 return 0;
98 void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
100 struct fixup_entry *fcur, *fend;
102 fcur = fixup_start;
103 fend = fixup_end;
105 for (; fcur < fend; fcur++) {
106 if (patch_feature_section(value, fcur)) {
107 WARN_ON(1);
108 printk("Unable to patch feature section at %p - %p" \
109 " with %p - %p\n",
110 calc_addr(fcur, fcur->start_off),
111 calc_addr(fcur, fcur->end_off),
112 calc_addr(fcur, fcur->alt_start_off),
113 calc_addr(fcur, fcur->alt_end_off));
118 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
120 long *start, *end;
121 unsigned int *dest;
123 if (!(value & CPU_FTR_LWSYNC))
124 return ;
126 start = fixup_start;
127 end = fixup_end;
129 for (; start < end; start++) {
130 dest = (void *)start + *start;
131 patch_instruction(dest, PPC_INST_LWSYNC);
/*
 * On a relocatable 64-bit kernel that is actually running relocated
 * (PHYSICAL_START != 0), copy the early text from the running image
 * (KERNELBASE + PHYSICAL_START) down to the link address (KERNELBASE),
 * one instruction at a time via patch_instruction().
 *
 * NOTE(review): the copied range covers _stext up to __end_interrupts,
 * i.e. presumably the exception vector text that must live at the fixed
 * low address — confirm against head_64.S.
 */
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
	int *src, *dest;
	unsigned long length;

	/* Not relocated: the text already lives at its link address */
	if (PHYSICAL_START == 0)
		return;

	src = (int *)(KERNELBASE + PHYSICAL_START);
	dest = (int *)KERNELBASE;
	length = (__end_interrupts - _stext) / sizeof(int);

	while (length--) {
		patch_instruction(dest, *src);
		src++;
		dest++;
	}
#endif
}
/*
 * Snapshots of the feature words taken by apply_feature_fixups(), used by
 * check_features() late in boot to detect anyone changing the features
 * after the kernel text has already been patched based on them.
 */
static unsigned long __initdata saved_cpu_features;
static unsigned int __initdata saved_mmu_features;
#ifdef CONFIG_PPC64
static unsigned long __initdata saved_firmware_features;
#endif
/*
 * Apply all boot-time feature fixups to the kernel text.
 *
 * NOTE(review): the pervasive PTRRELOC() use suggests this can run before
 * the kernel has been relocated to its final address — confirm with callers.
 */
void __init apply_feature_fixups(void)
{
	struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));

	/* Snapshot the feature words so check_features() can verify they
	 * do not change after patching. */
	*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
	*PTRRELOC(&saved_mmu_features) = spec->mmu_features;

	/*
	 * Apply the CPU-specific and firmware specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));

	do_feature_fixups(spec->mmu_features,
			  PTRRELOC(&__start___mmu_ftr_fixup),
			  PTRRELOC(&__stop___mmu_ftr_fixup));

	do_lwsync_fixups(spec->cpu_features,
			 PTRRELOC(&__start___lwsync_fixup),
			 PTRRELOC(&__stop___lwsync_fixup));

#ifdef CONFIG_PPC64
	/* Firmware feature sections exist only on 64-bit */
	saved_firmware_features = powerpc_firmware_features;
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
#endif
	do_final_fixups();
}
/*
 * Set up the static-key based feature checks.  jump_label_init() must run
 * before the key-init helpers so the keys can be flipped.
 */
void __init setup_feature_keys(void)
{
	/*
	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
	 * checks to take on their correct polarity based on the current set of
	 * CPU/MMU features.
	 */
	jump_label_init();
	cpu_feature_keys_init();
	mmu_feature_keys_init();
}
/*
 * Late-boot sanity check: the feature words must match the snapshots taken
 * in apply_feature_fixups(), because the kernel text was patched based on
 * those values — changing them afterwards would leave text and features
 * inconsistent.
 */
static int __init check_features(void)
{
	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
	     "CPU features changed after feature patching!\n");
	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
	     "MMU features changed after feature patching!\n");
#ifdef CONFIG_PPC64
	WARN(saved_firmware_features != powerpc_firmware_features,
	     "Firmware features changed after feature patching!\n");
#endif

	return 0;
}
late_initcall(check_features);
220 #ifdef CONFIG_FTR_FIXUP_SELFTEST
/*
 * Self-test assertion helper: report (but don't stop on) a failed check.
 * Wrapped in do { } while (0) so it expands safely as a single statement —
 * the previous bare-if form would silently capture a following "else".
 */
#define check(x)							\
	do {								\
		if (!(x))						\
			printk("feature-fixups: test failed at line %d\n", __LINE__); \
	} while (0)
225 /* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
226 static struct fixup_entry fixup;
228 static long calc_offset(struct fixup_entry *entry, unsigned int *p)
230 return (unsigned long)p - (unsigned long)entry;
233 static void test_basic_patching(void)
235 extern unsigned int ftr_fixup_test1;
236 extern unsigned int end_ftr_fixup_test1;
237 extern unsigned int ftr_fixup_test1_orig;
238 extern unsigned int ftr_fixup_test1_expected;
239 int size = &end_ftr_fixup_test1 - &ftr_fixup_test1;
241 fixup.value = fixup.mask = 8;
242 fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1);
243 fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2);
244 fixup.alt_start_off = fixup.alt_end_off = 0;
246 /* Sanity check */
247 check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
249 /* Check we don't patch if the value matches */
250 patch_feature_section(8, &fixup);
251 check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
253 /* Check we do patch if the value doesn't match */
254 patch_feature_section(0, &fixup);
255 check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
257 /* Check we do patch if the mask doesn't match */
258 memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size);
259 check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0);
260 patch_feature_section(~8, &fixup);
261 check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0);
264 static void test_alternative_patching(void)
266 extern unsigned int ftr_fixup_test2;
267 extern unsigned int end_ftr_fixup_test2;
268 extern unsigned int ftr_fixup_test2_orig;
269 extern unsigned int ftr_fixup_test2_alt;
270 extern unsigned int ftr_fixup_test2_expected;
271 int size = &end_ftr_fixup_test2 - &ftr_fixup_test2;
273 fixup.value = fixup.mask = 0xF;
274 fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1);
275 fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2);
276 fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt);
277 fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1);
279 /* Sanity check */
280 check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
282 /* Check we don't patch if the value matches */
283 patch_feature_section(0xF, &fixup);
284 check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
286 /* Check we do patch if the value doesn't match */
287 patch_feature_section(0, &fixup);
288 check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
290 /* Check we do patch if the mask doesn't match */
291 memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size);
292 check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0);
293 patch_feature_section(~0xF, &fixup);
294 check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0);
297 static void test_alternative_case_too_big(void)
299 extern unsigned int ftr_fixup_test3;
300 extern unsigned int end_ftr_fixup_test3;
301 extern unsigned int ftr_fixup_test3_orig;
302 extern unsigned int ftr_fixup_test3_alt;
303 int size = &end_ftr_fixup_test3 - &ftr_fixup_test3;
305 fixup.value = fixup.mask = 0xC;
306 fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1);
307 fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2);
308 fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt);
309 fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2);
311 /* Sanity check */
312 check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
314 /* Expect nothing to be patched, and the error returned to us */
315 check(patch_feature_section(0xF, &fixup) == 1);
316 check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
317 check(patch_feature_section(0, &fixup) == 1);
318 check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
319 check(patch_feature_section(~0xF, &fixup) == 1);
320 check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0);
323 static void test_alternative_case_too_small(void)
325 extern unsigned int ftr_fixup_test4;
326 extern unsigned int end_ftr_fixup_test4;
327 extern unsigned int ftr_fixup_test4_orig;
328 extern unsigned int ftr_fixup_test4_alt;
329 extern unsigned int ftr_fixup_test4_expected;
330 int size = &end_ftr_fixup_test4 - &ftr_fixup_test4;
331 unsigned long flag;
333 /* Check a high-bit flag */
334 flag = 1UL << ((sizeof(unsigned long) - 1) * 8);
335 fixup.value = fixup.mask = flag;
336 fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1);
337 fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5);
338 fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt);
339 fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2);
341 /* Sanity check */
342 check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
344 /* Check we don't patch if the value matches */
345 patch_feature_section(flag, &fixup);
346 check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
348 /* Check we do patch if the value doesn't match */
349 patch_feature_section(0, &fixup);
350 check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
352 /* Check we do patch if the mask doesn't match */
353 memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size);
354 check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0);
355 patch_feature_section(~flag, &fixup);
356 check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0);
/* Branch inside the alternative: boot-time fixup must have matched. */
static void test_alternative_case_with_branch(void)
{
	extern unsigned int ftr_fixup_test5;
	extern unsigned int end_ftr_fixup_test5;
	extern unsigned int ftr_fixup_test5_expected;
	int len = &end_ftr_fixup_test5 - &ftr_fixup_test5;

	check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, len) == 0);
}
/* Branch out of the alternative: boot-time fixup must have matched. */
static void test_alternative_case_with_external_branch(void)
{
	extern unsigned int ftr_fixup_test6;
	extern unsigned int end_ftr_fixup_test6;
	extern unsigned int ftr_fixup_test6_expected;
	int len = &end_ftr_fixup_test6 - &ftr_fixup_test6;

	check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, len) == 0);
}
379 static void test_cpu_macros(void)
381 extern u8 ftr_fixup_test_FTR_macros;
382 extern u8 ftr_fixup_test_FTR_macros_expected;
383 unsigned long size = &ftr_fixup_test_FTR_macros_expected -
384 &ftr_fixup_test_FTR_macros;
386 /* The fixups have already been done for us during boot */
387 check(memcmp(&ftr_fixup_test_FTR_macros,
388 &ftr_fixup_test_FTR_macros_expected, size) == 0);
/*
 * Verify the firmware FW_FTR_SECTION macros; these only exist on 64-bit,
 * so the body compiles away entirely elsewhere.
 */
static void test_fw_macros(void)
{
#ifdef CONFIG_PPC64
	extern u8 ftr_fixup_test_FW_FTR_macros;
	extern u8 ftr_fixup_test_FW_FTR_macros_expected;
	unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
			     &ftr_fixup_test_FW_FTR_macros;

	/* The fixups have already been done for us during boot */
	check(memcmp(&ftr_fixup_test_FW_FTR_macros,
		     &ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}
405 static void test_lwsync_macros(void)
407 extern u8 lwsync_fixup_test;
408 extern u8 end_lwsync_fixup_test;
409 extern u8 lwsync_fixup_test_expected_LWSYNC;
410 extern u8 lwsync_fixup_test_expected_SYNC;
411 unsigned long size = &end_lwsync_fixup_test -
412 &lwsync_fixup_test;
414 /* The fixups have already been done for us during boot */
415 if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
416 check(memcmp(&lwsync_fixup_test,
417 &lwsync_fixup_test_expected_LWSYNC, size) == 0);
418 } else {
419 check(memcmp(&lwsync_fixup_test,
420 &lwsync_fixup_test_expected_SYNC, size) == 0);
/*
 * Entry point for the feature-fixup self-tests, run as a late initcall so
 * all boot-time patching has already happened.  Failures are reported via
 * the check() macro's printk; the initcall itself always succeeds.
 */
static int __init test_feature_fixups(void)
{
	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");

	test_basic_patching();
	test_alternative_patching();
	test_alternative_case_too_big();
	test_alternative_case_too_small();
	test_alternative_case_with_branch();
	test_alternative_case_with_external_branch();
	test_cpu_macros();
	test_fw_macros();
	test_lwsync_macros();

	return 0;
}
late_initcall(test_feature_fixups);
442 #endif /* CONFIG_FTR_FIXUP_SELFTEST */