/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/nmi.h> /* hardlockup_detector_disable() */

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
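/*
 * The "magic page" is a page shared between guest and host. The guest
 * asks the host (via the ePAPR hypercall issued in kvm_map_magic_page()
 * below) to map it at the very top of the effective address space,
 * i.e. at -4096. magic_var(x) then yields the guest effective address
 * of field x of the struct kvm_vcpu_arch_shared living in that page.
 */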
#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100
#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)
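/*
 * mfspr/mtspr encode the SPR number with its two 5-bit halves swapped,
 * which is what the (sprn & 0x1f) / (sprn & 0x3e0) shuffle above
 * reproduces; OR'ing in SPR_TO flips the bit that turns mfspr into
 * mtspr. For example, KVM_INST_MFSPR(SPRN_SPRG0), with SPRG0 = 272
 * (0x110), evaluates to 0x7c1042a6, i.e. "mfspr r0, 272" before the
 * RT field of the instruction being matched is OR'ed in.
 */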
#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4
static bool kvm_patching_worked = true;
char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;
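/*
 * kvm_tmp is a 1 MiB scratch buffer from which the patching code below
 * carves out the rewritten instruction sequences it branches to;
 * whatever is left over is handed back to the page allocator by
 * kvm_free_tmp() at the end of init. kvm_patch_ins() is the primitive
 * every rewrite goes through: store the new instruction, then flush
 * the icache range so the CPU sees it.
 */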
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}
static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}
static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}
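/*
 * The generated loads and stores leave the RA field zero, and the
 * hardware treats r0 as a literal zero there, so the sign-extended
 * 16-bit displacement alone addresses the magic page at the top of
 * the address space (-4096..-1). kvm_patch_ins_ll() loads a
 * native-sized word, while kvm_patch_ins_ld() reads a 64-bit
 * shared-page field: on 32-bit kernels it fetches the low word at
 * addr + 4 instead (the shared page is laid out big-endian here). As
 * an example of their use, kvm_check_ins() below turns "mfmsr r5"
 * (0x7ca000a6) into a load of magic_var(msr) into r5, so the guest
 * reads its MSR without trapping into the host.
 */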
static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}
static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/*
	 * On relocatable kernels the interrupt handlers and our code
	 * can end up in different regions, so don't patch them.
	 */
	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}
static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
		       kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}
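/*
 * The kvm_emulate_* symbols below come from assembly templates (see
 * kvm_emul.S); for each template we get its length plus the offsets,
 * in instructions, of the slots to fix up per call site: a branch
 * slot that receives a branch back to the instruction after the
 * patched one, register slots that receive the original instruction's
 * register operands, and an orig_ins slot that receives the original
 * (trapping) instruction for the cases the template cannot handle
 * locally. The templates use r30/r31 as scratch, stashing their old
 * values in the magic page's scratch slots, which is why the patch
 * routines special-case source registers 30 and 31: the value has to
 * be reloaded from the matching scratch slot instead.
 */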
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];
static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];
static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
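/*
 * Book E has no mtmsrd; interrupts are enabled and disabled with
 * wrtee rS (copy MSR[EE] from a register) and wrteei 0/1 (immediate
 * form), so those get the same branch-to-template treatment below.
 */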
#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];
static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];
static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif
#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];
static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif
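/*
 * Ask the host, through the ePAPR hypercall interface, to map the
 * magic page at KVM_MAGIC_PAGE on the CPU this runs on. The hypercall
 * returns a feature bitmap telling us which shared-page fields the
 * host actually keeps up to date.
 */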
static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}
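/*
 * Inspect one kernel instruction and, if it is one of the privileged
 * instructions we know how to handle, patch it in place. Matching is
 * done in three passes: first with the RT field masked off (mfmsr,
 * mtmsr/mtmsrd and the mfspr/mtspr cases), then with RT and RB masked
 * off (mtsrin), and finally on the exact opcode (wrteei, whose
 * "enable" bit lives outside any register field).
 */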
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */
	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;
	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */
	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif
	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}
	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}
extern u32 kvm_template_start[];
extern u32 kvm_template_end[];
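/*
 * Map the magic page everywhere, verify it is actually readable, then
 * walk the whole kernel text and patch every instruction we recognize.
 */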
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 tmp;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
	       kvm_patching_worked ? "worked" : "failed");
}
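/*
 * Hand the unused tail of kvm_tmp back to the page allocator;
 * free_reserved_area() only frees the whole pages inside that range.
 */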
static __init void kvm_free_tmp(void)
{
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}
static int __init kvm_guest_init(void)
{
	/*
	 * The hardlockup detector is likely to get false positives in
	 * KVM guests, so disable it by default.
	 */
	hardlockup_detector_disable();

	if (!kvm_para_available())
		goto free_tmp;

	if (!epapr_paravirt_enabled)
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}

postcore_initcall(kvm_guest_init);