/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/nmi.h> /* hardlockup_detector_disable() */

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
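
/*
 * The host maps a "magic page" holding a struct kvm_vcpu_arch_shared
 * at the top of the guest's effective address space (-4096).
 * magic_var() yields the effective address of one field of that
 * struct, so privileged register accesses can be rewritten into plain
 * loads and stores against the shared page instead of trapping.
 */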
#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000
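
/*
 * The KVM_INST_* values above are PowerPC opcode templates with their
 * operand fields zeroed; the patching code ORs the register (RT) and
 * displacement bits into them. KVM_INST_B_MASK covers the low 26 bits
 * of a "b" instruction and KVM_INST_B_MAX is the largest forward
 * displacement that fits, so a trampoline must sit within branch
 * range of the instruction being patched.
 */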
#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6
#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)
#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4
static bool kvm_patching_worked = true;
char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;
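
/*
 * kvm_tmp is a 1 MB scratch arena that the emulation trampolines are
 * copied into; kvm_tmp_index tracks how much of it kvm_alloc() has
 * handed out. Whatever is left over is given back to the page
 * allocator by kvm_free_tmp() at init time.
 */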
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}
static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}
static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}
static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}
static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}
static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}
static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}
static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels interrupt handlers and our code
	   can be in different regions, so we don't patch them */

	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}
static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];
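
/*
 * Each kvm_emulate_* template (the assembly code bracketed by
 * kvm_template_start/kvm_template_end) is copied into kvm_tmp and
 * then specialized: the *_branch_offs word gets the branch back to
 * the instruction following the patch site, the *_reg_offs word(s)
 * get the guest register, and *_orig_ins_offs keeps a copy of the
 * original instruction. Finally, the patch site itself is replaced
 * with a branch into the trampoline.
 */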
static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];
static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];
static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];
static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif /* CONFIG_BOOKE */
#ifdef CONFIG_PPC_BOOK3S_32
extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];
static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif /* CONFIG_PPC_BOOK3S_32 */
static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}
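
/*
 * Worked example: "mfspr r3, SPRN_SRR0" encodes as 0x7c7a02a6, which
 * equals KVM_INST_MFSPR(SPRN_SRR0) once the RT field (r3) is masked
 * off. kvm_check_ins() therefore rewrites it in place to a load of
 * magic_var(srr0) into r3, so the SPR read no longer traps into the
 * host.
 */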
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */
	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;
	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */
	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif
	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}
extern u32 kvm_template_start[];
extern u32 kvm_template_end[];
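
/*
 * kvm_template_start/kvm_template_end bracket the emulation templates
 * themselves in the kernel image; the patch loop below must skip that
 * region so the templates are not rewritten while they still serve as
 * the master copies for kvm_tmp.
 */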
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 tmp;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}
static __init void kvm_free_tmp(void)
{
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}
static int __init kvm_guest_init(void)
{
	/*
	 * The hardlockup detector is likely to get false positives in
	 * KVM guests, so disable it by default.
	 */
	hardlockup_detector_disable();

	if (!kvm_para_available())
		goto free_tmp;

	if (!epapr_paravirt_enabled)
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}

postcore_initcall(kvm_guest_init);