// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */
#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/pagemap.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>
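/*
 * The host maps the KVM magic page at the very top of the guest's
 * effective address space (-4096), so every field of struct
 * kvm_vcpu_arch_shared is reachable through the 16-bit displacement of
 * a load or store with a zero base register.  magic_var() computes that
 * displacement for a given field.
 */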
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6
#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)
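/*
 * For example, KVM_INST_MFSPR(SPRN_SRR0) (SPRN_SRR0 == 26) expands to
 * 0x7c0002a6 | (26 << 16) == 0x7c1a02a6, i.e. "mfspr rN, srr0" with the
 * RT field left clear, which is exactly what kvm_check_ins() compares
 * against after masking RT out of the scanned instruction.
 */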
#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4
static bool kvm_patching_worked = true;
extern char kvm_tmp[];
extern char kvm_tmp_end[];
static int kvm_tmp_index;
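/*
 * kvm_tmp is a scratch buffer, defined outside this file, that holds the
 * per-call-site copies of the emulation templates; kvm_alloc() below hands
 * out chunks of it and kvm_tmp_index tracks how much has been used.
 */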
static void __init kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}
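/*
 * The helpers below overwrite a single guest instruction in place.  The
 * "ll"/"ld"/"std" variants pick ld/std on 64-bit kernels and fall back to
 * lwz/stw on 32-bit kernels, where the _ld/_std forms address the low
 * word of the 64-bit magic-page field (addr + 4).
 */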
static void __init kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}
static void __init kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}
static void __init kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}
static void __init kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}
static void __init kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}
static void __init kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}
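/*
 * Rewrite an instruction into an unconditional relative branch.  The
 * 26-bit branch displacement limits how far the target (the kvm_tmp copy
 * of a template) may be from the patched instruction; callers check the
 * distance against KVM_INST_B_MAX before calling this.
 */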
static void __init kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels the interrupt handlers and our code
	   can be in different regions, so we don't patch them */

	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}
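/*
 * Carve the next chunk out of kvm_tmp.  This is a simple bump allocator;
 * if the buffer runs out, patching is marked as failed and the remaining
 * call sites are left untouched.
 */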
static u32 * __init kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > (kvm_tmp_end - kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}
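/*
 * Each kvm_emulate_* symbol below names an assembly template that gets
 * copied into kvm_tmp, and the matching *_offs symbols give the word
 * offsets of the instructions that must be fixed up per call site: the
 * branch back to the guest code, the register-dependent instruction(s)
 * and a slot holding a copy of the original instruction.  The templates
 * appear to use r30/r31 as scratch, which is why call sites whose RT is
 * 30 or 31 are redirected through the scratch2/scratch1 fields of the
 * magic page instead.
 */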
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void __init kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void __init kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
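/*
 * wrtee/wrteei are the BookE way of flipping MSR[EE]; they get the same
 * trampoline treatment as mtmsr/mtmsrd above.  For the immediate form
 * with EE=1 the register-load slot is replaced by "li r30, MSR_EE" so no
 * guest register needs to be consulted at all.
 */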
extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void __init kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void __init kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
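/*
 * Segment-register updates via mtsrin only exist on 32-bit Book3S, and
 * they are only rewritten when the host advertises KVM_MAGIC_FEAT_SR,
 * hence the CONFIG_PPC_BOOK3S_32 guard around the next template.
 */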
#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif
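/*
 * Ask the host to map the magic page.  This runs via on_each_cpu(), so
 * "data" points at the shared u32 feature word; whatever feature bits the
 * hypercall returns are stored there and later passed to kvm_check_ins().
 */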
static void __init kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}
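/*
 * Examine one instruction.  The RT field is masked off so a single case
 * label matches the instruction regardless of which register it targets;
 * privileged reads become loads from the magic page, privileged writes
 * become stores to it, and the few instructions that need real emulation
 * are redirected into the templates above.
 */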
static void __init kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */
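	/*
	 * SPRG4-7 (and their BookE read-only aliases), ESR and PIR are only
	 * shared through the magic page when the host sets
	 * KVM_MAGIC_FEAT_MAS0_TO_SPRG7 in the feature word, so every case
	 * below is gated on that bit.
	 */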
	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;
	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */
	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif
	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}
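/*
 * kvm_template_start/kvm_template_end bracket the emulation templates
 * linked into the kernel image so the scan in kvm_use_magic_page() can
 * skip over them rather than patch the patcher.
 */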
extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

static void __init kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}
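/*
 * Guest-side entry point: patch only when running as a KVM guest with
 * working ePAPR hypercall support, and only touch the magic page when
 * the host advertises KVM_FEATURE_MAGIC_PAGE.
 */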
static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		return 0;

	if (!epapr_paravirt_enabled)
		return 0;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

	return 0;
}

postcore_initcall(kvm_guest_init);