// SPDX-License-Identifier: GPL-2.0
#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
#include <linux/stringify.h>
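
/*
 * DEF_NATIVE() emits the given native instruction sequence and brackets
 * it with start_<ops>_<name>/end_<ops>_<name> symbols, so native_patch()
 * below can copy the raw instructions straight over the corresponding
 * paravirt call site when running on bare metal.
 */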
#ifdef CONFIG_PARAVIRT_XXL
DEF_NATIVE(irq, irq_disable, "cli");
DEF_NATIVE(irq, irq_enable, "sti");
DEF_NATIVE(irq, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(irq, save_fl, "pushfq; popq %rax");
DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(cpu, wbinvd, "wbinvd");

DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
DEF_NATIVE(cpu, swapgs, "swapgs");
#endif
DEF_NATIVE(, mov32, "mov %edi, %eax");
DEF_NATIVE(, mov64, "mov %rdi, %rax");

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
#endif
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov32, end__mov32);
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov64, end__mov64);
}
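
/* Both helpers are defined in arch/x86/kernel/paravirt-spinlocks.c. */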
extern bool pv_is_native_spin_unlock(void);
extern bool pv_is_native_vcpu_is_preempted(void);
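
/*
 * native_patch() is invoked once per paravirt patch site: for the ops
 * handled below it replaces the indirect call with the matching native
 * instruction template; everything else falls through to
 * paravirt_patch_default().
 */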
unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
{
#define PATCH_SITE(ops, x)						\
	case PARAVIRT_PATCH(ops.x):					\
		return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x)

	switch (type) {
#ifdef CONFIG_PARAVIRT_XXL
	PATCH_SITE(irq, restore_fl);
	PATCH_SITE(irq, save_fl);
	PATCH_SITE(irq, irq_enable);
	PATCH_SITE(irq, irq_disable);
	PATCH_SITE(cpu, usergs_sysret64);
	PATCH_SITE(cpu, swapgs);
	PATCH_SITE(cpu, wbinvd);
	PATCH_SITE(mmu, read_cr2);
	PATCH_SITE(mmu, read_cr3);
	PATCH_SITE(mmu, write_cr3);
#endif
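
	/*
	 * Spinlock ops are patched to their native form only if the pv_ops
	 * entries still point at the native implementations; a hypervisor
	 * may have installed its own versions at boot.
	 */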
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
	case PARAVIRT_PATCH(lock.queued_spin_unlock):
		if (pv_is_native_spin_unlock())
			return paravirt_patch_insns(ibuf, len,
						    start_lock_queued_spin_unlock,
						    end_lock_queued_spin_unlock);
		break;

	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
		if (pv_is_native_vcpu_is_preempted())
			return paravirt_patch_insns(ibuf, len,
						    start_lock_vcpu_is_preempted,
						    end_lock_vcpu_is_preempted);
		break;
#endif

	default:
		break;
	}
#undef PATCH_SITE
	return paravirt_patch_default(type, ibuf, addr, len);
}