// SPDX-License-Identifier: GPL-2.0
#include <linux/stringify.h>

#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
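
/*
 * Patch pv_ops call sites with the native instruction sequences defined
 * below when running on bare metal: the raw opcode bytes in
 * patch_data_xxl / patch_data_lock are copied over the indirect call
 * sites by paravirt_patch_insns().
 */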

#define PSTART(d, m)	\
	patch_data_##d.m

#define PEND(d, m)	\
	(PSTART(d, m) + sizeof(patch_data_##d.m))

#define PATCH(d, m, insn_buff, len)	\
	paravirt_patch_insns(insn_buff, len, PSTART(d, m), PEND(d, m))

#define PATCH_CASE(ops, m, data, insn_buff, len)		\
	case PARAVIRT_PATCH(ops.m):				\
		return PATCH(data, ops##_##m, insn_buff, len)
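
/*
 * For instance, PATCH_CASE(irq, save_fl, xxl, insn_buff, len) expands to
 *
 *	case PARAVIRT_PATCH(irq.save_fl):
 *		return PATCH(xxl, irq_save_fl, insn_buff, len);
 *
 * which copies the patch_data_xxl.irq_save_fl bytes (pushf; pop %[re]ax)
 * into insn_buff and returns the number of bytes written.
 */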

#ifdef CONFIG_PARAVIRT_XXL
struct patch_xxl {
	const unsigned char	irq_irq_disable[1];
	const unsigned char	irq_irq_enable[1];
	const unsigned char	irq_save_fl[2];
	const unsigned char	mmu_read_cr2[3];
	const unsigned char	mmu_read_cr3[3];
	const unsigned char	mmu_write_cr3[3];
	const unsigned char	irq_restore_fl[2];
# ifdef CONFIG_X86_64
	const unsigned char	cpu_wbinvd[2];
	const unsigned char	cpu_usergs_sysret64[6];
	const unsigned char	cpu_swapgs[3];
	const unsigned char	mov64[3];
# else
	const unsigned char	cpu_iret[1];
# endif
};
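
/*
 * Native replacement sequences for the pv_ops callbacks, as raw x86
 * opcode bytes.  The 32-bit and 64-bit variants of write_cr3 and
 * restore_fl differ only in which register carries the argument
 * (%eax vs. %rdi).
 */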
static const struct patch_xxl patch_data_xxl = {
	.irq_irq_disable	= { 0xfa },		// cli
	.irq_irq_enable		= { 0xfb },		// sti
	.irq_save_fl		= { 0x9c, 0x58 },	// pushf; pop %[re]ax
	.mmu_read_cr2		= { 0x0f, 0x20, 0xd0 },	// mov %cr2, %[re]ax
	.mmu_read_cr3		= { 0x0f, 0x20, 0xd8 },	// mov %cr3, %[re]ax
# ifdef CONFIG_X86_64
	.mmu_write_cr3		= { 0x0f, 0x22, 0xdf },	// mov %rdi, %cr3
	.irq_restore_fl		= { 0x57, 0x9d },	// push %rdi; popfq
	.cpu_wbinvd		= { 0x0f, 0x09 },	// wbinvd
	.cpu_usergs_sysret64	= { 0x0f, 0x01, 0xf8,
				    0x48, 0x0f, 0x07 },	// swapgs; sysretq
	.cpu_swapgs		= { 0x0f, 0x01, 0xf8 },	// swapgs
	.mov64			= { 0x48, 0x89, 0xf8 },	// mov %rdi, %rax
# else
	.mmu_write_cr3		= { 0x0f, 0x22, 0xd8 },	// mov %eax, %cr3
	.irq_restore_fl		= { 0x50, 0x9d },	// push %eax; popf
	.cpu_iret		= { 0xcf },		// iret
# endif
};
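
/*
 * The 64-bit identity patch: a pv_ops hook that simply passes its first
 * argument through is replaced by "mov %rdi, %rax".  On 32-bit nothing
 * is patched and the function returns 0.
 */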
unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len)
{
#ifdef CONFIG_X86_64
	return PATCH(xxl, mov64, insn_buff, len);
#endif
	return 0;
}
# endif /* CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PARAVIRT_SPINLOCKS
struct patch_lock {
	unsigned char queued_spin_unlock[3];
	unsigned char vcpu_is_preempted[2];
};
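
/*
 * Native spinlock fast paths: unlocking is a single byte store of zero
 * to the lock word, and a bare-metal CPU is never "preempted", so
 * vcpu_is_preempted() can simply return false (xor %eax, %eax).
 */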
static const struct patch_lock patch_data_lock = {
	.vcpu_is_preempted	= { 0x31, 0xc0 },	// xor %eax, %eax

# ifdef CONFIG_X86_64
	.queued_spin_unlock	= { 0xc6, 0x07, 0x00 },	// movb $0, (%rdi)
# else
	.queued_spin_unlock	= { 0xc6, 0x00, 0x00 },	// movb $0, (%eax)
# endif
};
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
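
/*
 * native_patch() is the patching callback used on bare metal: pv_ops
 * slots with a known-trivial native implementation are replaced inline
 * with the byte sequences above, everything else is handled by
 * paravirt_patch_default().
 */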
unsigned int native_patch(u8 type, void *insn_buff,
			  unsigned long addr, unsigned int len)
{
	switch (type) {

#ifdef CONFIG_PARAVIRT_XXL
	PATCH_CASE(irq, restore_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
	PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);

	PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len);
	PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
	PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);

# ifdef CONFIG_X86_64
	PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
	PATCH_CASE(cpu, swapgs, xxl, insn_buff, len);
	PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
# else
	PATCH_CASE(cpu, iret, xxl, insn_buff, len);
# endif
#endif

#ifdef CONFIG_PARAVIRT_SPINLOCKS
	case PARAVIRT_PATCH(lock.queued_spin_unlock):
		if (pv_is_native_spin_unlock())
			return PATCH(lock, queued_spin_unlock, insn_buff, len);
		break;

	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
		if (pv_is_native_vcpu_is_preempted())
			return PATCH(lock, vcpu_is_preempted, insn_buff, len);
		break;
#endif

	default:
		break;
	}

	return paravirt_patch_default(type, insn_buff, addr, len);
}