#include <asm/paravirt.h>

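/*
 * DEF_NATIVE() assembles the given native instruction sequence and
 * exports start_<ops>_<name>/end_<ops>_<name> symbols delimiting it,
 * so native_patch() below can copy those bytes over a patch site.
 */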
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
DEF_NATIVE(pv_cpu_ops, iret, "iret");
DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
DEF_NATIVE(pv_cpu_ops, clts, "clts");
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
#endif
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	/* arg in %eax, return in %eax */
	return 0;
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	/* arg in %edx:%eax, return in %edx:%eax */
	return 0;
}

extern bool pv_is_native_spin_unlock(void);
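
/*
 * Replace a paravirt call site with the corresponding inline native
 * instructions defined above, falling back to the generic patcher for
 * any op we have no native template for.
 */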
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

	switch (type) {
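/*
 * Each PATCH_SITE(ops, x) case picks up the start/end markers that
 * DEF_NATIVE(ops, x, ...) emitted above, then jumps to the common
 * patch_site code that copies the instructions into place.
 */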
#define PATCH_SITE(ops, x)					\
	case PARAVIRT_PATCH(ops.x):				\
		start = start_##ops##_##x;			\
		end = end_##ops##_##x;				\
		goto patch_site

	PATCH_SITE(pv_irq_ops, irq_disable);
	PATCH_SITE(pv_irq_ops, irq_enable);
	PATCH_SITE(pv_irq_ops, restore_fl);
	PATCH_SITE(pv_irq_ops, save_fl);
	PATCH_SITE(pv_cpu_ops, iret);
	PATCH_SITE(pv_mmu_ops, read_cr2);
	PATCH_SITE(pv_mmu_ops, read_cr3);
	PATCH_SITE(pv_mmu_ops, write_cr3);
	PATCH_SITE(pv_cpu_ops, clts);
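
	/*
	 * The inline byte store is only a valid unlock while the native
	 * (not pv-hinted) unlock implementation is in use.
	 */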
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
	case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
		if (pv_is_native_spin_unlock()) {
			start = start_pv_lock_ops_queued_spin_unlock;
			end   = end_pv_lock_ops_queued_spin_unlock;
			goto patch_site;
		}
#endif
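
	/* No native template (or a pv unlock): let the generic code decide. */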
	default:
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;
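
	/* Copy the native instructions selected above into the call site. */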
patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;
	}
#undef PATCH_SITE
	return ret;
}