/*
 * Compatibility header for building as an external module.
 */

/*
 * Avoid picking up the kernel's kvm.h in case we have a newer one.
 */
#include <linux/compiler.h>
#include <linux/version.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/time.h>
#include <asm/processor.h>
#include <linux/hrtimer.h>
#include <asm/bitops.h>
/* Override CONFIG_KVM_TRACE */
#ifdef EXT_CONFIG_KVM_TRACE
# define CONFIG_KVM_TRACE 1
#else
# undef CONFIG_KVM_TRACE
#endif
/*
 * 2.6.16 does not have GFP_NOWAIT
 */

#include <linux/gfp.h>

#ifndef GFP_NOWAIT
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
#endif
/*
 * kvm profiling support needs 2.6.20
 */
#include <linux/profile.h>

#ifndef KVM_PROFILING
#define KVM_PROFILING 1234
#endif
/*
 * smp_call_function_single() is not exported below 2.6.20, and has different
 * semantics below 2.6.23.  The 'nonatomic' argument was removed in 2.6.27.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
				 void *info, int wait);
#undef smp_call_function_single
#define smp_call_function_single kvm_smp_call_function_single

#endif
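/*
 * Usage sketch (illustrative, not from the original source; the handler
 * name is hypothetical): with the wrapper above, callers use the modern
 * four-argument form on every supported kernel:
 *
 *	static void ack_flush(void *info)
 *	{
 *		smp_mb();
 *	}
 *
 *	smp_call_function_single(cpu, ack_flush, NULL, 1);
 */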
/* on_each_cpu() lost an argument in 2.6.27. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

#define kvm_on_each_cpu(func, info, wait) on_each_cpu(func, info, 0, wait)

#else

#define kvm_on_each_cpu(func, info, wait) on_each_cpu(func, info, wait)

#endif
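/*
 * Illustrative call site (handler name hypothetical): callers write
 *
 *	kvm_on_each_cpu(hardware_disable, NULL, 1);
 *
 * and the macro inserts or omits the old 'retry' argument as the running
 * kernel requires.
 */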
/*
 * The cpu hotplug stubs are broken if !CONFIG_CPU_HOTPLUG
 */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,15)
#define DEFINE_MUTEX(a) DECLARE_MUTEX(a)
#define mutex_lock_interruptible(a) down_interruptible(a)
#define mutex_unlock(a) up(a)
#define mutex_lock(a) down(a)
#define mutex_init(a) init_MUTEX(a)
/* down_trylock() returns 0 on success, mutex_trylock() returns nonzero */
#define mutex_trylock(a) (!down_trylock(a))
#define mutex semaphore
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
#ifndef kzalloc
#define kzalloc(size,flags)			\
({						\
	void *__ret = kmalloc(size, flags);	\
	if (__ret)				\
		memset(__ret, 0, size);		\
	__ret;					\
})
#endif
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
#ifndef kmem_cache_zalloc
#define kmem_cache_zalloc(cache,flags)				\
({								\
	void *__ret = kmem_cache_alloc(cache, flags);		\
	if (__ret)						\
		memset(__ret, 0, kmem_cache_size(cache));	\
	__ret;							\
})
#endif
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)

#ifndef CONFIG_HOTPLUG_CPU
#define register_cpu_notifier(nb) (0)
#endif

#endif
#include <linux/miscdevice.h>

#ifndef KVM_MINOR
#define KVM_MINOR 232
#endif
#include <linux/notifier.h>
#ifndef CPU_TASKS_FROZEN

#define CPU_TASKS_FROZEN	0x0010
#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)

#endif

#ifndef CPU_DYING
#define CPU_DYING		0x000A
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
#endif
#include <asm/system.h>

#include <linux/anon_inodes.h>
#define anon_inode_getfd kvm_anon_inode_getfd
int kvm_init_anon_inodes(void);
void kvm_exit_anon_inodes(void);
int anon_inode_getfd(const char *name,
		     const struct file_operations *fops,
		     void *priv, int flags);
/*
 * 2.6.23 removed the cache destructor
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
# define kmem_cache_create(name, size, align, flags, ctor) \
	kmem_cache_create(name, size, align, flags, ctor, NULL)
#endif
/* HRTIMER_MODE_ABS started life with a different name */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
#define HRTIMER_MODE_ABS HRTIMER_ABS
#endif
/* div64_u64 is fairly new */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)

#define div64_u64 kvm_div64_u64

#ifdef CONFIG_64BIT

/* 64-bit architectures can divide directly */
static inline uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;
}

#else

/* 32-bit architectures need an out-of-line implementation */
uint64_t div64_u64(uint64_t dividend, uint64_t divisor);

#endif

#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)

#ifdef RHEL_RELEASE_CODE
#if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,2)
#endif
#endif

#endif
/*
 * PF_VCPU is a Linux 2.6.24 addition
 */

#include <linux/sched.h>

#ifndef PF_VCPU
#define PF_VCPU 0 /* no flag bit on older kernels; setting/clearing is a no-op */
#endif
/*
 * smp_call_function_mask() is not defined/exported below 2.6.24
 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)

int kvm_smp_call_function_mask(cpumask_t mask, void (*func)(void *info),
			       void *info, int wait);

#define smp_call_function_mask kvm_smp_call_function_mask

#endif
/* empty_zero_page isn't exported in all kernels */
#include <asm/pgtable.h>

#define empty_zero_page kvm_empty_zero_page

static char empty_zero_page[PAGE_SIZE];

/* reference the array so the compiler does not warn that it is unused */
static inline void blahblah(void)
{
	(void)empty_zero_page[0];
}
/* __mmdrop() is not exported before 2.6.25 */
#include <linux/sched.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)

#define mmdrop(x) do { (void)(x); } while (0)
#define mmget(x) do { (void)(x); } while (0)

#else

#define mmget(x) do { atomic_inc(x); } while (0)

#endif
/* pagefault_enable(), pagefault_disable() - 2.6.20 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
# define KVM_NEED_PAGEFAULT_DISABLE 1
# ifdef RHEL_RELEASE_CODE
#  if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,3)
#   undef KVM_NEED_PAGEFAULT_DISABLE
#  endif
# endif
#endif

#ifdef KVM_NEED_PAGEFAULT_DISABLE

static inline void pagefault_disable(void)
{
	inc_preempt_count();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	dec_preempt_count();
	/*
	 * make sure the preempt count update is visible before we
	 * check for a reschedule.
	 */
	barrier();
	preempt_check_resched();
}

#endif
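/*
 * Typical pairing (sketch, not from the original header): the calls guard
 * inatomic user accesses,
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, src, len);
 *	pagefault_enable();
 *
 * Raising the preempt count makes the fault handler fail the access
 * instead of sleeping, so the copy returns an error rather than blocking.
 */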
#include <linux/uaccess.h>

/* vm ops ->fault() was introduced in 2.6.23. */
#include <linux/mm.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

struct vm_fault {
	unsigned int flags;
	pgoff_t pgoff;
	void __user *virtual_address;
	struct page *page;
};

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static inline struct page *kvm_nopage_to_fault(
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf),
	struct vm_area_struct *vma,
	unsigned long address,
	int *type)
{
	struct vm_fault vmf;
	int ret;

	vmf.pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	vmf.virtual_address = (void __user *)address;
	ret = fault(vma, &vmf);
	if (ret)
		return NOPAGE_SIGBUS;
	*type = VM_FAULT_MINOR;
	return vmf.page;
}

static inline struct page *__kvm_vcpu_fault(struct vm_area_struct *vma,
					    unsigned long address,
					    int *type)
{
	return kvm_nopage_to_fault(kvm_vcpu_fault, vma, address, type);
}

static inline struct page *__kvm_vm_fault(struct vm_area_struct *vma,
					  unsigned long address,
					  int *type)
{
	return kvm_nopage_to_fault(kvm_vm_fault, vma, address, type);
}

#define VMA_OPS_FAULT(x) nopage
#define VMA_OPS_FAULT_FUNC(x) __##x

#else

#define VMA_OPS_FAULT(x) x
#define VMA_OPS_FAULT_FUNC(x) x

#endif
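/*
 * Example wiring (sketch): the two macros let a single initializer serve
 * both worlds, picking the ->nopage or ->fault member and the matching
 * wrapper:
 *
 *	static struct vm_operations_struct kvm_vcpu_vm_ops = {
 *		.VMA_OPS_FAULT(fault) = VMA_OPS_FAULT_FUNC(kvm_vcpu_fault),
 *	};
 */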
/* simple vfs attribute getter signature has changed to add a return code */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)

#define MAKE_SIMPLE_ATTRIBUTE_GETTER(x)		\
	static u64 x(void *v)			\
	{					\
		u64 ret = 0;			\
						\
		__##x(v, &ret);			\
		return ret;			\
	}

#else

#define MAKE_SIMPLE_ATTRIBUTE_GETTER(x)		\
	static int x(void *v, u64 *val)		\
	{					\
		return __##x(v, val);		\
	}

#endif
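/*
 * Usage sketch (names hypothetical): define the double-underscore helper
 * with the new signature, then let the macro generate a getter matching
 * whatever DEFINE_SIMPLE_ATTRIBUTE() expects on this kernel:
 *
 *	static int __vcpu_stat_get(void *v, u64 *val)
 *	{
 *		*val = 0;
 *		return 0;
 *	}
 *	MAKE_SIMPLE_ATTRIBUTE_GETTER(vcpu_stat_get)
 *	DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
 */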
/* set_kset_name() is gone in 2.6.25 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)

#define set_kset_name(x) .name = x

#endif
/* FASTCALL() and fastcall are likewise gone in 2.6.25 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)

#define FASTCALL(x) x
#define fastcall

#endif
/* tsc_khz is not exported below 2.6.23 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

unsigned kvm_get_tsc_khz(void);
#define kvm_tsc_khz (kvm_get_tsc_khz())

#else

#define kvm_tsc_khz tsc_khz

#endif
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)

#include <linux/ktime.h>
#include <linux/hrtimer.h>

#define ktime_get kvm_ktime_get

static inline ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}

#endif
/* __aligned arrived in 2.6.21 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
#define __aligned(x) __attribute__((__aligned__(x)))
#endif

#include <linux/mm.h>
/* The shrinker API changed in 2.6.23 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

struct kvm_shrinker {
	int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
	int seeks;
	struct shrinker *kshrinker;
};

static inline void register_shrinker(struct kvm_shrinker *shrinker)
{
	shrinker->kshrinker = set_shrinker(shrinker->seeks, shrinker->shrink);
}

static inline void unregister_shrinker(struct kvm_shrinker *shrinker)
{
	if (shrinker->kshrinker)
		remove_shrinker(shrinker->kshrinker);
}

#define shrinker kvm_shrinker

#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/* khz = cyc/(Million ns)
	 * mult/2^shift = ns/cyc
	 * mult = ns/cyc * 2^shift
	 * mult = 1Million/khz * 2^shift
	 * mult = 1000000 * 2^shift / khz
	 * mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}
#else
#include <linux/clocksource.h>
#endif
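/*
 * Worked example (ours, for clarity): for a 1 MHz clock, khz = 1000; with
 * shift_constant = 16,
 *
 *	mult = (1000000 << 16) / 1000 = 1000 << 16
 *
 * so mult / 2^16 = 1000 ns per cycle, as expected for a 1 MHz source.
 */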
/* manually export hrtimer_init/start/cancel */
#include <linux/kallsyms.h>
extern void (*hrtimer_init_p)(struct hrtimer *timer, clockid_t which_clock,
			      enum hrtimer_mode mode);
extern int (*hrtimer_start_p)(struct hrtimer *timer, ktime_t tim,
			      const enum hrtimer_mode mode);
extern int (*hrtimer_cancel_p)(struct hrtimer *timer);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) && defined(CONFIG_KALLSYMS)
static inline void hrtimer_kallsyms_resolve(void)
{
	hrtimer_init_p = (void *) kallsyms_lookup_name("hrtimer_init");
	BUG_ON(!hrtimer_init_p);
	hrtimer_start_p = (void *) kallsyms_lookup_name("hrtimer_start");
	BUG_ON(!hrtimer_start_p);
	hrtimer_cancel_p = (void *) kallsyms_lookup_name("hrtimer_cancel");
	BUG_ON(!hrtimer_cancel_p);
}
#else
static inline void hrtimer_kallsyms_resolve(void)
{
	hrtimer_init_p = hrtimer_init;
	hrtimer_start_p = hrtimer_start;
	hrtimer_cancel_p = hrtimer_cancel;
}
#endif
/* handle old hrtimer API with data pointer */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
static inline void hrtimer_data_pointer(struct hrtimer *timer)
{
	timer->data = (void *)timer;
}
#else
static inline void hrtimer_data_pointer(struct hrtimer *timer) {}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)

#define ns_to_timespec kvm_ns_to_timespec

struct timespec kvm_ns_to_timespec(const s64 nsec);

#endif
/* work_struct lost the 'data' field in 2.6.20 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

#define kvm_INIT_WORK(work, handler) \
	INIT_WORK(work, (void (*)(void *))handler, work)

#else

#define kvm_INIT_WORK(work, handler) INIT_WORK(work, handler)

#endif
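/*
 * Illustrative call site (sketch; 'expired_work' and 'timer' are
 * hypothetical): handlers are written against the modern prototype, and on
 * pre-2.6.20 kernels the macro casts to the old void (*)(void *) type and
 * passes the work item itself as the 'data' argument:
 *
 *	static void expired_work(struct work_struct *work);
 *
 *	kvm_INIT_WORK(&timer->expired, expired_work);
 */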
/* cancel_work_sync() was flush_work() in 2.6.21 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)

static inline int cancel_work_sync(struct work_struct *work)
{
	/*
	 * FIXME: actually cancel.  How?  Add own implementation of workqueues?
	 */
	return 0;
}

/* ... and it returned void before 2.6.23 */
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

#define cancel_work_sync(work) ({ cancel_work_sync(work); 0; })

#endif
/* pci_get_bus_and_slot() is not available on older kernels */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);

#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)

#include <linux/relay.h>

/* relay_open() interface has changed on 2.6.21 */

struct rchan *kvm_relay_open(const char *base_filename,
			     struct dentry *parent,
			     size_t subbuf_size,
			     size_t n_subbufs,
			     struct rchan_callbacks *cb,
			     void *private_data);

#else

#define kvm_relay_open relay_open

#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

static inline int get_user_pages_fast(unsigned long start, int nr_pages,
				      int write, struct page **pages)
{
	int npages;

	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, start, nr_pages, write,
				0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	return npages;
}

#endif
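/*
 * Usage sketch (ours): callers rely on the 2.6.27 interface everywhere; on
 * older kernels the fallback above takes mmap_sem itself.  Pin one writable
 * page, then release it when done:
 *
 *	struct page *page;
 *
 *	if (get_user_pages_fast(addr, 1, 1, &page) != 1)
 *		return -EFAULT;
 *	put_page(page);
 */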
/* spin_needbreak() was called something else in 2.6.24 */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24)

#define spin_needbreak need_lockbreak

#endif
/* hrtimer accessors for the expires field arrived in 2.6.28 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

static inline void kvm_hrtimer_add_expires_ns(struct hrtimer *timer, u64 delta)
{
	timer->expires = ktime_add_ns(timer->expires, delta);
}

static inline ktime_t kvm_hrtimer_get_expires(struct hrtimer *timer)
{
	return timer->expires;
}

static inline u64 kvm_hrtimer_get_expires_ns(struct hrtimer *timer)
{
	return ktime_to_ns(timer->expires);
}

static inline void kvm_hrtimer_start_expires(struct hrtimer *timer, int mode)
{
	hrtimer_start_p(timer, timer->expires, mode);
}

static inline ktime_t kvm_hrtimer_expires_remaining(const struct hrtimer *timer)
{
	return ktime_sub(timer->expires, timer->base->get_time());
}

#else

#define kvm_hrtimer_add_expires_ns hrtimer_add_expires_ns
#define kvm_hrtimer_get_expires hrtimer_get_expires
#define kvm_hrtimer_get_expires_ns hrtimer_get_expires_ns
#define kvm_hrtimer_start_expires hrtimer_start_expires
#define kvm_hrtimer_expires_remaining hrtimer_expires_remaining

#endif
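/*
 * Illustrative use (sketch; 'pt' is a hypothetical periodic-timer struct):
 * compat code re-arms a timer through the wrappers instead of touching
 * ->expires, which 2.6.28 turned into accessor functions:
 *
 *	kvm_hrtimer_add_expires_ns(&pt->timer, pt->period);
 *	kvm_hrtimer_start_expires(&pt->timer, HRTIMER_MODE_ABS);
 */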
/* pci_reset_function() was introduced in 2.6.28; provide a no-op stub */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

static inline int pci_reset_function(struct pci_dev *dev)
{
	return 0;
}

#endif
#include <linux/interrupt.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

typedef irqreturn_t (*kvm_irq_handler_t)(int, void *, struct pt_regs *);
static inline int kvm_request_irq(unsigned int a, kvm_irq_handler_t handler,
				  unsigned long c, const char *d, void *e)
{
	/* FIXME: allocate thunk, etc. */
	return -EINVAL;
}

#else

#define kvm_request_irq request_irq

#endif
/* dynamically allocated cpu masks introduced in 2.6.28 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

typedef cpumask_t cpumask_var_t[1];

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return 1;
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
}

static inline void cpumask_clear(cpumask_var_t mask)
{
	cpus_clear(*mask);
}

static inline void cpumask_set_cpu(int cpu, cpumask_var_t mask)
{
	cpu_set(cpu, *mask);
}

static inline int smp_call_function_many(cpumask_var_t cpus,
					 void (*func)(void *data), void *data,
					 int sync)
{
	return smp_call_function_mask(*cpus, func, data, sync);
}

static inline int cpumask_empty(cpumask_var_t mask)
{
	return cpus_empty(*mask);
}

static inline int cpumask_test_cpu(int cpu, cpumask_var_t mask)
{
	return cpu_isset(cpu, *mask);
}

static inline void cpumask_clear_cpu(int cpu, cpumask_var_t mask)
{
	cpu_clear(cpu, *mask);
}

#define cpu_online_mask (&cpu_online_map)

#endif
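/*
 * Usage sketch (ours; 'some_func' is hypothetical): code written against
 * the 2.6.28 cpumask API works unchanged on older kernels through the
 * wrappers above:
 *
 *	cpumask_var_t cpus;
 *
 *	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
 *		return;
 *	cpumask_clear(cpus);
 *	cpumask_set_cpu(1, cpus);
 *	if (!cpumask_empty(cpus))
 *		smp_call_function_many(cpus, some_func, NULL, 1);
 *	free_cpumask_var(cpus);
 */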
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)

#define IF_ANON_INODES_DOES_REFCOUNTS(x)

#else

#define IF_ANON_INODES_DOES_REFCOUNTS(x) x

#endif
/* Macro introduced only on newer kernels: */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
#define marker_synchronize_unregister() synchronize_sched()
#endif
/* pci_dev.msi_enabled was introduced in 2.6.18 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)

int kvm_pcidev_msi_enabled(struct pci_dev *dev);

#else

#define kvm_pcidev_msi_enabled(dev) (dev)->msi_enabled

#endif
/* compound_head() was introduced in 2.6.22 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
# define NEED_COMPOUND_HEAD 1
# ifdef RHEL_RELEASE_CODE
#  if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,2)
#   undef NEED_COMPOUND_HEAD
#  endif
# endif
#endif

#ifdef NEED_COMPOUND_HEAD

static inline struct page *compound_head(struct page *page)
{
	if (PageCompound(page))
		page = (struct page *)page_private(page);
	return page;
}

#endif