/*
 * Compatibility header for building as an external module.
 */

/*
 * Avoid picking up the kernel's kvm.h in case we have a newer one.
 */

#include <linux/compiler.h>
#include <linux/version.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/time.h>
#include <asm/processor.h>
#include <linux/hrtimer.h>
#include <asm/bitops.h>

/*
 * 2.6.16 does not have GFP_NOWAIT
 */

#include <linux/gfp.h>

#ifndef GFP_NOWAIT
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
#endif

/*
 * kvm profiling support needs 2.6.20
 */
#include <linux/profile.h>

#ifndef KVM_PROFILING
#define KVM_PROFILING 1234
#endif

/*
 * smp_call_function_single() is not exported below 2.6.20, and has different
 * semantics below 2.6.23. The 'nonatomic' argument was removed in 2.6.27.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
				 void *info, int wait);
#define smp_call_function_single kvm_smp_call_function_single

#endif

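/*
 * Illustrative sketch, not part of the original header: with the wrapper
 * above, callers use the modern four-argument form on every kernel. The
 * callback and helper names are hypothetical.
 */
#if 0
static void kvm_compat_noop(void *info)
{
}

static void kvm_compat_poke_cpu(int cpu)
{
	/* expands to kvm_smp_call_function_single() below 2.6.27 */
	smp_call_function_single(cpu, kvm_compat_noop, NULL, 1);
}
#endif
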
/* on_each_cpu() lost an argument in 2.6.27. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

#define kvm_on_each_cpu(func, info, wait) on_each_cpu(func, info, 0, wait)

#else

#define kvm_on_each_cpu(func, info, wait) on_each_cpu(func, info, wait)

#endif

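/*
 * Illustrative sketch, not part of the original header: callers pass the
 * 2.6.27-style argument list and the macro supplies the legacy 'nonatomic'
 * argument where needed. Names are hypothetical.
 */
#if 0
static void kvm_compat_flush_one(void *info)
{
	/* per-cpu work */
}

static void kvm_compat_flush_all(void)
{
	kvm_on_each_cpu(kvm_compat_flush_one, NULL, 1); /* wait == 1 */
}
#endif
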
/*
 * The cpu hotplug stubs are broken if !CONFIG_CPU_HOTPLUG
 */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,15)
#define DEFINE_MUTEX(a) DECLARE_MUTEX(a)
#define mutex_lock_interruptible(a) down_interruptible(a)
#define mutex_unlock(a) up(a)
#define mutex_lock(a) down(a)
#define mutex_init(a) init_MUTEX(a)
/* down_trylock() returns 0 on success; mutex_trylock() returns nonzero */
#define mutex_trylock(a) (!down_trylock(a))
#define mutex semaphore
#endif

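/*
 * Illustrative sketch, not part of the original header: code written
 * against the mutex API compiles unchanged on pre-mutex kernels, where
 * the macros above map it onto semaphores. Names are hypothetical.
 */
#if 0
static DEFINE_MUTEX(kvm_compat_lock);

static void kvm_compat_critical_section(void)
{
	mutex_lock(&kvm_compat_lock);
	/* ... */
	mutex_unlock(&kvm_compat_lock);
}
#endif
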
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
#define kzalloc(size,flags)			\
({						\
	void *__ret = kmalloc(size, flags);	\
	if (__ret)				\
		memset(__ret, 0, size);		\
	__ret;					\
})
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
#ifndef kmem_cache_zalloc
#define kmem_cache_zalloc(cache,flags)				  \
({								  \
	void *__ret = kmem_cache_alloc(cache, flags);		  \
	if (__ret)						  \
		memset(__ret, 0, kmem_cache_size(cache));	  \
	__ret;							  \
})
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)

#ifndef CONFIG_HOTPLUG_CPU
#define register_cpu_notifier(nb) (0)
#endif

#endif

#include <linux/miscdevice.h>

#ifndef KVM_MINOR
#define KVM_MINOR 232
#endif

#include <linux/notifier.h>
#ifndef CPU_TASKS_FROZEN

#define CPU_TASKS_FROZEN	0x0010
#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)

#endif

#ifndef CPU_DYING
#define CPU_DYING		0x000A
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
#endif

#include <asm/system.h>

#include <linux/anon_inodes.h>
#define anon_inode_getfd kvm_anon_inode_getfd
int kvm_init_anon_inodes(void);
void kvm_exit_anon_inodes(void);
int anon_inode_getfd(const char *name,
		     const struct file_operations *fops,
		     void *priv, int flags);

/*
 * 2.6.23 removed the cache destructor
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
# define kmem_cache_create(name, size, align, flags, ctor) \
	kmem_cache_create(name, size, align, flags, ctor, NULL)
#endif

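/*
 * Illustrative sketch, not part of the original header: a single call
 * site works against both APIs, since the macro appends the NULL
 * destructor on older kernels. The cache name is hypothetical.
 */
#if 0
static struct kmem_cache *kvm_compat_cache;

static void kvm_compat_cache_init(void)
{
	kvm_compat_cache = kmem_cache_create("kvm_compat", 64, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
}
#endif
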
/* HRTIMER_MODE_ABS started life with a different name */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
#define HRTIMER_MODE_ABS HRTIMER_ABS
#endif

/* div64_u64 is fairly new */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)

#define div64_u64 kvm_div64_u64

#ifdef CONFIG_64BIT

/* on 64-bit, the compiler can emit the division directly */
static inline uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;
}

#else

/* on 32-bit, an out-of-line implementation is required */
uint64_t div64_u64(uint64_t dividend, uint64_t divisor);

#endif

#endif

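/*
 * Illustrative sketch, not part of the original header: a full 64/64-bit
 * division through the compat name, which stays safe on 32-bit kernels
 * where the compiler cannot emit the divide inline. The helper is
 * hypothetical.
 */
#if 0
static inline uint64_t kvm_compat_avg_ns(uint64_t total_ns, uint64_t samples)
{
	return div64_u64(total_ns, samples);
}
#endif
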
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)

#ifdef RHEL_RELEASE_CODE
#if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,2)
#define RHEL_BOOL 1
#endif
#endif

/* 'bool' was added in 2.6.19; RHEL 5.2 backports it */
#ifndef RHEL_BOOL
typedef _Bool bool;
#endif

#endif

/*
 * PF_VCPU is a Linux 2.6.24 addition
 */

#include <linux/sched.h>

#ifndef PF_VCPU
#define PF_VCPU 0 /* flag setting becomes a no-op on older kernels */
#endif

/*
 * smp_call_function_mask() is not defined/exported below 2.6.24
 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)

int kvm_smp_call_function_mask(cpumask_t mask, void (*func)(void *info),
			       void *info, int wait);

#define smp_call_function_mask kvm_smp_call_function_mask

#endif

/* empty_zero_page isn't exported in all kernels */
#include <asm/pgtable.h>

#define empty_zero_page kvm_empty_zero_page

static char empty_zero_page[PAGE_SIZE];

/* reference the page once so unused-variable warnings stay quiet */
static inline void blahblah(void)
{
	(void)empty_zero_page[0];
}

/* __mmdrop() is not exported before 2.6.25 */
#include <linux/sched.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)

#define mmdrop(x) do { (void)(x); } while (0)
#define mmget(x) do { (void)(x); } while (0)

#else

#define mmget(x) do { atomic_inc(x); } while (0)

#endif

/* pagefault_enable(), pagefault_disable() - 2.6.20 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

static inline void pagefault_disable(void)
{
	inc_preempt_count();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	dec_preempt_count();
	/*
	 * make sure we do not reenter.
	 */
	barrier();
	preempt_check_resched();
}

#endif

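/*
 * Illustrative sketch, not part of the original header: the pattern these
 * helpers enable, a user-memory access with page faults disabled. The
 * function name is hypothetical.
 */
#if 0
static unsigned long kvm_compat_copy_inatomic(void *dst,
					      const void __user *src,
					      unsigned long len)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	return left; /* number of bytes left uncopied */
}
#endif
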
/* vm ops ->fault() was introduced in 2.6.23. */
#include <linux/mm.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

struct vm_fault {
	unsigned int flags;
	pgoff_t pgoff;
	void __user *virtual_address;
	struct page *page;
};

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static inline struct page *kvm_nopage_to_fault(
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf),
	struct vm_area_struct *vma,
	unsigned long address,
	int *type)
{
	struct vm_fault vmf;
	int ret;

	vmf.pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	vmf.virtual_address = (void __user *)address;
	ret = fault(vma, &vmf);
	if (ret)
		return NOPAGE_SIGBUS;
	*type = VM_FAULT_MINOR;
	return vmf.page;
}

static inline struct page *__kvm_vcpu_fault(struct vm_area_struct *vma,
					    unsigned long address,
					    int *type)
{
	return kvm_nopage_to_fault(kvm_vcpu_fault, vma, address, type);
}

static inline struct page *__kvm_vm_fault(struct vm_area_struct *vma,
					  unsigned long address,
					  int *type)
{
	return kvm_nopage_to_fault(kvm_vm_fault, vma, address, type);
}

#define VMA_OPS_FAULT(x) nopage
#define VMA_OPS_FAULT_FUNC(x) __##x

#else

#define VMA_OPS_FAULT(x) x
#define VMA_OPS_FAULT_FUNC(x) x

#endif

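/*
 * Illustrative sketch, not part of the original header: using both macros
 * in one initializer selects .nopage/__kvm_vcpu_fault on old kernels and
 * .fault/kvm_vcpu_fault on 2.6.23+.
 */
#if 0
static struct vm_operations_struct kvm_compat_vcpu_vm_ops = {
	.VMA_OPS_FAULT(fault) = VMA_OPS_FAULT_FUNC(kvm_vcpu_fault),
};
#endif
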
/* simple vfs attribute getter signature has changed to add a return code */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)

#define MAKE_SIMPLE_ATTRIBUTE_GETTER(x)		\
	static u64 x(void *v)			\
	{					\
		u64 ret = 0;			\
						\
		__##x(v, &ret);			\
		return ret;			\
	}

#else

#define MAKE_SIMPLE_ATTRIBUTE_GETTER(x)		\
	static int x(void *v, u64 *val)		\
	{					\
		return __##x(v, val);		\
	}

#endif

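/*
 * Illustrative sketch, not part of the original header: define the
 * two-argument __name() helper and let the macro emit a wrapper with
 * whichever signature the kernel expects. Names are hypothetical.
 */
#if 0
static int __vcpu_stat_get(void *v, u64 *val)
{
	*val = 0;
	return 0;
}
MAKE_SIMPLE_ATTRIBUTE_GETTER(vcpu_stat_get)
#endif
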
/* set_kset_name() is gone in 2.6.25 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)

#define set_kset_name(x) .name = x

#endif

/* FASTCALL() and fastcall are gone in 2.6.25 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)

#define FASTCALL(x)	x
#define fastcall

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

/* tsc_khz is not exported before 2.6.23; use a 2 GHz placeholder */
static unsigned __attribute__((__used__)) kvm_tsc_khz = 2000000;

#else

#define kvm_tsc_khz tsc_khz

#endif

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)

#include <linux/ktime.h>
#include <linux/hrtimer.h>

#define ktime_get kvm_ktime_get

static inline ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}

#endif

/* __aligned arrived in 2.6.21 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
#define __aligned(x) __attribute__((__aligned__(x)))
#endif

#include <linux/mm.h>

/* The shrinker API changed in 2.6.23 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

struct kvm_shrinker {
	int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
	int seeks;
	struct shrinker *kshrinker;
};

static inline void register_shrinker(struct kvm_shrinker *shrinker)
{
	shrinker->kshrinker = set_shrinker(shrinker->seeks, shrinker->shrink);
}

static inline void unregister_shrinker(struct kvm_shrinker *shrinker)
{
	if (shrinker->kshrinker)
		remove_shrinker(shrinker->kshrinker);
}

#define shrinker kvm_shrinker

#endif

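/*
 * Illustrative sketch, not part of the original header: a shrinker
 * declared in the 2.6.23 style; on older kernels the inlines above map
 * it onto set_shrinker()/remove_shrinker(). The callback is hypothetical.
 */
#if 0
static int kvm_compat_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	return 0; /* nothing to free in this sketch */
}

static struct shrinker kvm_compat_shrinker = {
	.shrink = kvm_compat_shrink,
	.seeks = DEFAULT_SEEKS,
};

static void kvm_compat_shrinker_init(void)
{
	register_shrinker(&kvm_compat_shrinker);
}
#endif
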
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/*  khz = cyc/(Million ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Million/khz * 2^shift
	 *  mult = 1000000 * 2^shift / khz
	 *  mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}
#else
#include <linux/clocksource.h>
#endif

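/*
 * Worked example, not in the original header: for a 1 MHz (1000 kHz)
 * clock and shift_constant == 22, mult = (1000000 << 22) / 1000 =
 * 1000 << 22, i.e. 1000 ns per cycle in 22-bit fixed point, as expected
 * for a 1 MHz source.
 */
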
/* manually export hrtimer_init/start/cancel */
#include <linux/kallsyms.h>
extern void (*hrtimer_init_p)(struct hrtimer *timer, clockid_t which_clock,
			      enum hrtimer_mode mode);
extern int (*hrtimer_start_p)(struct hrtimer *timer, ktime_t tim,
			      const enum hrtimer_mode mode);
extern int (*hrtimer_cancel_p)(struct hrtimer *timer);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) && defined(CONFIG_KALLSYMS)
/* the symbols are unexported; resolve them at runtime via kallsyms */
static inline void hrtimer_kallsyms_resolve(void)
{
	hrtimer_init_p = (void *) kallsyms_lookup_name("hrtimer_init");
	BUG_ON(!hrtimer_init_p);
	hrtimer_start_p = (void *) kallsyms_lookup_name("hrtimer_start");
	BUG_ON(!hrtimer_start_p);
	hrtimer_cancel_p = (void *) kallsyms_lookup_name("hrtimer_cancel");
	BUG_ON(!hrtimer_cancel_p);
}
#else
static inline void hrtimer_kallsyms_resolve(void)
{
	hrtimer_init_p = hrtimer_init;
	hrtimer_start_p = hrtimer_start;
	hrtimer_cancel_p = hrtimer_cancel;
}
#endif

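/*
 * Illustrative sketch, not part of the original header: resolve the
 * pointers once at module init time, then call hrtimers only through
 * them. The timer and helper are hypothetical.
 */
#if 0
static struct hrtimer kvm_compat_timer;

static void kvm_compat_timer_setup(void)
{
	hrtimer_kallsyms_resolve();
	hrtimer_init_p(&kvm_compat_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
}
#endif
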
/* handle old hrtimer API with data pointer */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
static inline void hrtimer_data_pointer(struct hrtimer *timer)
{
	timer->data = (void *)timer;
}
#else
static inline void hrtimer_data_pointer(struct hrtimer *timer) {}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)

#define ns_to_timespec kvm_ns_to_timespec

struct timespec kvm_ns_to_timespec(const s64 nsec);

#endif

/* work_struct lost the 'data' field in 2.6.20 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

#define kvm_INIT_WORK(work, handler) \
	INIT_WORK(work, (void (*)(void *))handler, work)

#else

#define kvm_INIT_WORK(work, handler) INIT_WORK(work, handler)

#endif

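/*
 * Illustrative sketch, not part of the original header: one INIT_WORK
 * call site for both APIs; below 2.6.20 the macro adds the cast and the
 * extra 'data' argument. Names are hypothetical.
 */
#if 0
static struct work_struct kvm_compat_work;

static void kvm_compat_work_fn(struct work_struct *work)
{
}

static void kvm_compat_work_init(void)
{
	kvm_INIT_WORK(&kvm_compat_work, kvm_compat_work_fn);
}
#endif
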
/* cancel_work_sync() was flush_work() in 2.6.21 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)

static inline int cancel_work_sync(struct work_struct *work)
{
	/*
	 * FIXME: actually cancel. How? Add own implementation of workqueues?
	 */
	return 0;
}

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

struct pci_dev;

struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);

#endif