/*
 * kvm: qemu: work around dhclient brokenness
 * [kvm-userspace.git] / kernel / external-module-compat-comm.h
 * blob 97e921bd856732fb98c012303cf310fdefa3eacf
 */
2 /*
3 * Compatibility header for building as an external module.
4 */
6 /*
7 * Avoid picking up the kernel's kvm.h in case we have a newer one.
8 */
10 #include <linux/compiler.h>
11 #include <linux/version.h>
12 #include <linux/string.h>
13 #include <linux/kvm.h>
14 #include <linux/kvm_para.h>
15 #include <linux/cpu.h>
16 #include <linux/time.h>
17 #include <asm/processor.h>
18 #include <linux/hrtimer.h>
19 #include <asm/bitops.h>
20 #include <asm/msr.h>
23 * 2.6.16 does not have GFP_NOWAIT
26 #include <linux/gfp.h>
28 #ifndef GFP_NOWAIT
29 #define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
30 #endif
34 * kvm profiling support needs 2.6.20
36 #include <linux/profile.h>
38 #ifndef KVM_PROFILING
39 #define KVM_PROFILING 1234
40 #define prof_on 4321
41 #endif
44 * smp_call_function_single() is not exported below 2.6.20, and has different
45 * semantics below 2.6.23. The 'nonatomic' argument was removed in 2.6.27.
47 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
49 int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
50 void *info, int wait);
52 #define smp_call_function_single kvm_smp_call_function_single
54 #endif
56 /* on_each_cpu() lost an argument in 2.6.27. */
57 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
59 #define kvm_on_each_cpu(func, info, wait) on_each_cpu(func, info, 0, wait)
61 #else
63 #define kvm_on_each_cpu(func, info, wait) on_each_cpu(func, info, wait)
65 #endif
68 * The cpu hotplug stubs are broken if !CONFIG_CPU_HOTPLUG
71 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,15)
72 #define DEFINE_MUTEX(a) DECLARE_MUTEX(a)
73 #define mutex_lock_interruptible(a) down_interruptible(a)
74 #define mutex_unlock(a) up(a)
75 #define mutex_lock(a) down(a)
76 #define mutex_init(a) init_MUTEX(a)
77 #define mutex_trylock(a) down_trylock(a)
78 #define mutex semaphore
79 #endif
81 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
82 #ifndef kzalloc
83 #define kzalloc(size,flags) \
84 ({ \
85 void *__ret = kmalloc(size, flags); \
86 if (__ret) \
87 memset(__ret, 0, size); \
88 __ret; \
90 #endif
91 #endif
93 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
94 #ifndef kmem_cache_zalloc
95 #define kmem_cache_zalloc(cache,flags) \
96 ({ \
97 void *__ret = kmem_cache_alloc(cache, flags); \
98 if (__ret) \
99 memset(__ret, 0, kmem_cache_size(cache)); \
100 __ret; \
102 #endif
103 #endif
105 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
107 #ifndef CONFIG_HOTPLUG_CPU
108 #define register_cpu_notifier(nb) (0)
109 #endif
111 #endif
113 #include <linux/miscdevice.h>
114 #ifndef KVM_MINOR
115 #define KVM_MINOR 232
116 #endif
118 #include <linux/notifier.h>
119 #ifndef CPU_TASKS_FROZEN
121 #define CPU_TASKS_FROZEN 0x0010
122 #define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
123 #define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
124 #define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
125 #define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
126 #define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
127 #define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
129 #endif
131 #ifndef CPU_DYING
132 #define CPU_DYING 0x000A
133 #define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
134 #endif
136 #include <asm/system.h>
138 struct inode;
139 #include <linux/anon_inodes.h>
140 #define anon_inode_getfd kvm_anon_inode_getfd
141 int kvm_init_anon_inodes(void);
142 void kvm_exit_anon_inodes(void);
143 int anon_inode_getfd(const char *name,
144 const struct file_operations *fops,
145 void *priv , int flags);
148 * 2.6.23 removed the cache destructor
150 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
151 # define kmem_cache_create(name, size, align, flags, ctor) \
152 kmem_cache_create(name, size, align, flags, ctor, NULL)
153 #endif
155 /* HRTIMER_MODE_ABS started life with a different name */
156 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
157 #define HRTIMER_MODE_ABS HRTIMER_ABS
158 #endif
160 /* div64_u64 is fairly new */
161 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
163 #define div64_u64 kvm_div64_u64
165 #ifdef CONFIG_64BIT
167 static inline uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
169 return dividend / divisor;
172 #else
174 uint64_t div64_u64(uint64_t dividend, uint64_t divisor);
176 #endif
178 #endif
180 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
182 #ifdef RHEL_RELEASE_CODE
183 #if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,2)
184 #define RHEL_BOOL 1
185 #endif
186 #endif
188 #ifndef RHEL_BOOL
190 typedef _Bool bool;
192 #endif
194 #endif
197 * PF_VCPU is a Linux 2.6.24 addition
200 #include <linux/sched.h>
202 #ifndef PF_VCPU
203 #define PF_VCPU 0
204 #endif
207 * smp_call_function_mask() is not defined/exported below 2.6.24
210 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
212 int kvm_smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
213 void *info, int wait);
215 #define smp_call_function_mask kvm_smp_call_function_mask
217 #endif
219 /* empty_zero_page isn't exported in all kernels */
220 #include <asm/pgtable.h>
222 #define empty_zero_page kvm_empty_zero_page
224 static char empty_zero_page[PAGE_SIZE];
226 static inline void blahblah(void)
228 (void)empty_zero_page[0];
231 /* __mmdrop() is not exported before 2.6.25 */
232 #include <linux/sched.h>
234 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
236 #define mmdrop(x) do { (void)(x); } while (0)
237 #define mmget(x) do { (void)(x); } while (0)
239 #else
241 #define mmget(x) do { atomic_inc(x); } while (0)
243 #endif
245 /* pagefault_enable(), page_fault_disable() - 2.6.20 */
246 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
248 static inline void pagefault_disable(void)
250 inc_preempt_count();
252 * make sure to have issued the store before a pagefault
253 * can hit.
255 barrier();
258 static inline void pagefault_enable(void)
261 * make sure to issue those last loads/stores before enabling
262 * the pagefault handler again.
264 barrier();
265 dec_preempt_count();
267 * make sure we do..
269 barrier();
270 preempt_check_resched();
273 #endif
275 /* vm ops ->fault() was introduced in 2.6.23. */
276 #include <linux/mm.h>
278 #ifdef KVM_MAIN
279 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
281 struct vm_fault {
282 unsigned int flags;
283 pgoff_t pgoff;
284 void __user *virtual_address;
285 struct page *page;
288 static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
289 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
291 static inline struct page *kvm_nopage_to_fault(
292 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf),
293 struct vm_area_struct *vma,
294 unsigned long address,
295 int *type)
297 struct vm_fault vmf;
298 int ret;
300 vmf.pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
301 vmf.virtual_address = (void __user *)address;
302 ret = fault(vma, &vmf);
303 if (ret)
304 return NOPAGE_SIGBUS;
305 *type = VM_FAULT_MINOR;
306 return vmf.page;
309 static inline struct page *__kvm_vcpu_fault(struct vm_area_struct *vma,
310 unsigned long address,
311 int *type)
313 return kvm_nopage_to_fault(kvm_vcpu_fault, vma, address, type);
316 static inline struct page *__kvm_vm_fault(struct vm_area_struct *vma,
317 unsigned long address,
318 int *type)
320 return kvm_nopage_to_fault(kvm_vm_fault, vma, address, type);
323 #define VMA_OPS_FAULT(x) nopage
324 #define VMA_OPS_FAULT_FUNC(x) __##x
326 #else
328 #define VMA_OPS_FAULT(x) x
329 #define VMA_OPS_FAULT_FUNC(x) x
331 #endif
332 #endif
334 /* simple vfs attribute getter signature has changed to add a return code */
336 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
338 #define MAKE_SIMPLE_ATTRIBUTE_GETTER(x) \
339 static u64 x(void *v) \
341 u64 ret = 0; \
343 __##x(v, &ret); \
344 return ret; \
347 #else
349 #define MAKE_SIMPLE_ATTRIBUTE_GETTER(x) \
350 static int x(void *v, u64 *val) \
352 return __##x(v, val); \
355 #endif
357 /* set_kset_name() is gone in 2.6.25 */
359 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
361 #define set_kset_name(x) .name = x
363 #endif
365 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
366 #ifndef FASTCALL
367 #define FASTCALL(x) x
368 #define fastcall
369 #endif
370 #endif
372 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
374 static unsigned __attribute__((__used__)) kvm_tsc_khz = 2000000;
376 #else
378 #define kvm_tsc_khz tsc_khz
380 #endif
382 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
384 #include <linux/ktime.h>
385 #include <linux/hrtimer.h>
387 #define ktime_get kvm_ktime_get
389 static inline ktime_t ktime_get(void)
391 struct timespec now;
393 ktime_get_ts(&now);
395 return timespec_to_ktime(now);
398 #endif
400 /* __aligned arrived in 2.6.21 */
401 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
402 #define __aligned(x) __attribute__((__aligned__(x)))
403 #endif
405 #include <linux/mm.h>
407 /* The shrinker API changed in 2.6.23 */
408 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
410 struct kvm_shrinker {
411 int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
412 int seeks;
413 struct shrinker *kshrinker;
416 static inline void register_shrinker(struct kvm_shrinker *shrinker)
418 shrinker->kshrinker = set_shrinker(shrinker->seeks, shrinker->shrink);
421 static inline void unregister_shrinker(struct kvm_shrinker *shrinker)
423 if (shrinker->kshrinker)
424 remove_shrinker(shrinker->kshrinker);
427 #define shrinker kvm_shrinker
429 #endif
431 /* clocksource */
432 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
433 static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
435 /* khz = cyc/(Million ns)
436 * mult/2^shift = ns/cyc
437 * mult = ns/cyc * 2^shift
438 * mult = 1Million/khz * 2^shift
439 * mult = 1000000 * 2^shift / khz
440 * mult = (1000000<<shift) / khz
442 u64 tmp = ((u64)1000000) << shift_constant;
444 tmp += khz/2; /* round for do_div */
445 do_div(tmp, khz);
447 return (u32)tmp;
449 #else
450 #include <linux/clocksource.h>
451 #endif
453 /* manually export hrtimer_init/start/cancel */
454 #include <linux/kallsyms.h>
455 extern void (*hrtimer_init_p)(struct hrtimer *timer, clockid_t which_clock,
456 enum hrtimer_mode mode);
457 extern int (*hrtimer_start_p)(struct hrtimer *timer, ktime_t tim,
458 const enum hrtimer_mode mode);
459 extern int (*hrtimer_cancel_p)(struct hrtimer *timer);
461 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) && defined(CONFIG_KALLSYMS)
462 static inline void hrtimer_kallsyms_resolve(void)
464 hrtimer_init_p = (void *) kallsyms_lookup_name("hrtimer_init");
465 BUG_ON(!hrtimer_init_p);
466 hrtimer_start_p = (void *) kallsyms_lookup_name("hrtimer_start");
467 BUG_ON(!hrtimer_start_p);
468 hrtimer_cancel_p = (void *) kallsyms_lookup_name("hrtimer_cancel");
469 BUG_ON(!hrtimer_cancel_p);
471 #else
472 static inline void hrtimer_kallsyms_resolve(void)
474 hrtimer_init_p = hrtimer_init;
475 hrtimer_start_p = hrtimer_start;
476 hrtimer_cancel_p = hrtimer_cancel;
478 #endif
480 /* handle old hrtimer API with data pointer */
481 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
482 static inline void hrtimer_data_pointer(struct hrtimer *timer)
484 timer->data = (void *)timer;
486 #else
487 static inline void hrtimer_data_pointer(struct hrtimer *timer) {}
488 #endif
490 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
492 #define ns_to_timespec kvm_ns_to_timespec
494 struct timespec kvm_ns_to_timespec(const s64 nsec);
496 #endif
498 /* work_struct lost the 'data' field in 2.6.20 */
499 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
501 #define kvm_INIT_WORK(work, handler) \
502 INIT_WORK(work, (void (*)(void *))handler, work)
504 #else
506 #define kvm_INIT_WORK(work, handler) INIT_WORK(work, handler)
508 #endif
510 /* cancel_work_sync() was flush_work() in 2.6.21 */
511 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
513 static inline int cancel_work_sync(struct work_struct *work)
516 * FIXME: actually cancel. How? Add own implementation of workqueues?
518 return 0;
521 #endif
523 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
525 struct pci_dev;
527 struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);
529 #endif