kernel/external-module-compat-comm.h
/*
 * Compatibility header for building as an external module.
 */

/*
 * Avoid picking up the kernel's kvm.h in case we have a newer one.
 */

#include <linux/compiler.h>
#include <linux/version.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/time.h>
#include <asm/processor.h>
#include <linux/hrtimer.h>
#include <asm/bitops.h>

/* Override CONFIG_KVM_TRACE */
#ifdef EXT_CONFIG_KVM_TRACE
# define CONFIG_KVM_TRACE 1
#else
# undef CONFIG_KVM_TRACE
#endif
/*
 * 2.6.16 does not have GFP_NOWAIT
 */

#include <linux/gfp.h>

#ifndef GFP_NOWAIT
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
#endif
/*
 * kvm profiling support needs 2.6.20
 */
#include <linux/profile.h>

#ifndef KVM_PROFILING
#define KVM_PROFILING 1234
#define prof_on 4321
#endif
/*
 * smp_call_function_single() is not exported below 2.6.20, and has different
 * semantics below 2.6.23. The 'nonatomic' argument was removed in 2.6.27.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
				 void *info, int wait);
#undef smp_call_function_single
#define smp_call_function_single kvm_smp_call_function_single

#endif
/* on_each_cpu() lost an argument in 2.6.27. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

#define kvm_on_each_cpu(func, info, wait) on_each_cpu(func, info, 0, wait)

#else

#define kvm_on_each_cpu(func, info, wait) on_each_cpu(func, info, wait)

#endif
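/*
 * Usage sketch (illustrative, not part of the original header): callers go
 * through the kvm_ wrapper and get the correct argument count on either
 * side of 2.6.27; 'flush_guest_tlb' is a hypothetical handler:
 *
 *	static void flush_guest_tlb(void *unused)
 *	{
 *		...
 *	}
 *
 *	kvm_on_each_cpu(flush_guest_tlb, NULL, 1);
 */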
/*
 * The cpu hotplug stubs are broken if !CONFIG_CPU_HOTPLUG
 */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,15)
#define DEFINE_MUTEX(a) DECLARE_MUTEX(a)
#define mutex_lock_interruptible(a) down_interruptible(a)
#define mutex_unlock(a) up(a)
#define mutex_lock(a) down(a)
#define mutex_init(a) init_MUTEX(a)
#define mutex_trylock(a) down_trylock(a)
#define mutex semaphore
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
#ifndef kzalloc
#define kzalloc(size,flags)			\
({						\
	void *__ret = kmalloc(size, flags);	\
	if (__ret)				\
		memset(__ret, 0, size);		\
	__ret;					\
})
#endif
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
#ifndef kmem_cache_zalloc
#define kmem_cache_zalloc(cache,flags)			\
({							\
	void *__ret = kmem_cache_alloc(cache, flags);	\
	if (__ret)					\
		memset(__ret, 0, kmem_cache_size(cache));\
	__ret;						\
})
#endif
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)

#ifndef CONFIG_HOTPLUG_CPU
#define register_cpu_notifier(nb) (0)
#endif

#endif
#include <linux/miscdevice.h>
#ifndef KVM_MINOR
#define KVM_MINOR 232
#endif

#include <linux/notifier.h>
#ifndef CPU_TASKS_FROZEN

#define CPU_TASKS_FROZEN	0x0010
#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)

#endif

#ifndef CPU_DYING
#define CPU_DYING 0x000A
#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
#endif
#include <asm/system.h>

struct inode;
#include <linux/anon_inodes.h>
#define anon_inode_getfd kvm_anon_inode_getfd
int kvm_init_anon_inodes(void);
void kvm_exit_anon_inodes(void);
int anon_inode_getfd(const char *name,
		     const struct file_operations *fops,
		     void *priv, int flags);
/*
 * 2.6.23 removed the cache destructor
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
# define kmem_cache_create(name, size, align, flags, ctor) \
	kmem_cache_create(name, size, align, flags, ctor, NULL)
#endif

/* HRTIMER_MODE_ABS started life with a different name */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
#define HRTIMER_MODE_ABS HRTIMER_ABS
#endif
/* div64_u64 is fairly new */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)

#define div64_u64 kvm_div64_u64

#ifdef CONFIG_64BIT

static inline uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;
}

#else

uint64_t div64_u64(uint64_t dividend, uint64_t divisor);

#endif

#endif
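/*
 * Usage sketch (illustrative): on 32-bit kernels a plain '/' on two u64
 * operands would emit a call to libgcc's __udivdi3, which the kernel does
 * not link against, so callers divide through the helper instead:
 *
 *	u64 ns_per_tick = div64_u64(elapsed_ns, nr_ticks);
 */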
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)

#ifdef RHEL_RELEASE_CODE
#if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,2)
#define RHEL_BOOL 1
#endif
#endif

#ifndef RHEL_BOOL

typedef _Bool bool;

#endif

#endif
/*
 * PF_VCPU is a Linux 2.6.24 addition
 */

#include <linux/sched.h>

#ifndef PF_VCPU
#define PF_VCPU 0
#endif

/*
 * smp_call_function_mask() is not defined/exported below 2.6.24
 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)

int kvm_smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
			       void *info, int wait);

#define smp_call_function_mask kvm_smp_call_function_mask

#endif
/* empty_zero_page isn't exported in all kernels */
#include <asm/pgtable.h>

#define empty_zero_page kvm_empty_zero_page

static char empty_zero_page[PAGE_SIZE];

/* reference the page once so the compiler does not warn about an unused static */
static inline void blahblah(void)
{
	(void)empty_zero_page[0];
}
/* __mmdrop() is not exported before 2.6.25 */
#include <linux/sched.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)

#define mmdrop(x) do { (void)(x); } while (0)
#define mmget(x) do { (void)(x); } while (0)

#else

#define mmget(x) do { atomic_inc(x); } while (0)

#endif
/* pagefault_enable(), pagefault_disable() - 2.6.20 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
# define KVM_NEED_PAGEFAULT_DISABLE 1
# ifdef RHEL_RELEASE_CODE
#  if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,3)
#   undef KVM_NEED_PAGEFAULT_DISABLE
#  endif
# endif
#endif

#ifdef KVM_NEED_PAGEFAULT_DISABLE

static inline void pagefault_disable(void)
{
	inc_preempt_count();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	dec_preempt_count();
	/*
	 * make sure we do..
	 */
	barrier();
	preempt_check_resched();
}

#endif
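/*
 * Usage sketch (illustrative): the pair above makes the usual atomic
 * user-access pattern compile on pre-2.6.20 kernels too:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, src, len);
 *	pagefault_enable();
 *	if (ret)
 *		... fall back to a sleeping copy ...
 */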
#include <linux/uaccess.h>

/* vm ops ->fault() was introduced in 2.6.23. */
#include <linux/mm.h>

#ifdef KVM_MAIN
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

struct vm_fault {
	unsigned int flags;
	pgoff_t pgoff;
	void __user *virtual_address;
	struct page *page;
};

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static inline struct page *kvm_nopage_to_fault(
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf),
	struct vm_area_struct *vma,
	unsigned long address,
	int *type)
{
	struct vm_fault vmf;
	int ret;

	vmf.pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	vmf.virtual_address = (void __user *)address;
	ret = fault(vma, &vmf);
	if (ret)
		return NOPAGE_SIGBUS;
	*type = VM_FAULT_MINOR;
	return vmf.page;
}

static inline struct page *__kvm_vcpu_fault(struct vm_area_struct *vma,
					    unsigned long address,
					    int *type)
{
	return kvm_nopage_to_fault(kvm_vcpu_fault, vma, address, type);
}

static inline struct page *__kvm_vm_fault(struct vm_area_struct *vma,
					  unsigned long address,
					  int *type)
{
	return kvm_nopage_to_fault(kvm_vm_fault, vma, address, type);
}

#define VMA_OPS_FAULT(x) nopage
#define VMA_OPS_FAULT_FUNC(x) __##x

#else

#define VMA_OPS_FAULT(x) x
#define VMA_OPS_FAULT_FUNC(x) x

#endif
#endif
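/*
 * Usage sketch (illustrative): a vm_operations_struct written with the two
 * macros hooks ->nopage on old kernels and ->fault on new ones from the
 * same source:
 *
 *	static struct vm_operations_struct kvm_vcpu_vm_ops = {
 *		.VMA_OPS_FAULT(fault) = VMA_OPS_FAULT_FUNC(kvm_vcpu_fault),
 *	};
 */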
/* simple vfs attribute getter signature has changed to add a return code */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)

#define MAKE_SIMPLE_ATTRIBUTE_GETTER(x)		\
	static u64 x(void *v)			\
	{					\
		u64 ret = 0;			\
						\
		__##x(v, &ret);			\
		return ret;			\
	}

#else

#define MAKE_SIMPLE_ATTRIBUTE_GETTER(x)		\
	static int x(void *v, u64 *val)		\
	{					\
		return __##x(v, val);		\
	}

#endif
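/*
 * Usage sketch (illustrative, hypothetical names): given a helper
 * __vm_stat_get(void *, u64 *), the macro emits a getter with whichever
 * signature the running kernel's simple attribute machinery expects:
 *
 *	static int __vm_stat_get(void *v, u64 *val) { ... }
 *	MAKE_SIMPLE_ATTRIBUTE_GETTER(vm_stat_get)
 *	DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
 */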
/* set_kset_name() is gone in 2.6.25 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)

#define set_kset_name(x) .name = x

#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifndef FASTCALL
#define FASTCALL(x)	x
#define fastcall
#endif
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

unsigned kvm_get_tsc_khz(void);
#define kvm_tsc_khz (kvm_get_tsc_khz())

#else

#define kvm_tsc_khz tsc_khz

#endif

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)

#include <linux/ktime.h>
#include <linux/hrtimer.h>

#define ktime_get kvm_ktime_get

static inline ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}

#endif
/* __aligned arrived in 2.6.21 */
#ifndef __aligned
#define __aligned(x) __attribute__((__aligned__(x)))
#endif

#include <linux/mm.h>

/* The shrinker API changed in 2.6.23 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

struct kvm_shrinker {
	int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
	int seeks;
	struct shrinker *kshrinker;
};

static inline void register_shrinker(struct kvm_shrinker *shrinker)
{
	shrinker->kshrinker = set_shrinker(shrinker->seeks, shrinker->shrink);
}

static inline void unregister_shrinker(struct kvm_shrinker *shrinker)
{
	if (shrinker->kshrinker)
		remove_shrinker(shrinker->kshrinker);
}

#define shrinker kvm_shrinker

#endif
/* clocksource */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/*  khz = cyc/(Million ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Million/khz * 2^shift
	 *  mult = 1000000 * 2^shift / khz
	 *  mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}
#else
#include <linux/clocksource.h>
#endif
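/*
 * Worked example (illustrative): for a 1 GHz counter, khz = 1000000; with
 * shift_constant = 22, mult = (1000000 << 22) / 1000000 = 2^22, so
 * ns = (cycles * mult) >> shift reduces to ns = cycles, as expected for a
 * clock with a 1 ns period.
 */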
/* manually export hrtimer_init/start/cancel */
#include <linux/kallsyms.h>
extern void (*hrtimer_init_p)(struct hrtimer *timer, clockid_t which_clock,
			      enum hrtimer_mode mode);
extern int (*hrtimer_start_p)(struct hrtimer *timer, ktime_t tim,
			      const enum hrtimer_mode mode);
extern int (*hrtimer_cancel_p)(struct hrtimer *timer);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) && defined(CONFIG_KALLSYMS)
static inline void hrtimer_kallsyms_resolve(void)
{
	hrtimer_init_p = (void *) kallsyms_lookup_name("hrtimer_init");
	BUG_ON(!hrtimer_init_p);
	hrtimer_start_p = (void *) kallsyms_lookup_name("hrtimer_start");
	BUG_ON(!hrtimer_start_p);
	hrtimer_cancel_p = (void *) kallsyms_lookup_name("hrtimer_cancel");
	BUG_ON(!hrtimer_cancel_p);
}
#else
static inline void hrtimer_kallsyms_resolve(void)
{
	hrtimer_init_p = hrtimer_init;
	hrtimer_start_p = hrtimer_start;
	hrtimer_cancel_p = hrtimer_cancel;
}
#endif

/* handle old hrtimer API with data pointer */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
static inline void hrtimer_data_pointer(struct hrtimer *timer)
{
	timer->data = (void *)timer;
}
#else
static inline void hrtimer_data_pointer(struct hrtimer *timer) {}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)

#define ns_to_timespec kvm_ns_to_timespec

struct timespec kvm_ns_to_timespec(const s64 nsec);

#endif

/* work_struct lost the 'data' field in 2.6.20 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

#define kvm_INIT_WORK(work, handler) \
	INIT_WORK(work, (void (*)(void *))handler, work)

#else

#define kvm_INIT_WORK(work, handler) INIT_WORK(work, handler)

#endif
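/*
 * Usage sketch (illustrative): handlers are written against the modern
 * prototype; on pre-2.6.20 kernels the wrapper passes the work item itself
 * as the old 'data' argument, so the handler still receives the
 * work_struct pointer. 'reaper' and 'reap_work' are hypothetical:
 *
 *	static void reaper(struct work_struct *work)
 *	{
 *		...
 *	}
 *
 *	kvm_INIT_WORK(&reap_work, reaper);
 */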
/* cancel_work_sync() was flush_work() in 2.6.21 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)

static inline int cancel_work_sync(struct work_struct *work)
{
	/*
	 * FIXME: actually cancel. How? Add own implementation of workqueues?
	 */
	return 0;
}

/* ... and it returned void before 2.6.23 */
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

#define cancel_work_sync(work) ({ cancel_work_sync(work); 0; })

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

struct pci_dev;

struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);

#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)

#include <linux/relay.h>

/* relay_open() interface changed in 2.6.21 */

struct rchan *kvm_relay_open(const char *base_filename,
			     struct dentry *parent,
			     size_t subbuf_size,
			     size_t n_subbufs,
			     struct rchan_callbacks *cb,
			     void *private_data);

#else

#define kvm_relay_open relay_open

#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

static inline int get_user_pages_fast(unsigned long start, int nr_pages,
				      int write, struct page **pages)
{
	int npages;

	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, start, nr_pages, write,
				0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	return npages;
}

#endif
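/*
 * Usage sketch (illustrative): pinning a single user page; note that this
 * fallback takes mmap_sem, so it may only be called from sleeping context:
 *
 *	struct page *page;
 *
 *	if (get_user_pages_fast(addr & PAGE_MASK, 1, 1, &page) != 1)
 *		return -EFAULT;
 *	...
 *	put_page(page);
 */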
/* spin_needbreak() was called something else in 2.6.24 */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24)

#define spin_needbreak need_lockbreak

#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

static inline void kvm_hrtimer_add_expires_ns(struct hrtimer *timer, u64 delta)
{
	timer->expires = ktime_add_ns(timer->expires, delta);
}

static inline ktime_t kvm_hrtimer_get_expires(struct hrtimer *timer)
{
	return timer->expires;
}

static inline u64 kvm_hrtimer_get_expires_ns(struct hrtimer *timer)
{
	return ktime_to_ns(timer->expires);
}

static inline void kvm_hrtimer_start_expires(struct hrtimer *timer, int mode)
{
	hrtimer_start_p(timer, timer->expires, mode);
}

static inline ktime_t kvm_hrtimer_expires_remaining(const struct hrtimer *timer)
{
	return ktime_sub(timer->expires, timer->base->get_time());
}

#else

#define kvm_hrtimer_add_expires_ns hrtimer_add_expires_ns
#define kvm_hrtimer_get_expires hrtimer_get_expires
#define kvm_hrtimer_get_expires_ns hrtimer_get_expires_ns
#define kvm_hrtimer_start_expires hrtimer_start_expires
#define kvm_hrtimer_expires_remaining hrtimer_expires_remaining

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

static inline int pci_reset_function(struct pci_dev *dev)
{
	/* stub: no generic function-level reset before 2.6.28; report success */
	return 0;
}

#endif
#include <linux/interrupt.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

typedef irqreturn_t (*kvm_irq_handler_t)(int, void *, struct pt_regs *);
static inline int kvm_request_irq(unsigned int a, kvm_irq_handler_t handler,
				  unsigned long c, const char *d, void *e)
{
	/* FIXME: allocate thunk, etc. */
	return -EINVAL;
}

#else

#define kvm_request_irq request_irq

#endif
/* dynamically allocated cpu masks introduced in 2.6.28 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

typedef cpumask_t cpumask_var_t[1];

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return 1;
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
}

static inline void cpumask_clear(cpumask_var_t mask)
{
	cpus_clear(*mask);
}

static inline void cpumask_set_cpu(int cpu, cpumask_var_t mask)
{
	cpu_set(cpu, *mask);
}

static inline int smp_call_function_many(cpumask_var_t cpus,
					 void (*func)(void *data), void *data,
					 int sync)
{
	return smp_call_function_mask(*cpus, func, data, sync);
}

static inline int cpumask_empty(cpumask_var_t mask)
{
	return cpus_empty(*mask);
}

static inline int cpumask_test_cpu(int cpu, cpumask_var_t mask)
{
	return cpu_isset(cpu, *mask);
}

static inline void cpumask_clear_cpu(int cpu, cpumask_var_t mask)
{
	cpu_clear(cpu, *mask);
}

#define cpu_online_mask (&cpu_online_map)

#endif
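/*
 * Usage sketch (illustrative): code written against the 2.6.28 cpumask API
 * compiles unchanged on older kernels via the shims above; 'some_func' is
 * a hypothetical callback:
 *
 *	cpumask_var_t cpus;
 *
 *	if (!alloc_cpumask_var(&cpus, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(cpus);
 *	cpumask_set_cpu(raw_smp_processor_id(), cpus);
 *	if (!cpumask_empty(cpus))
 *		smp_call_function_many(cpus, some_func, NULL, 1);
 *	free_cpumask_var(cpus);
 */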
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)

#define IF_ANON_INODES_DOES_REFCOUNTS(x)

#else

#define IF_ANON_INODES_DOES_REFCOUNTS(x) x

#endif

/* marker_synchronize_unregister() is only available on newer kernels */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
#define marker_synchronize_unregister() synchronize_sched()
#endif
/* pci_dev.msi_enabled was introduced in 2.6.18 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)

struct pci_dev;

int kvm_pcidev_msi_enabled(struct pci_dev *dev);

#else

#define kvm_pcidev_msi_enabled(dev) (dev)->msi_enabled

#endif
/* compound_head() was introduced in 2.6.22 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
# define NEED_COMPOUND_HEAD 1
# ifdef RHEL_RELEASE_CODE
#  if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,2)
#   undef NEED_COMPOUND_HEAD
#  endif
# endif
#endif

#ifdef NEED_COMPOUND_HEAD

static inline struct page *compound_head(struct page *page)
{
	if (PageCompound(page))
		page = (struct page *)page_private(page);
	return page;
}

#endif