[kvm-userspace.git] / kernel / external-module-compat.c
/*
 * smp_call_function_single() is not exported below 2.6.20.
 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

#undef smp_call_function_single

#include <linux/spinlock.h>
#include <linux/smp.h>

struct scfs_thunk_info {
	int cpu;
	void (*func)(void *info);
	void *info;
};

/* Run func(info) only if we are executing on the requested cpu. */
static void scfs_thunk(void *_thunk)
{
	struct scfs_thunk_info *thunk = _thunk;

	if (raw_smp_processor_id() == thunk->cpu)
		thunk->func(thunk->info);
}

int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
				 void *info, int wait)
{
	int r, this_cpu;
	struct scfs_thunk_info thunk;

	this_cpu = get_cpu();
	WARN_ON(irqs_disabled());
	if (cpu == this_cpu) {
		r = 0;
		local_irq_disable();
		func(info);
		local_irq_enable();
	} else {
		thunk.cpu = cpu;
		thunk.func = func;
		thunk.info = info;
		/* broadcast; scfs_thunk filters for the target cpu */
		r = smp_call_function(scfs_thunk, &thunk, 0, 1);
	}
	put_cpu();

	return r;
}

#define smp_call_function_single kvm_smp_call_function_single
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

/*
 * pre 2.6.23 doesn't handle smp_call_function_single on the current cpu
 */

#undef smp_call_function_single

#include <linux/smp.h>

int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
				 void *info, int wait)
{
	int this_cpu, r;

	this_cpu = get_cpu();
	WARN_ON(irqs_disabled());
	if (cpu == this_cpu) {
		r = 0;
		local_irq_disable();
		func(info);
		local_irq_enable();
	} else
		r = smp_call_function_single(cpu, func, info, 0, wait);
	put_cpu();

	return r;
}

#define smp_call_function_single kvm_smp_call_function_single
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

/* The 'nonatomic' argument was removed in 2.6.27. */

#undef smp_call_function_single

#include <linux/smp.h>

#ifdef CONFIG_SMP
int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
				 void *info, int wait)
{
	return smp_call_function_single(cpu, func, info, 0, wait);
}
#else /* !CONFIG_SMP */
int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
				 void *info, int wait)
{
	WARN_ON(cpu != 0);
	local_irq_disable();
	func(info);
	local_irq_enable();
	return 0;
}
#endif /* !CONFIG_SMP */

#define smp_call_function_single kvm_smp_call_function_single

#endif
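/*
 * Illustrative sketch, not part of the original file: how a caller might use
 * the smp_call_function_single() wrapper above (or the native 4-argument
 * call on 2.6.27 and later) to run a function on one specific CPU.  The
 * function names here are hypothetical.
 */
#include <linux/smp.h>

static void __attribute__((unused)) example_remote_fn(void *info)
{
	int *ran_on = info;

	*ran_on = raw_smp_processor_id();
}

static int __attribute__((unused)) example_run_on_cpu(int cpu)
{
	int ran_on = -1;

	/* wait=1: do not return until example_remote_fn has executed */
	return smp_call_function_single(cpu, example_remote_fn, &ran_on, 1);
}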
/* div64_u64 is fairly new */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)

#ifndef CONFIG_64BIT

/* 64bit divisor, dividend and result. dynamic precision */
uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	uint32_t high, d;

	high = divisor >> 32;
	if (high) {
		/* scale both values down until the divisor fits in 32 bits */
		unsigned int shift = fls(high);

		d = divisor >> shift;
		dividend >>= shift;
	} else
		d = divisor;

	do_div(dividend, d);

	return dividend;
}

#endif

#endif
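/*
 * Illustrative sketch, not part of the original file: div64_u64() is what to
 * reach for when both dividend and divisor are 64-bit, since a plain '/' on
 * u64 values does not link on 32-bit kernels.  The function name and the use
 * case (averaging) are hypothetical; the guard mirrors the one above so the
 * sketch only builds where the fallback is defined.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) && !defined(CONFIG_64BIT)
static uint64_t __attribute__((unused))
example_average_ns(uint64_t total_ns, uint64_t nr_samples)
{
	if (!nr_samples)
		return 0;
	return div64_u64(total_ns, nr_samples);
}
#endif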
/*
 * smp_call_function_mask() is not defined/exported below 2.6.24
 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)

#include <linux/smp.h>

struct kvm_call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static void kvm_ack_smp_call(void *_data)
{
	struct kvm_call_data_struct *data = _data;
	/* if wait == 0, data can be out of scope
	 * after atomic_inc(&data->started)
	 */
	void (*func) (void *info) = data->func;
	void *info = data->info;
	int wait = data->wait;

	smp_mb();
	atomic_inc(&data->started);
	(*func)(info);
	if (wait) {
		smp_mb();
		atomic_inc(&data->finished);
	}
}

int kvm_smp_call_function_mask(cpumask_t mask,
			       void (*func) (void *info), void *info, int wait)
{
#ifdef CONFIG_SMP
	struct kvm_call_data_struct data;
	cpumask_t allbutself;
	int cpus;
	int cpu;
	int me;

	me = get_cpu();
	WARN_ON(irqs_disabled());
	allbutself = cpu_online_map;
	cpu_clear(me, allbutself);

	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);

	if (!cpus)
		goto out;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	for (cpu = first_cpu(mask); cpu != NR_CPUS; cpu = next_cpu(cpu, mask))
		smp_call_function_single(cpu, kvm_ack_smp_call, &data, 0);

	/* wait until every targeted cpu has at least started the callback */
	while (atomic_read(&data.started) != cpus) {
		cpu_relax();
		barrier();
	}

	if (!wait)
		goto out;

	while (atomic_read(&data.finished) != cpus) {
		cpu_relax();
		barrier();
	}
out:
	put_cpu();
#endif /* CONFIG_SMP */
	return 0;
}

#endif
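/*
 * Illustrative sketch, not part of the original file: running a callback on
 * every online CPU through the fallback above (it drops the calling CPU from
 * the mask itself).  The function names are hypothetical; cpu_online_map is
 * the by-value cpumask API of this era, matching the fallback, and the guard
 * mirrors the one above.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
static void __attribute__((unused)) example_mask_fn(void *info)
{
	/* would run once on each remote CPU in the mask */
}

static void __attribute__((unused)) example_call_all_cpus(void)
{
	/* wait=1: return only after every targeted CPU ran example_mask_fn */
	kvm_smp_call_function_mask(cpu_online_map, example_mask_fn, NULL, 1);
}
#endif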
/* manually export hrtimer_init/start/cancel */
void (*hrtimer_init_p)(struct hrtimer *timer, clockid_t which_clock,
		       enum hrtimer_mode mode);
int (*hrtimer_start_p)(struct hrtimer *timer, ktime_t tim,
		       const enum hrtimer_mode mode);
int (*hrtimer_cancel_p)(struct hrtimer *timer);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)

static void kvm_set_normalized_timespec(struct timespec *ts, time_t sec,
					long nsec)
{
	while (nsec >= NSEC_PER_SEC) {
		nsec -= NSEC_PER_SEC;
		++sec;
	}
	while (nsec < 0) {
		nsec += NSEC_PER_SEC;
		--sec;
	}
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}

struct timespec kvm_ns_to_timespec(const s64 nsec)
{
	struct timespec ts;

	if (!nsec)
		return (struct timespec) {0, 0};

	ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec);
	if (unlikely(nsec < 0))
		kvm_set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec);

	return ts;
}

#endif
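/*
 * Illustrative sketch, not part of the original file: converting a made-up
 * nanosecond count into a normalized timespec with the backport above.  The
 * function name is hypothetical; the guard mirrors the one above.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
static struct timespec __attribute__((unused)) example_timeout(void)
{
	/* 1.5 seconds expressed in nanoseconds */
	return kvm_ns_to_timespec(1500000000LL);
}
#endif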
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

#include <linux/pci.h>

struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		if (pci_domain_nr(dev->bus) == 0 &&
			(dev->bus->number == bus && dev->devfn == devfn))
			return dev;
	}
	return NULL;
}

#endif
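/*
 * Illustrative sketch, not part of the original file: looking up a domain-0
 * device by bus/devfn with the helper above.  The helper hands back the
 * counted reference taken by pci_get_device(), so the caller releases it
 * with pci_dev_put().  Bus 0, slot 3, function 0 are made-up values; the
 * guard mirrors the one above.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
#include <linux/errno.h>

static int __attribute__((unused)) example_find_device(void)
{
	struct pci_dev *dev;

	dev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0));
	if (!dev)
		return -ENODEV;

	/* ... inspect dev here ... */

	pci_dev_put(dev);
	return 0;
}
#endif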
#include <linux/intel-iommu.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

int intel_iommu_found()
{
	return 0;
}

#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)

/* relay_open() interface has changed in 2.6.21 */

struct rchan *kvm_relay_open(const char *base_filename,
			     struct dentry *parent,
			     size_t subbuf_size,
			     size_t n_subbufs,
			     struct rchan_callbacks *cb,
			     void *private_data)
{
	struct rchan *chan = relay_open(base_filename, parent,
					subbuf_size, n_subbufs,
					cb);

	if (chan)
		chan->private_data = private_data;

	return chan;
}

#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)

#include <linux/pci.h>

int kvm_pcidev_msi_enabled(struct pci_dev *dev)
{
	int pos;
	u16 control;

	if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
		return 0;

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	if (control & PCI_MSI_FLAGS_ENABLE)
		return 1;

	return 0;
}

#endif
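/*
 * Illustrative sketch, not part of the original file: the helper above
 * answers whether a device already has MSI enabled by reading its MSI
 * capability directly.  The function name is hypothetical; the guard mirrors
 * the one above.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static int __attribute__((unused)) example_irq_is_shareable(struct pci_dev *dev)
{
	/* MSI interrupts are never shared; legacy INTx lines may be */
	return !kvm_pcidev_msi_enabled(dev);
}
#endif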
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

extern unsigned tsc_khz;
static unsigned tsc_khz_dummy = 2000000;
static unsigned *tsc_khz_p;

unsigned kvm_get_tsc_khz(void)
{
	if (!tsc_khz_p) {
		tsc_khz_p = symbol_get(tsc_khz);
		/* fall back to a 2 GHz guess if tsc_khz isn't exported */
		if (!tsc_khz_p)
			tsc_khz_p = &tsc_khz_dummy;
	}
	return *tsc_khz_p;
}

#endif
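/*
 * Illustrative sketch, not part of the original file: converting a TSC cycle
 * delta to microseconds with the fallback above (tsc_khz is cycles per
 * millisecond).  If the real tsc_khz symbol could not be resolved, the 2 GHz
 * dummy makes this only an estimate.  The function name is hypothetical; the
 * guard mirrors the one above.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
#include <linux/types.h>
#include <asm/div64.h>

static u64 __attribute__((unused)) example_tsc_delta_to_us(u64 tsc_delta)
{
	u64 us = tsc_delta * 1000;

	do_div(us, kvm_get_tsc_khz());
	return us;
}
#endif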