/*
 * ---- Compat shim: smp_call_function_single() for kernels < 2.6.20 ----
 * NOTE(review): the text below is a damaged extraction -- the original
 * file's line numbers are fused onto each line and many lines are
 * missing (struct fields, braces, parts of function bodies).  The code
 * text is left byte-identical; only annotations are added.  Do not
 * attempt to compile this fragment as-is.
 */
3 * smp_call_function_single() is not exported below 2.6.20.
6 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* Shadow the kernel name so the fallback defined below is used. */
8 #undef smp_call_function_single
10 #include <linux/spinlock.h>
11 #include <linux/smp.h>
/*
 * Argument bundle handed to every CPU via smp_call_function(); only the
 * func pointer is visible here -- the remaining fields (presumably a
 * target cpu and an info pointer, judging by the uses below) are
 * missing from this extraction.  TODO confirm against the full file.
 */
13 struct scfs_thunk_info
{
15 void (*func
)(void *info
);
/*
 * Broadcast helper: runs on every CPU but invokes func(info) only on
 * the CPU the caller targeted (thunk->cpu).
 */
19 static void scfs_thunk(void *_thunk
)
21 struct scfs_thunk_info
*thunk
= _thunk
;
23 if (raw_smp_processor_id() == thunk
->cpu
)
24 thunk
->func(thunk
->info
);
/*
 * Fallback smp_call_function_single(): the visible fragment
 * special-cases cpu == this_cpu and otherwise broadcasts via
 * smp_call_function(scfs_thunk, ...), relying on scfs_thunk() to
 * filter by CPU.  The local-CPU branch body and the epilogue are
 * missing from this extraction.
 */
27 int kvm_smp_call_function_single(int cpu
, void (*func
)(void *info
),
31 struct scfs_thunk_info thunk
;
34 WARN_ON(irqs_disabled());
35 if (cpu
== this_cpu
) {
44 r
= smp_call_function(scfs_thunk
, &thunk
, 0, 1);
/* Route all later callers in this translation unit to the fallback. */
50 #define smp_call_function_single kvm_smp_call_function_single
/*
 * ---- Compat shim: kernels 2.6.20 .. 2.6.22 ----
 * The kernel's smp_call_function_single() exists here but (per the
 * comment below) cannot target the current CPU, so this wrapper handles
 * cpu == this_cpu locally and delegates the remote case to the real
 * function (old 5-argument form, with the 'nonatomic' 0).
 * NOTE(review): damaged extraction -- original line numbers fused into
 * the text, several lines missing; code bytes left untouched.
 */
52 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
54 * pre 2.6.23 doesn't handle smp_call_function_single on current cpu
57 #undef smp_call_function_single
59 #include <linux/smp.h>
61 int kvm_smp_call_function_single(int cpu
, void (*func
)(void *info
),
67 WARN_ON(irqs_disabled());
/* Local-CPU branch (body missing from this extraction). */
68 if (cpu
== this_cpu
) {
/* Remote case: forward to the kernel's own implementation. */
74 r
= smp_call_function_single(cpu
, func
, info
, 0, wait
);
/* Route all later callers to the wrapper. */
79 #define smp_call_function_single kvm_smp_call_function_single
/*
 * ---- Compat shim: kernels 2.6.23 .. 2.6.26 ----
 * Thin adapter: the 'nonatomic' argument was removed in 2.6.27, so on
 * these kernels the wrapper passes a literal 0 for it and otherwise
 * forwards unchanged.
 * NOTE(review): damaged extraction -- the second parameter line, the
 * opening brace and the closing brace are missing; bytes untouched.
 */
81 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
83 /* The 'nonatomic' argument was removed in 2.6.27. */
85 #undef smp_call_function_single
87 #include <linux/smp.h>
90 int kvm_smp_call_function_single(int cpu
, void (*func
)(void *info
),
93 return smp_call_function_single(cpu
, func
, info
, 0, wait
);
/*
 * ---- Uniprocessor (!CONFIG_SMP) variant ----
 * NOTE(review): only the signature fragment survives in this
 * extraction; the entire body (original lines ~97-105) is missing.
 * Presumably it runs func(info) directly on the single CPU -- TODO
 * confirm against the full file.
 */
95 #else /* !CONFIG_SMP */
96 int kvm_smp_call_function_single(int cpu
, void (*func
)(void *info
),
106 #endif /* !CONFIG_SMP */
/* All variants above are published under the same name. */
108 #define smp_call_function_single kvm_smp_call_function_single
/*
 * ---- Compat: div64_u64() for kernels < 2.6.26 ----
 * 64/64-bit division fallback.  The visible fragment extracts the high
 * 32 bits of the divisor and, when nonzero, right-shifts the divisor by
 * fls(high) to reduce precision before dividing; the actual division
 * and the high == 0 fast path are missing from this extraction.
 * NOTE(review): damaged extraction; code bytes left untouched.
 */
112 /* div64_u64 is fairly new */
113 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
117 /* 64bit divisor, dividend and result. dynamic precision */
118 uint64_t div64_u64(uint64_t dividend
, uint64_t divisor
)
122 high
= divisor
>> 32;
/* shift = position of the highest set bit in the upper word. */
124 unsigned int shift
= fls(high
);
126 d
= divisor
>> shift
;
/*
 * ---- Compat: smp_call_function_mask() for kernels < 2.6.24 ----
 * Emulates a masked cross-CPU call with per-CPU
 * smp_call_function_single() plus two atomic counters (started /
 * finished) that the caller spins on.
 * NOTE(review): damaged extraction -- original line numbers fused into
 * the text, many lines missing (struct fields, braces, spin-loop
 * bodies).  Code bytes left untouched; annotations only.
 */
141 * smp_call_function_mask() is not defined/exported below 2.6.24
144 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
146 #include <linux/smp.h>
/*
 * Shared call descriptor; only func is visible here -- info, wait and
 * the started/finished atomics used below are missing from this
 * extraction.
 */
148 struct kvm_call_data_struct
{
149 void (*func
) (void *info
);
/*
 * Per-CPU trampoline: copies everything it needs out of *data into
 * locals BEFORE acking 'started', because (per the surviving comment)
 * the caller's stack copy may go out of scope once started is bumped
 * when wait == 0.
 */
156 static void kvm_ack_smp_call(void *_data
)
158 struct kvm_call_data_struct
*data
= _data
;
159 /* if wait == 0, data can be out of scope
160 * after atomic_inc(info->started)
162 void (*func
) (void *info
) = data
->func
;
163 void *info
= data
->info
;
164 int wait
= data
->wait
;
167 atomic_inc(&data
->started
);
171 atomic_inc(&data
->finished
);
/*
 * Driver: restricts the mask to online CPUs other than the caller,
 * fires kvm_ack_smp_call on each, then busy-waits until all targets
 * have started (and, presumably when wait != 0, finished).
 */
175 int kvm_smp_call_function_mask(cpumask_t mask
,
176 void (*func
) (void *info
), void *info
, int wait
)
179 struct kvm_call_data_struct data
;
180 cpumask_t allbutself
;
186 WARN_ON(irqs_disabled());
/* allbutself = online CPUs minus the calling CPU ('me'). */
187 allbutself
= cpu_online_map
;
188 cpu_clear(me
, allbutself
);
190 cpus_and(mask
, mask
, allbutself
);
191 cpus
= cpus_weight(mask
);
198 atomic_set(&data
.started
, 0);
201 atomic_set(&data
.finished
, 0);
/* One non-waiting single-CPU call per target in the mask. */
203 for (cpu
= first_cpu(mask
); cpu
!= NR_CPUS
; cpu
= next_cpu(cpu
, mask
))
204 smp_call_function_single(cpu
, kvm_ack_smp_call
, &data
, 0);
/* Spin until every target has acked start (loop body missing here). */
206 while (atomic_read(&data
.started
) != cpus
) {
214 while (atomic_read(&data
.finished
) != cpus
) {
220 #endif /* CONFIG_SMP */
/*
 * Function pointers mirroring hrtimer_init / hrtimer_start /
 * hrtimer_cancel, to be resolved at runtime ("manually export", per the
 * surviving comment) -- presumably via symbol_get, as done for tsc_khz
 * later in this file; the resolution code is not visible here.
 * NOTE(review): damaged extraction; declarations left byte-identical.
 */
226 /* manually export hrtimer_init/start/cancel */
227 void (*hrtimer_init_p
)(struct hrtimer
*timer
, clockid_t which_clock
,
228 enum hrtimer_mode mode
);
229 int (*hrtimer_start_p
)(struct hrtimer
*timer
, ktime_t tim
,
230 const enum hrtimer_mode mode
);
231 int (*hrtimer_cancel_p
)(struct hrtimer
*timer
);
/*
 * ---- Compat: ns_to_timespec() for kernels < 2.6.22 ----
 * kvm_set_normalized_timespec brings tv_nsec into [0, NSEC_PER_SEC) by
 * repeated add/subtract; kvm_ns_to_timespec splits a signed ns count
 * with div_long_long_rem_signed and renormalizes negative remainders.
 * NOTE(review): damaged extraction -- sec adjustments inside the
 * normalization loops and the function tails are missing; code bytes
 * left untouched.
 */
233 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
235 static void kvm_set_normalized_timespec(struct timespec
*ts
, time_t sec
,
/* Fold whole seconds out of an oversized nsec... */
238 while (nsec
>= NSEC_PER_SEC
) {
239 nsec
-= NSEC_PER_SEC
;
/* ...and borrow seconds into a negative nsec. */
243 nsec
+= NSEC_PER_SEC
;
250 struct timespec
kvm_ns_to_timespec(const s64 nsec
)
/* Zero input (presumably) short-circuits to a zero timespec. */
255 return (struct timespec
) {0, 0};
257 ts
.tv_sec
= div_long_long_rem_signed(nsec
, NSEC_PER_SEC
, &ts
.tv_nsec
);
/* Negative ns: remainder may be negative, so renormalize. */
258 if (unlikely(nsec
< 0))
259 kvm_set_normalized_timespec(&ts
, ts
.tv_sec
, ts
.tv_nsec
);
/*
 * ---- Compat: pci_get_bus_and_slot() for kernels < 2.6.20 ----
 * Linear scan over all PCI devices via pci_get_device(PCI_ANY_ID, ...),
 * matching domain 0 plus the requested bus number and devfn.  The
 * match/return tail of the loop is missing from this extraction.
 * NOTE(review): damaged extraction; code bytes left untouched.
 */
266 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
268 #include <linux/pci.h>
270 struct pci_dev
*pci_get_bus_and_slot(unsigned int bus
, unsigned int devfn
)
272 struct pci_dev
*dev
= NULL
;
/* pci_get_device with a prior dev iterates the global device list. */
274 while ((dev
= pci_get_device(PCI_ANY_ID
, PCI_ANY_ID
, dev
)) != NULL
) {
275 if (pci_domain_nr(dev
->bus
) == 0 &&
276 (dev
->bus
->number
== bus
&& dev
->devfn
== devfn
))
/*
 * ---- Compat: intel_iommu_found() for kernels < 2.6.28 ----
 * NOTE(review): only the signature fragment survives here; the body is
 * missing from this extraction (presumably a trivial stub -- TODO
 * confirm against the full file).  Old-style parameterless declaration;
 * (void) would be the modern spelling, but bytes are left untouched.
 */
284 #include <linux/intel-iommu.h>
286 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
288 int intel_iommu_found()
/*
 * ---- Compat: relay_open() wrapper for kernels < 2.6.21 ----
 * Adapts to the pre-2.6.21 relay_open() signature (per the surviving
 * comment) and stashes private_data on the returned channel by hand,
 * since the old interface did not accept it.  Parameter list and error
 * handling are partially missing from this extraction.
 * NOTE(review): damaged extraction; code bytes left untouched.
 */
296 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
298 /* relay_open() interface has changed on 2.6.21 */
300 struct rchan
*kvm_relay_open(const char *base_filename
,
301 struct dentry
*parent
,
304 struct rchan_callbacks
*cb
,
307 struct rchan
*chan
= relay_open(base_filename
, parent
,
308 subbuf_size
, n_subbufs
,
/* Old relay_open() had no private_data parameter; set it manually. */
311 chan
->private_data
= private_data
;
/*
 * ---- Compat: pci_dev MSI-enabled test for kernels < 2.6.18 ----
 * Locates the device's MSI capability and tests PCI_MSI_FLAGS_ENABLE in
 * its control word.  The return statements are missing from this
 * extraction (presumably 0 when no capability, nonzero when the enable
 * bit is set -- TODO confirm against the full file).
 * NOTE(review): damaged extraction; code bytes left untouched.
 */
317 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
319 #include <linux/pci.h>
321 int kvm_pcidev_msi_enabled(struct pci_dev
*dev
)
/* No MSI capability at all -> early exit (tail missing here). */
326 if (!(pos
= pci_find_capability(dev
, PCI_CAP_ID_MSI
)))
329 pci_read_config_word(dev
, pos
+ PCI_MSI_FLAGS
, &control
);
330 if (control
& PCI_MSI_FLAGS_ENABLE
)
/*
 * ---- Compat: tsc_khz for kernels < 2.6.23 ----
 * tsc_khz is resolved at runtime with symbol_get(); when the symbol is
 * unavailable, tsc_khz_p falls back to the static 2 GHz dummy value.
 * NOTE(review): damaged extraction, and kvm_get_tsc_khz() continues
 * past the end of this chunk -- only annotating the visible part; code
 * bytes left untouched.
 */
338 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
340 extern unsigned tsc_khz
;
/* Fallback value (2,000,000 kHz = 2 GHz) used when lookup fails. */
341 static unsigned tsc_khz_dummy
= 2000000;
342 static unsigned *tsc_khz_p
;
344 unsigned kvm_get_tsc_khz(void)
347 tsc_khz_p
= symbol_get(tsc_khz
);
349 tsc_khz_p
= &tsc_khz_dummy
;