/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Common macros and functions for ring benchmarking.
 */

#include <stdbool.h>	/* for bool in the ring API declarations below */
#include <assert.h>	/* for assert() in the cpu_relax() fallback below */

#if defined(__x86_64__) || defined(__i386__)
#include "x86intrin.h"

static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#else
static inline void wait_cycles(unsigned long long cycles)
{
	(void)cycles;	/* no cycle counter here: treat the delay as a no-op */
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif

static inline void vmexit(void)
{
	wait_cycles(VMEXIT_CYCLES);	/* model the cost of a VM exit */
}

static inline void vmentry(void)
{
	wait_cycles(VMENTRY_CYCLES);	/* model the cost of a VM entry */
}

/* implemented by ring */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void kick_available();
/* host side */
bool use_buf(unsigned *, void **);
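
/*
 * Illustrative sketch, not part of the benchmark: one way a guest-side
 * producer might drive the API above.  The 0x80 length, the reuse of "buf"
 * as its own opaque token, and the assumption that add_inbuf() returns 0 on
 * success are all hypothetical here.
 */
static inline void example_produce(void *buf)
{
	unsigned len;
	void *bufp;

	if (add_inbuf(0x80, buf, buf) == 0)	/* post the buffer (assumed: 0 == success) */
		kick_available();		/* notify the host that data is available */
	while (get_buf(&len, &bufp))		/* reclaim any buffers the host has used */
		;
}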

/* implemented by main */
void wait_for_kick(void);
void wait_for_call(void);

extern unsigned ring_size;
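
/*
 * Illustrative sketch, not part of the benchmark: a host-side consumer
 * might block in wait_for_kick() until the guest notifies it, then drain
 * completed buffers with use_buf().  This pairing is an assumption; the
 * real loops live in main.c and the ring implementations.
 */
static inline void example_consume(void)
{
	unsigned len;
	void *buf;

	wait_for_kick();		/* sleep until the producer kicks us */
	while (use_buf(&len, &buf))	/* process every buffer that is ready */
		;
}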

/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")
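
/*
 * Illustrative sketch, not from this file: the compiler barrier is what
 * keeps a polling loop honest.  Without it the compiler could hoist the
 * load of *flag out of the loop and spin on a stale register value.
 */
static inline void example_poll(int *flag)
{
	while (!*flag)
		barrier();	/* force *flag to be reloaded each iteration */
}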

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() asm ("rep; nop" ::: "memory")
#else
#define cpu_relax() assert(0)
#endif

static inline void busy_wait(void)
{
	barrier();	/* prevent compiler from removing busy loops */
}

/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()
/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)
#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)
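
/*
 * Illustrative sketch, an assumption about usage rather than code from this
 * file: smp_release() orders a producer's payload stores before the store
 * that publishes them, and smp_acquire() orders a consumer's flag load
 * before the payload loads.  "data", "ready" and the helpers below are
 * hypothetical names.
 */
static inline void example_publish(int *data, int *ready, int v)
{
	*data = v;	/* write the payload first */
	smp_release();	/* order the payload store before the flag store */
	*ready = 1;	/* publish: the consumer may now read the payload */
}

static inline int example_read(int *data, int *ready)
{
	while (!*ready)
		busy_wait();	/* poll for the flag */
	smp_acquire();		/* order the flag load before the payload load */
	return *data;
}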