/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <stdbool.h>
#include <assert.h>	/* for the cpu_relax() fallback below */

extern int param;

extern bool do_exit;
#if defined(__x86_64__) || defined(__i386__)
#include "x86intrin.h"

/* Spin until roughly @cycles TSC ticks have elapsed. */
static inline void wait_cycles(unsigned long long cycles)
{
	unsigned long long t;

	t = __rdtsc();
	while (__rdtsc() - t < cycles) {}
}

/* Rough per-transition cost, in TSC ticks; tune for your CPU. */
#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500
#elif defined(__s390x__)
static inline void wait_cycles(unsigned long long cycles)
{
	asm volatile("0: brctg %0,0b" : : "d" (cycles));
}

/* tweak me */
#define VMEXIT_CYCLES 200
#define VMENTRY_CYCLES 200
#else
/* Unsupported architecture: abort the run rather than report bogus numbers. */
static inline void wait_cycles(unsigned long long cycles)
{
	_Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif
/*
 * Model the cost of a guest->host (vmexit) or host->guest (vmentry)
 * transition when one side notifies the other; both are no-ops when
 * the harness clears do_exit.
 */
static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}
/* implemented by ring */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call();
bool used_empty();
bool enable_call();
void kick_available();
/* host side */
void disable_kick();
bool avail_empty();
bool enable_kick();
bool use_buf(unsigned *, void **);
void call_used();
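
/*
 * Sketch of how a guest-side loop is meant to combine these calls
 * (illustrative only -- the real loop lives in main.c, and details
 * such as batching and the add_inbuf() side are omitted):
 *
 *	if (used_empty() && enable_call())
 *		wait_for_call();		// still empty: sleep
 *	disable_call();
 *	while ((buf = get_buf(&len, &bufp)))
 *		;				// consume completed buffers
 *
 * enable_call()/enable_kick() re-arm notifications and then re-check
 * emptiness, closing the race between the last check and going to sleep.
 */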
/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;
/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")
/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
/* "rep; nop" is the PAUSE instruction: hints a spin-wait loop to the CPU. */
#define cpu_relax() asm ("rep; nop" ::: "memory")
#elif defined(__s390x__)
#define cpu_relax() barrier()
#else
#define cpu_relax() assert(0)
#endif
extern bool do_relax;

static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}
#if defined(__x86_64__) || defined(__i386__)
/*
 * A locked instruction is a full barrier on x86 and is cheaper than
 * mfence on many CPUs. The offset keeps the dummy add below the
 * 128-byte red zone, so no live stack data is clobbered.
 */
#define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#else
/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()
#endif
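
/*
 * smp_mb() matters where a store must become visible before a later
 * load (StoreLoad ordering), which none of the weaker barriers below
 * provide. The classic sleep/notify handshake shows why (illustrative
 * pseudo-code, not an API from this file):
 *
 *	guest				host
 *	-----				----
 *	publish buffer			set "notify me" flag
 *	smp_mb();			smp_mb();
 *	if ("notify me" set)		if (no buffer pending)
 *		notify host;			sleep;
 *
 * Without both fences each side can read the other's stale flag, the
 * notification is skipped, and the host sleeps forever.
 */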
/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)
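
/*
 * smp_release()/smp_acquire() pair across threads: the producer's data
 * writes are ordered before its publishing store, and the consumer's
 * flag read is ordered before its data reads. A sketch (buf/flag names
 * are illustrative, not part of this file):
 *
 *	producer:			consumer:
 *	buf->len = len;			while (!READ_ONCE(buf->ready))
 *	smp_release();				busy_wait();
 *	WRITE_ONCE(buf->ready, true);	smp_acquire();
 *					len = buf->len;
 */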
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
/* x86 and s390 never reorder stores against stores: a compiler barrier suffices. */
#define smp_wmb() barrier()
#else
#define smp_wmb() smp_release()
#endif
#ifdef __alpha__
/* Only Alpha reorders dependent loads; everyone else gets a no-op. */
#define smp_read_barrier_depends() smp_acquire()
#else
#define smp_read_barrier_depends() do {} while (0)
#endif
static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;
	case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;
	case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;
	case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile unsigned char *)p = *(unsigned char *)res; break;
	case 2: *(volatile unsigned short *)p = *(unsigned short *)res; break;
	case 4: *(volatile unsigned int *)p = *(unsigned int *)res; break;
	case 8: *(volatile unsigned long long *)p = *(unsigned long long *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}
/*
 * Tearing-free single accesses: the volatile access stops the compiler
 * from caching, fusing or splitting the load/store.
 */
#define READ_ONCE(x) \
({ \
	union { typeof(x) __val; char __c[1]; } __u; \
	__read_once_size(&(x), __u.__c, sizeof(x)); \
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val; \
})
#define WRITE_ONCE(x, val) \
({ \
	union { typeof(x) __val; char __c[1]; } __u = \
		{ .__val = (typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x)); \
	__u.__val; \
})
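
/*
 * The smp_read_barrier_depends() inside READ_ONCE() also makes dependent
 * dereferences safe on Alpha (sketch; struct msg is illustrative only):
 *
 *	struct msg *m = READ_ONCE(slot);	// ordered against ...
 *	if (m)
 *		len = m->len;			// ... this dependent load
 */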
#endif