/* tools/virtio/ringtest/ptr_ring.c — ptr_ring backend for the virtio ringtest harness */
1 // SPDX-License-Identifier: GPL-2.0
2 #define _GNU_SOURCE
3 #include "main.h"
4 #include <stdlib.h>
5 #include <stdio.h>
6 #include <string.h>
7 #include <pthread.h>
8 #include <malloc.h>
9 #include <assert.h>
10 #include <errno.h>
11 #include <limits.h>
13 #define SMP_CACHE_BYTES 64
14 #define cache_line_size() SMP_CACHE_BYTES
15 #define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
16 #define unlikely(x) (__builtin_expect(!!(x), 0))
17 #define likely(x) (__builtin_expect(!!(x), 1))
18 #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
19 #define SIZE_MAX (~(size_t)0)
20 #define KMALLOC_MAX_SIZE SIZE_MAX
22 typedef pthread_spinlock_t spinlock_t;
24 typedef int gfp_t;
25 #define __GFP_ZERO 0x1
27 static void *kmalloc(unsigned size, gfp_t gfp)
29 void *p = memalign(64, size);
30 if (!p)
31 return p;
33 if (gfp & __GFP_ZERO)
34 memset(p, 0, size);
35 return p;
38 static inline void *kzalloc(unsigned size, gfp_t flags)
40 return kmalloc(size, flags | __GFP_ZERO);
43 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
45 if (size != 0 && n > SIZE_MAX / size)
46 return NULL;
47 return kmalloc(n * size, flags);
50 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
52 return kmalloc_array(n, size, flags | __GFP_ZERO);
/* kfree(): free() wrapper.  No NULL guard: free(NULL) is a no-op (C11 7.22.3.3). */
static void kfree(void *p)
{
	free(p);
}

/* The test sizes never need a vmalloc fallback; alias the kv* API. */
#define kvmalloc_array kmalloc_array
#define kvfree kfree
64 static void spin_lock_init(spinlock_t *lock)
66 int r = pthread_spin_init(lock, 0);
67 assert(!r);
70 static void spin_lock(spinlock_t *lock)
72 int ret = pthread_spin_lock(lock);
73 assert(!ret);
76 static void spin_unlock(spinlock_t *lock)
78 int ret = pthread_spin_unlock(lock);
79 assert(!ret);
82 static void spin_lock_bh(spinlock_t *lock)
84 spin_lock(lock);
87 static void spin_unlock_bh(spinlock_t *lock)
89 spin_unlock(lock);
92 static void spin_lock_irq(spinlock_t *lock)
94 spin_lock(lock);
97 static void spin_unlock_irq(spinlock_t *lock)
99 spin_unlock(lock);
102 static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
104 spin_lock(lock);
107 static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
109 spin_unlock(lock);
112 #include "../../../include/linux/ptr_ring.h"
/* Producer/consumer progress counters used to fake get_buf() results. */
static unsigned long long headcnt, tailcnt;
/* The ring under test, on its own cache line to limit false sharing. */
static struct ptr_ring array ____cacheline_aligned_in_smp;
117 /* implemented by ring */
118 void alloc_ring(void)
120 int ret = ptr_ring_init(&array, ring_size, 0);
121 assert(!ret);
122 /* Hacky way to poke at ring internals. Useful for testing though. */
123 if (param)
124 array.batch = param;
127 /* guest side */
128 int add_inbuf(unsigned len, void *buf, void *datap)
130 int ret;
132 ret = __ptr_ring_produce(&array, buf);
133 if (ret >= 0) {
134 ret = 0;
135 headcnt++;
138 return ret;
142 * ptr_ring API provides no way for producer to find out whether a given
143 * buffer was consumed. Our tests merely require that a successful get_buf
144 * implies that add_inbuf succeed in the past, and that add_inbuf will succeed,
145 * fake it accordingly.
147 void *get_buf(unsigned *lenp, void **bufp)
149 void *datap;
151 if (tailcnt == headcnt || __ptr_ring_full(&array))
152 datap = NULL;
153 else {
154 datap = "Buffer\n";
155 ++tailcnt;
158 return datap;
161 bool used_empty()
163 return (tailcnt == headcnt || __ptr_ring_full(&array));
166 void disable_call()
168 assert(0);
171 bool enable_call()
173 assert(0);
176 void kick_available(void)
178 assert(0);
181 /* host side */
182 void disable_kick()
184 assert(0);
187 bool enable_kick()
189 assert(0);
192 bool avail_empty()
194 return __ptr_ring_empty(&array);
197 bool use_buf(unsigned *lenp, void **bufp)
199 void *ptr;
201 ptr = __ptr_ring_consume(&array);
203 return ptr;
/* Used-buffer notification is not modelled for ptr_ring; must not be reached. */
void call_used(void)
{
	assert(0);
}