/* tools/virtio/ringtest/ptr_ring.c */
1 // SPDX-License-Identifier: GPL-2.0
2 #define _GNU_SOURCE
3 #include "main.h"
4 #include <stdlib.h>
5 #include <stdio.h>
6 #include <string.h>
7 #include <pthread.h>
8 #include <malloc.h>
9 #include <assert.h>
10 #include <errno.h>
11 #include <limits.h>
13 #define SMP_CACHE_BYTES 64
14 #define cache_line_size() SMP_CACHE_BYTES
15 #define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
16 #define unlikely(x) (__builtin_expect(!!(x), 0))
17 #define likely(x) (__builtin_expect(!!(x), 1))
18 #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
19 #define SIZE_MAX (~(size_t)0)
20 #define KMALLOC_MAX_SIZE SIZE_MAX
21 #define BUG_ON(x) assert(x)
23 typedef pthread_spinlock_t spinlock_t;
25 typedef int gfp_t;
26 #define __GFP_ZERO 0x1
28 static void *kmalloc(unsigned size, gfp_t gfp)
30 void *p = memalign(64, size);
31 if (!p)
32 return p;
34 if (gfp & __GFP_ZERO)
35 memset(p, 0, size);
36 return p;
39 static inline void *kzalloc(unsigned size, gfp_t flags)
41 return kmalloc(size, flags | __GFP_ZERO);
44 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
46 if (size != 0 && n > SIZE_MAX / size)
47 return NULL;
48 return kmalloc(n * size, flags);
51 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
53 return kmalloc_array(n, size, flags | __GFP_ZERO);
/*
 * kfree() lookalike.  free(NULL) is defined as a no-op by the C standard
 * (C11 7.22.3.3), so the NULL guard the original carried was redundant.
 */
static void kfree(void *p)
{
	free(p);
}

/* The tests never allocate enough for the kv* variants to need to differ. */
#define kvmalloc_array kmalloc_array
#define kvfree kfree
65 static void spin_lock_init(spinlock_t *lock)
67 int r = pthread_spin_init(lock, 0);
68 assert(!r);
71 static void spin_lock(spinlock_t *lock)
73 int ret = pthread_spin_lock(lock);
74 assert(!ret);
77 static void spin_unlock(spinlock_t *lock)
79 int ret = pthread_spin_unlock(lock);
80 assert(!ret);
83 static void spin_lock_bh(spinlock_t *lock)
85 spin_lock(lock);
88 static void spin_unlock_bh(spinlock_t *lock)
90 spin_unlock(lock);
93 static void spin_lock_irq(spinlock_t *lock)
95 spin_lock(lock);
98 static void spin_unlock_irq(spinlock_t *lock)
100 spin_unlock(lock);
103 static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
105 spin_lock(lock);
108 static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
110 spin_unlock(lock);
113 #include "../../../include/linux/ptr_ring.h"
115 static unsigned long long headcnt, tailcnt;
116 static struct ptr_ring array ____cacheline_aligned_in_smp;
118 /* implemented by ring */
119 void alloc_ring(void)
121 int ret = ptr_ring_init(&array, ring_size, 0);
122 assert(!ret);
123 /* Hacky way to poke at ring internals. Useful for testing though. */
124 if (param)
125 array.batch = param;
128 /* guest side */
129 int add_inbuf(unsigned len, void *buf, void *datap)
131 int ret;
133 ret = __ptr_ring_produce(&array, buf);
134 if (ret >= 0) {
135 ret = 0;
136 headcnt++;
139 return ret;
143 * ptr_ring API provides no way for producer to find out whether a given
144 * buffer was consumed. Our tests merely require that a successful get_buf
145 * implies that add_inbuf succeed in the past, and that add_inbuf will succeed,
146 * fake it accordingly.
148 void *get_buf(unsigned *lenp, void **bufp)
150 void *datap;
152 if (tailcnt == headcnt || __ptr_ring_full(&array))
153 datap = NULL;
154 else {
155 datap = "Buffer\n";
156 ++tailcnt;
159 return datap;
162 bool used_empty()
164 return (tailcnt == headcnt || __ptr_ring_full(&array));
/* Call notifications are not modeled for ptr_ring; must never be reached. */
void disable_call()
{
	assert(0);
}
/*
 * Call notifications are not modeled for ptr_ring; this must never be
 * reached.  The explicit return avoids undefined behavior (falling off the
 * end of a non-void function whose value is used) if the file is ever built
 * with -DNDEBUG, which compiles assert() away.
 */
bool enable_call()
{
	assert(0);
	return false;
}
/* Kick notifications are not modeled for ptr_ring; must never be reached. */
void kick_available(void)
{
	assert(0);
}
/* host side */

/* Kick notifications are not modeled for ptr_ring; must never be reached. */
void disable_kick()
{
	assert(0);
}
/*
 * Kick notifications are not modeled for ptr_ring; this must never be
 * reached.  The explicit return avoids undefined behavior (falling off the
 * end of a non-void function whose value is used) if the file is ever built
 * with -DNDEBUG, which compiles assert() away.
 */
bool enable_kick()
{
	assert(0);
	return false;
}
193 bool avail_empty()
195 return __ptr_ring_empty(&array);
198 bool use_buf(unsigned *lenp, void **bufp)
200 void *ptr;
202 ptr = __ptr_ring_consume(&array);
204 return ptr;
/* Used-buffer notifications are not modeled; must never be reached. */
void call_used(void)
{
	assert(0);
}