tools/virtio/ringtest/ptr_ring.c
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
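/*
 * Userspace stand-ins for the kernel facilities that linux/ptr_ring.h
 * relies on: cache-line alignment, branch hints, kmalloc/kzalloc/kfree
 * and spinlocks, all mapped onto their libc/pthread equivalents.
 */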
#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define likely(x) (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
typedef pthread_spinlock_t spinlock_t;
typedef int gfp_t;

static void *kmalloc(unsigned size, gfp_t gfp)
{
	return memalign(64, size);
}

static void *kzalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(64, size);
	if (!p)
		return p;
	memset(p, 0, size);

	return p;
}

static void kfree(void *p)
{
	if (p)
		free(p);
}
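/*
 * Spinlock shims built on pthread spinlocks.  The _bh/_irq/_irqsave
 * variants simply take the plain lock: there are no softirqs or
 * interrupts to mask in a userspace test.
 */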
static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);
	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);
	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);
	assert(!ret);
}

static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}
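/* With the shims above in place, pull in the real ptr_ring implementation. */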
#include "../../../include/linux/ptr_ring.h"
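/*
 * headcnt counts buffers successfully produced, tailcnt counts buffers
 * reported back via get_buf; see the comment above get_buf for why the
 * two are tracked here rather than by the ring itself.
 */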
static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;
/* implemented by ring */
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);
	assert(!ret);
	/* Hacky way to poke at ring internals. Useful for testing though. */
	if (param)
		array.batch = param;
}
/* guest side */
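/* Produce one buffer pointer into the ring; count it in headcnt on success. */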
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}
/*
 * The ptr_ring API provides no way for the producer to find out whether a
 * given buffer was consumed.  Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded in the past and that add_inbuf
 * will succeed; fake it accordingly.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}
bool used_empty()
{
	return (tailcnt == headcnt || __ptr_ring_full(&array));
}
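/*
 * Notification suppression/kick hooks are not implemented for this
 * backend; they assert so any unexpected call is caught immediately.
 */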
void disable_call()
{
	assert(0);
}

bool enable_call()
{
	assert(0);
}

void kick_available(void)
{
	assert(0);
}

/* host side */
void disable_kick()
{
	assert(0);
}

bool enable_kick()
{
	assert(0);
}

bool avail_empty()
{
	return !__ptr_ring_peek(&array);
}

bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr;
}

void call_used(void)
{
	assert(0);
}