1 // SPDX-License-Identifier: GPL-2.0
/*
 * Minimal userspace stand-ins for the kernel macros that ptr_ring.h expects.
 */
#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define likely(x) (__builtin_expect(!!(x), 1))
/* Round x up to the next multiple of a (a need not be a power of two). */
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
#define SIZE_MAX (~(size_t)0)
#define KMALLOC_MAX_SIZE SIZE_MAX
/*
 * Kernel BUG_ON() fires when the condition is TRUE, while assert() fires
 * when it is false — the condition must therefore be negated here, or
 * every legitimate BUG_ON(0) check would abort the test.
 */
#define BUG_ON(x) assert(!(x))
23 typedef pthread_spinlock_t spinlock_t
;
26 #define __GFP_ZERO 0x1
28 static void *kmalloc(unsigned size
, gfp_t gfp
)
30 void *p
= memalign(64, size
);
39 static inline void *kzalloc(unsigned size
, gfp_t flags
)
41 return kmalloc(size
, flags
| __GFP_ZERO
);
44 static inline void *kmalloc_array(size_t n
, size_t size
, gfp_t flags
)
46 if (size
!= 0 && n
> SIZE_MAX
/ size
)
48 return kmalloc(n
* size
, flags
);
51 static inline void *kcalloc(size_t n
, size_t size
, gfp_t flags
)
53 return kmalloc_array(n
, size
, flags
| __GFP_ZERO
);
/*
 * kfree - release memory obtained from kmalloc()/kzalloc().
 * The visible copy had no body at all, leaking every allocation.
 * free(NULL) is a no-op, so no NULL guard is needed.
 */
static void kfree(void *p)
{
	free(p);
}
/* No vmalloc fallback is needed in userspace; plain kmalloc_array suffices. */
#define kvmalloc_array kmalloc_array
65 static void spin_lock_init(spinlock_t
*lock
)
67 int r
= pthread_spin_init(lock
, 0);
71 static void spin_lock(spinlock_t
*lock
)
73 int ret
= pthread_spin_lock(lock
);
77 static void spin_unlock(spinlock_t
*lock
)
79 int ret
= pthread_spin_unlock(lock
);
83 static void spin_lock_bh(spinlock_t
*lock
)
88 static void spin_unlock_bh(spinlock_t
*lock
)
93 static void spin_lock_irq(spinlock_t
*lock
)
98 static void spin_unlock_irq(spinlock_t
*lock
)
103 static void spin_lock_irqsave(spinlock_t
*lock
, unsigned long f
)
108 static void spin_unlock_irqrestore(spinlock_t
*lock
, unsigned long f
)
113 #include "../../../include/linux/ptr_ring.h"
/*
 * Bookkeeping counters compared in get_buf() to fake "was my buffer
 * consumed" semantics (ptr_ring offers no such query — see comment below).
 * NOTE(review): appears to assume single-producer/single-consumer use with
 * external synchronization — confirm against the test harness.
 */
static unsigned long long headcnt, tailcnt;
/* The ring under test, cache-line aligned to avoid false sharing. */
static struct ptr_ring array ____cacheline_aligned_in_smp;
118 /* implemented by ring */
119 void alloc_ring(void)
121 int ret
= ptr_ring_init(&array
, ring_size
, 0);
123 /* Hacky way to poke at ring internals. Useful for testing though. */
129 int add_inbuf(unsigned len
, void *buf
, void *datap
)
133 ret
= __ptr_ring_produce(&array
, buf
);
 * The ptr_ring API provides no way for a producer to find out whether a
 * given buffer was consumed. Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded in the past, and that add_inbuf
 * will succeed again; fake it accordingly.
148 void *get_buf(unsigned *lenp
, void **bufp
)
152 if (tailcnt
== headcnt
|| __ptr_ring_full(&array
))
164 return (tailcnt
== headcnt
|| __ptr_ring_full(&array
));
/*
 * kick_available - notify the consumer that buffers are available.
 * Intentionally a no-op: this userspace harness has no notification
 * mechanism and the peer polls instead (body not visible in this copy —
 * confirm against the original).
 */
void kick_available(void)
{
}
195 return __ptr_ring_empty(&array
);
/*
 * use_buf - consume one buffer from the ring; @lenp and @bufp are unused
 * by this ring flavour.
 * NOTE(review): this copy is truncated — the declaration of `ptr`, the
 * function braces, and the return statement are missing, and the body
 * continues past the end of the visible chunk. Restore the remainder from
 * the original before building.
 */
bool use_buf(unsigned *lenp, void **bufp)
ptr = __ptr_ring_consume(&array);