Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
[cris-mirror.git] / include / linux / skb_array.h
bloba6b6e8bb3d7b8a7b37577d6376d10ea102298a99
/*
 * Definitions for the 'struct skb_array' datastructure.
 *
 * Author:
 *	Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (C) 2016 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Limited-size FIFO of skbs. Can be used more or less whenever
 * sk_buff_head can be used, except you need to know the queue size in
 * advance.
 * Implemented as a type-safe wrapper around ptr_ring.
 */
20 #ifndef _LINUX_SKB_ARRAY_H
21 #define _LINUX_SKB_ARRAY_H 1
23 #ifdef __KERNEL__
24 #include <linux/ptr_ring.h>
25 #include <linux/skbuff.h>
26 #include <linux/if_vlan.h>
27 #endif
29 struct skb_array {
30 struct ptr_ring ring;
33 /* Might be slightly faster than skb_array_full below, but callers invoking
34 * this in a loop must use a compiler barrier, for example cpu_relax().
36 static inline bool __skb_array_full(struct skb_array *a)
38 return __ptr_ring_full(&a->ring);
41 static inline bool skb_array_full(struct skb_array *a)
43 return ptr_ring_full(&a->ring);
46 static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
48 return ptr_ring_produce(&a->ring, skb);
51 static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
53 return ptr_ring_produce_irq(&a->ring, skb);
56 static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
58 return ptr_ring_produce_bh(&a->ring, skb);
61 static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
63 return ptr_ring_produce_any(&a->ring, skb);
66 /* Might be slightly faster than skb_array_empty below, but only safe if the
67 * array is never resized. Also, callers invoking this in a loop must take care
68 * to use a compiler barrier, for example cpu_relax().
70 static inline bool __skb_array_empty(struct skb_array *a)
72 return __ptr_ring_empty(&a->ring);
75 static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
77 return __ptr_ring_peek(&a->ring);
80 static inline bool skb_array_empty(struct skb_array *a)
82 return ptr_ring_empty(&a->ring);
85 static inline bool skb_array_empty_bh(struct skb_array *a)
87 return ptr_ring_empty_bh(&a->ring);
90 static inline bool skb_array_empty_irq(struct skb_array *a)
92 return ptr_ring_empty_irq(&a->ring);
95 static inline bool skb_array_empty_any(struct skb_array *a)
97 return ptr_ring_empty_any(&a->ring);
100 static inline struct sk_buff *skb_array_consume(struct skb_array *a)
102 return ptr_ring_consume(&a->ring);
105 static inline int skb_array_consume_batched(struct skb_array *a,
106 struct sk_buff **array, int n)
108 return ptr_ring_consume_batched(&a->ring, (void **)array, n);
111 static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
113 return ptr_ring_consume_irq(&a->ring);
116 static inline int skb_array_consume_batched_irq(struct skb_array *a,
117 struct sk_buff **array, int n)
119 return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
122 static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
124 return ptr_ring_consume_any(&a->ring);
127 static inline int skb_array_consume_batched_any(struct skb_array *a,
128 struct sk_buff **array, int n)
130 return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
134 static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
136 return ptr_ring_consume_bh(&a->ring);
139 static inline int skb_array_consume_batched_bh(struct skb_array *a,
140 struct sk_buff **array, int n)
142 return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
145 static inline int __skb_array_len_with_tag(struct sk_buff *skb)
147 if (likely(skb)) {
148 int len = skb->len;
150 if (skb_vlan_tag_present(skb))
151 len += VLAN_HLEN;
153 return len;
154 } else {
155 return 0;
159 static inline int skb_array_peek_len(struct skb_array *a)
161 return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
164 static inline int skb_array_peek_len_irq(struct skb_array *a)
166 return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
169 static inline int skb_array_peek_len_bh(struct skb_array *a)
171 return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
174 static inline int skb_array_peek_len_any(struct skb_array *a)
176 return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
179 static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
181 return ptr_ring_init(&a->ring, size, gfp);
/* Destructor callback passed to the ptr_ring teardown/resize helpers below. */
static void __skb_array_destroy_skb(void *ptr)
{
	kfree_skb(ptr);
}
189 static inline void skb_array_unconsume(struct skb_array *a,
190 struct sk_buff **skbs, int n)
192 ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
195 static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
197 return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
200 static inline int skb_array_resize_multiple(struct skb_array **rings,
201 int nrings, unsigned int size,
202 gfp_t gfp)
204 BUILD_BUG_ON(offsetof(struct skb_array, ring));
205 return ptr_ring_resize_multiple((struct ptr_ring **)rings,
206 nrings, size, gfp,
207 __skb_array_destroy_skb);
210 static inline void skb_array_cleanup(struct skb_array *a)
212 ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
215 #endif /* _LINUX_SKB_ARRAY_H */