On Tue, Nov 06, 2007 at 02:33:53AM -0800, akpm@linux-foundation.org wrote:
[mmotm.git] / include / linux / ioq.h
blobf77e316fc9b930e1e3f25f1d471e3b18a021a085
/*
 * Copyright 2009 Novell.  All Rights Reserved.
 *
 * IOQ is a generic shared-memory, lockless queue mechanism. It can be used
 * in a variety of ways, though its intended purpose is to become the
 * asynchronous communication path for virtual-bus drivers.
 *
 * The following are a list of key design points:
 *
 * #) All shared-memory is always allocated on explicitly one side of the
 *    link.  This typically would be the guest side in a VM/VMM scenario.
 * #) Each IOQ has the concept of "north" and "south" locales, where
 *    north denotes the memory-owner side (e.g. guest).
 * #) An IOQ is manipulated using an iterator idiom.
 * #) Provides a bi-directional signaling/notification infrastructure on
 *    a per-queue basis, which includes an event mitigation strategy
 *    to reduce boundary switching.
 * #) The signaling path is abstracted so that various technologies and
 *    topologies can define their own specific implementation while sharing
 *    the basic structures and code.
 *
 * Author:
 *      Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#ifndef _LINUX_IOQ_H
#define _LINUX_IOQ_H

#include <linux/types.h>
#include <linux/shm_signal.h>
/*
 *---------
 * The following structures represent data that is shared across boundaries
 * which may be quite disparate from one another (e.g. Windows vs Linux,
 * 32 vs 64 bit, etc).  Therefore, care has been taken to make sure they
 * present data in a manner that is independent of the environment.
 *-----------
 */
53 struct ioq_ring_desc {
54 __u64 cookie; /* for arbitrary use by north-side */
55 __u64 ptr;
56 __u64 len;
57 __u8 valid;
58 __u8 sown; /* South owned = 1, North owned = 0 */
/* Identifies a properly initialized ring header in shared memory */
#define IOQ_RING_MAGIC 0x47fa2fe4
/* ABI version of the shared ring layout */
#define IOQ_RING_VER   4
64 struct ioq_ring_idx {
65 __u32 head; /* 0 based index to head of ptr array */
66 __u32 tail; /* 0 based index to tail of ptr array */
67 __u8 full;
/*
 * enum ioq_locality - which side of the shared-memory link this context is.
 * "North" is the memory-owner side (e.g. the guest in a VM/VMM scenario).
 */
enum ioq_locality {
	ioq_locality_north,
	ioq_locality_south,
};
75 struct ioq_ring_head {
76 __u32 magic;
77 __u32 ver;
78 struct shm_signal_desc signal;
79 struct ioq_ring_idx idx[2];
80 __u32 count;
81 struct ioq_ring_desc ring[1]; /* "count" elements will be allocated */
/*
 * IOQ_HEAD_DESC_SIZE() - bytes needed for a ring header with "count" slots.
 * The header already embeds one descriptor (ring[1]), hence "count - 1".
 * NOTE: the macro argument is fully parenthesized to keep the expansion
 * safe for compound expressions such as IOQ_HEAD_DESC_SIZE(a + b).
 */
#define IOQ_HEAD_DESC_SIZE(count) \
	(sizeof(struct ioq_ring_head) + sizeof(struct ioq_ring_desc) * ((count) - 1))
/* --- END SHARED STRUCTURES --- */

#ifdef __KERNEL__

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/shm_signal.h>
#include <linux/kref.h>
/*
 * enum ioq_idx_type - selects which ring index an operation applies to.
 * "valid" tracks descriptors that are valid but may carry no payload;
 * "inuse" tracks descriptors carrying useful payload; "both" applies an
 * operation to the two indexes simultaneously.
 */
enum ioq_idx_type {
	ioq_idxtype_valid,
	ioq_idxtype_inuse,
	ioq_idxtype_both,
	ioq_idxtype_invalid,
};
/*
 * enum ioq_seek_type - seek modes for ioq_iter_seek() (see that function's
 * documentation for the meaning of each mode).
 */
enum ioq_seek_type {
	ioq_seek_tail,
	ioq_seek_next,
	ioq_seek_head,
	ioq_seek_set
};
111 struct ioq_iterator {
112 struct ioq *ioq;
113 struct ioq_ring_idx *idx;
114 u32 pos;
115 struct ioq_ring_desc *desc;
116 int update:1;
117 int dualidx:1;
118 int flipowner:1;
/*
 * struct ioq_notifier - client-supplied callback invoked when the remote
 * side signals the queue (subject to ioq_notify_enable()/disable()).
 */
struct ioq_notifier {
	void (*signal)(struct ioq_notifier *);
};
/*
 * struct ioq_ops - implementation hooks for a particular IOQ backend.
 * @release: final teardown, invoked when the last kref is dropped
 *           (see _ioq_kref_release()).
 */
struct ioq_ops {
	void     (*release)(struct ioq *ioq);
};
129 struct ioq {
130 struct ioq_ops *ops;
132 struct kref kref;
133 enum ioq_locality locale;
134 struct ioq_ring_head *head_desc;
135 struct ioq_ring_desc *ring;
136 struct shm_signal *signal;
137 wait_queue_head_t wq;
138 struct ioq_notifier *notifier;
139 size_t count;
140 struct shm_signal_notifier shm_notifier;
/* Iterator behavior flags for ioq_iter_init() */
#define IOQ_ITER_AUTOUPDATE  (1 << 0) /* signal remote side on each push/pop */
#define IOQ_ITER_NOFLIPOWNER (1 << 1) /* do not flip ownership on push/pop */
147 * ioq_init() - initialize an IOQ
148 * @ioq: IOQ context
150 * Initializes IOQ context before first use
153 void ioq_init(struct ioq *ioq,
154 struct ioq_ops *ops,
155 enum ioq_locality locale,
156 struct ioq_ring_head *head,
157 struct shm_signal *signal,
158 size_t count);
161 * ioq_get() - acquire an IOQ context reference
162 * @ioq: IOQ context
165 static inline struct ioq *ioq_get(struct ioq *ioq)
167 kref_get(&ioq->kref);
169 return ioq;
172 static inline void _ioq_kref_release(struct kref *kref)
174 struct ioq *ioq = container_of(kref, struct ioq, kref);
176 shm_signal_put(ioq->signal);
177 ioq->ops->release(ioq);
181 * ioq_put() - release an IOQ context reference
182 * @ioq: IOQ context
185 static inline void ioq_put(struct ioq *ioq)
187 kref_put(&ioq->kref, _ioq_kref_release);
191 * ioq_notify_enable() - enables local notifications on an IOQ
192 * @ioq: IOQ context
193 * @flags: Reserved for future use, must be 0
195 * Enables/unmasks the registered ioq_notifier (if applicable) and waitq to
196 * receive wakeups whenever the remote side performs an ioq_signal() operation.
197 * A notification will be dispatched immediately if any pending signals have
198 * already been issued prior to invoking this call.
200 * This is synonymous with unmasking an interrupt.
202 * Returns: success = 0, <0 = ERRNO
205 static inline int ioq_notify_enable(struct ioq *ioq, int flags)
207 return shm_signal_enable(ioq->signal, 0);
211 * ioq_notify_disable() - disable local notifications on an IOQ
212 * @ioq: IOQ context
213 * @flags: Reserved for future use, must be 0
215 * Disables/masks the registered ioq_notifier (if applicable) and waitq
216 * from receiving any further notifications. Any subsequent calls to
217 * ioq_signal() by the remote side will update the ring as dirty, but
218 * will not traverse the locale boundary and will not invoke the notifier
219 * callback or wakeup the waitq. Signals delivered while masked will
220 * be deferred until ioq_notify_enable() is invoked
222 * This is synonymous with masking an interrupt
224 * Returns: success = 0, <0 = ERRNO
227 static inline int ioq_notify_disable(struct ioq *ioq, int flags)
229 return shm_signal_disable(ioq->signal, 0);
233 * ioq_signal() - notify the remote side about ring changes
234 * @ioq: IOQ context
235 * @flags: Reserved for future use, must be 0
237 * Marks the ring state as "dirty" and, if enabled, will traverse
238 * a locale boundary to invoke a remote notification. The remote
239 * side controls whether the notification should be delivered via
240 * the ioq_notify_enable/disable() interface.
242 * The specifics of how to traverse a locale boundary are abstracted
243 * by the ioq_ops->signal() interface and provided by a particular
244 * implementation. However, typically going north to south would be
245 * something like a syscall/hypercall, and going south to north would be
246 * something like a posix-signal/guest-interrupt.
248 * Returns: success = 0, <0 = ERRNO
251 static inline int ioq_signal(struct ioq *ioq, int flags)
253 return shm_signal_inject(ioq->signal, 0);
/**
 * ioq_count() - counts the number of outstanding descriptors in an index
 * @ioq:    IOQ context
 * @type:   Specifies the index type
 *             (*) valid: the descriptor is valid.  This is usually
 *                 used to keep track of descriptors that may not
 *                 be carrying a useful payload, but still need to
 *                 be tracked carefully.
 *             (*) inuse: Descriptors that carry useful payload
 *
 * Returns:
 *  (*) >=0: # of descriptors outstanding in the index
 *  (*) <0 = ERRNO
 *
 **/
int ioq_count(struct ioq *ioq, enum ioq_idx_type type);
/**
 * ioq_remain() - counts the number of remaining descriptors in an index
 * @ioq:    IOQ context
 * @type:   Specifies the index type
 *             (*) valid: the descriptor is valid.  This is usually
 *                 used to keep track of descriptors that may not
 *                 be carrying a useful payload, but still need to
 *                 be tracked carefully.
 *             (*) inuse: Descriptors that carry useful payload
 *
 * This is the converse of ioq_count().  This function returns the number
 * of "free" descriptors left in a particular index.
 *
 * Returns:
 *  (*) >=0: # of descriptors remaining in the index
 *  (*) <0 = ERRNO
 *
 **/
int ioq_remain(struct ioq *ioq, enum ioq_idx_type type);
/**
 * ioq_size() - counts the maximum number of descriptors in a ring
 * @ioq:    IOQ context
 *
 * This function returns the maximum number of descriptors supported in
 * a ring, regardless of their current state (free or inuse).
 *
 * Returns:
 *  (*) >=0: total # of descriptors in the ring
 *  (*) <0 = ERRNO
 *
 **/
int ioq_size(struct ioq *ioq);
/**
 * ioq_full() - determines if a specific index is "full"
 * @ioq:    IOQ context
 * @type:   Specifies the index type
 *             (*) valid: the descriptor is valid.  This is usually
 *                 used to keep track of descriptors that may not
 *                 be carrying a useful payload, but still need to
 *                 be tracked carefully.
 *             (*) inuse: Descriptors that carry useful payload
 *
 * Returns:
 *  (*) 0: index is not full
 *  (*) 1: index is full
 *  (*) <0 = ERRNO
 *
 **/
int ioq_full(struct ioq *ioq, enum ioq_idx_type type);
326 * ioq_empty() - determines if a specific index is "empty"
327 * @ioq: IOQ context
328 * @type: Specifies the index type
329 * (*) valid: the descriptor is valid. This is usually
330 * used to keep track of descriptors that may not
331 * be carrying a useful payload, but still need to
332 * be tracked carefully.
333 * (*) inuse: Descriptors that carry useful payload
335 * Returns:
336 * (*) 0: index is not empty
337 * (*) 1: index is empty
338 * (*) <0 = ERRNO
341 static inline int ioq_empty(struct ioq *ioq, enum ioq_idx_type type)
343 return !ioq_count(ioq, type);
/**
 * ioq_iter_init() - initialize an iterator for IOQ descriptor traversal
 * @ioq:    IOQ context to iterate on
 * @iter:   Iterator context to init (usually from stack)
 * @type:   Specifies the index type to iterate against
 *             (*) valid: iterate against the "valid" index
 *             (*) inuse: iterate against the "inuse" index
 *             (*) both: iterate against both indexes simultaneously
 * @flags:  Bitfield with 0 or more bits set to alter behavior
 *             (*) autoupdate: automatically signal the remote side
 *                 whenever the iterator pushes/pops to a new desc
 *             (*) noflipowner: do not flip the ownership bit during
 *                 a push/pop operation
 *
 * Returns: success = 0, <0 = ERRNO
 *
 **/
int ioq_iter_init(struct ioq *ioq, struct ioq_iterator *iter,
		  enum ioq_idx_type type, int flags);
/**
 * ioq_iter_seek() - seek to a specific location in the IOQ ring
 * @iter:   Iterator context (must be initialized with ioq_iter_init)
 * @type:   Specifies the type of seek operation
 *             (*) tail: seek to the absolute tail, offset is ignored
 *             (*) next: seek to the relative next, offset is ignored
 *             (*) head: seek to the absolute head, offset is ignored
 *             (*) set: seek to the absolute offset
 * @offset: Offset for ioq_seek_set operations
 * @flags:  Reserved for future use, must be 0
 *
 * Returns: success = 0, <0 = ERRNO
 *
 **/
int ioq_iter_seek(struct ioq_iterator *iter, enum ioq_seek_type type,
		  long offset, int flags);
/**
 * ioq_iter_push() - push the tail pointer forward
 * @iter:   Iterator context (must be initialized with ioq_iter_init)
 * @flags:  Reserved for future use, must be 0
 *
 * This function will simultaneously advance the tail ptr in the current
 * index (valid/inuse, as specified in the ioq_iter_init) as well as
 * perform a seek(next) operation.  This effectively "pushes" a new pointer
 * onto the tail of the index.
 *
 * Returns: success = 0, <0 = ERRNO
 *
 **/
int ioq_iter_push(struct ioq_iterator *iter, int flags);
/**
 * ioq_iter_pop() - pop the head pointer from the ring
 * @iter:   Iterator context (must be initialized with ioq_iter_init)
 * @flags:  Reserved for future use, must be 0
 *
 * This function will simultaneously advance the head ptr in the current
 * index (valid/inuse, as specified in the ioq_iter_init) as well as
 * perform a seek(next) operation.  This effectively "pops" a pointer
 * from the head of the index.
 *
 * Returns: success = 0, <0 = ERRNO
 *
 **/
int ioq_iter_pop(struct ioq_iterator *iter, int flags);
#endif /* __KERNEL__ */

#endif /* _LINUX_IOQ_H */