/*
  Copyright 2008-2013 David Robillard <http://drobilla.net>

  Permission to use, copy, modify, and/or distribute this software for any
  purpose with or without fee is hereby granted, provided that the above
  copyright notice and this permission notice appear in all copies.

  THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/**
   @file util.h Helper functions for the LV2 Atom extension.

   Note that these functions are all static inline; do not take their address.

   This header is non-normative; it is provided for convenience.
*/
#ifndef LV2_ATOM_UTIL_H
#define LV2_ATOM_UTIL_H

#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "lv2/lv2plug.in/ns/ext/atom/atom.h"
/** Pad a size to 64 bits. */
static inline uint32_t
lv2_atom_pad_size(uint32_t size)
{
  return (size + 7U) & (~7U);
}
/** Return the total size of @p atom, including the header. */
static inline uint32_t
lv2_atom_total_size(const LV2_Atom* atom)
{
  return (uint32_t)sizeof(LV2_Atom) + atom->size;
}
/** Return true iff @p atom is null. */
static inline bool
lv2_atom_is_null(const LV2_Atom* atom)
{
  return !atom || (atom->type == 0 && atom->size == 0);
}
/** Return true iff @p a is equal to @p b. */
static inline bool
lv2_atom_equals(const LV2_Atom* a, const LV2_Atom* b)
{
  return (a == b) || ((a->type == b->type) &&
                      (a->size == b->size) &&
                      !memcmp(a + 1, b + 1, a->size));
}
/**
   @name Sequence Iterator
   @{
*/
/** Get an iterator pointing to the first event in a Sequence body. */
static inline LV2_Atom_Event*
lv2_atom_sequence_begin(const LV2_Atom_Sequence_Body* body)
{
  return (LV2_Atom_Event*)(body + 1);
}
/** Get an iterator pointing to the end of a Sequence body. */
static inline LV2_Atom_Event*
lv2_atom_sequence_end(const LV2_Atom_Sequence_Body* body, uint32_t size)
{
  return (LV2_Atom_Event*)((const uint8_t*)body + lv2_atom_pad_size(size));
}
/** Return true iff @p i has reached the end of @p body. */
static inline bool
lv2_atom_sequence_is_end(const LV2_Atom_Sequence_Body* body,
                         uint32_t                      size,
                         const LV2_Atom_Event*         i)
{
  return (const uint8_t*)i >= ((const uint8_t*)body + size);
}
/** Return an iterator to the element following @p i. */
static inline LV2_Atom_Event*
lv2_atom_sequence_next(const LV2_Atom_Event* i)
{
  return (LV2_Atom_Event*)((const uint8_t*)i
                           + sizeof(LV2_Atom_Event)
                           + lv2_atom_pad_size(i->body.size));
}
/**
   A macro for iterating over all events in a Sequence.
   @param seq  The sequence to iterate over
   @param iter The name of the iterator

   This macro is used similarly to a for loop (which it expands to), e.g.:
   @code
   LV2_ATOM_SEQUENCE_FOREACH(sequence, ev) {
       // Do something with ev (an LV2_Atom_Event*) here...
   }
   @endcode
*/
#define LV2_ATOM_SEQUENCE_FOREACH(seq, iter) \
  for (LV2_Atom_Event* (iter) = lv2_atom_sequence_begin(&(seq)->body); \
       !lv2_atom_sequence_is_end(&(seq)->body, (seq)->atom.size, (iter)); \
       (iter) = lv2_atom_sequence_next(iter))

/** Like LV2_ATOM_SEQUENCE_FOREACH but for a headerless sequence body. */
#define LV2_ATOM_SEQUENCE_BODY_FOREACH(body, size, iter) \
  for (LV2_Atom_Event* (iter) = lv2_atom_sequence_begin(body); \
       !lv2_atom_sequence_is_end(body, size, (iter)); \
       (iter) = lv2_atom_sequence_next(iter))
/**
   @}
   @name Sequence Utilities
   @{
*/
/**
   Clear all events from @p seq.

   This simply resets the size field; the other fields are left untouched.
*/
static inline void
lv2_atom_sequence_clear(LV2_Atom_Sequence* seq)
{
  seq->atom.size = sizeof(LV2_Atom_Sequence_Body);
}
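/*
   Example (editor's sketch, not part of the original header): a typical use
   of lv2_atom_sequence_clear() is to reset a sequence output port at the
   start of a plugin's run() callback, before any new events are appended.
   The plugin struct and port field names below are hypothetical.

     MyPlugin*          self = (MyPlugin*)instance;
     LV2_Atom_Sequence* out  = self->notify_port;  // connected sequence output

     lv2_atom_sequence_clear(out);  // size now covers only the sequence body
*/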
/**
   Append an event at the end of @p seq.

   @param seq Sequence to append to.
   @param capacity Total capacity of the sequence atom
   (e.g. as set by the host for sequence output ports).
   @param event Event to write.

   @return A pointer to the newly written event in @p seq,
   or NULL on failure (insufficient space).
*/
static inline LV2_Atom_Event*
lv2_atom_sequence_append_event(LV2_Atom_Sequence*    seq,
                               uint32_t              capacity,
                               const LV2_Atom_Event* event)
{
  const uint32_t total_size = (uint32_t)sizeof(*event) + event->body.size;
  if (capacity - seq->atom.size < total_size) {
    return NULL;
  }

  LV2_Atom_Event* e = lv2_atom_sequence_end(&seq->body, seq->atom.size);
  memcpy(e, event, total_size);

  seq->atom.size += lv2_atom_pad_size(total_size);

  return e;
}
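/*
   Example (editor's sketch, not part of the original header): forwarding all
   events from a sequence input port to a sequence output port.  The host
   communicates the output port's capacity through the size it sets on the
   output atom before run(), so the capacity must be saved before clearing.
   The port field names below are hypothetical.

     const uint32_t capacity = self->out_port->atom.size;
     lv2_atom_sequence_clear(self->out_port);

     LV2_ATOM_SEQUENCE_FOREACH(self->in_port, ev) {
       if (!lv2_atom_sequence_append_event(self->out_port, capacity, ev)) {
         break;  // output is full; remaining events are dropped
       }
     }
*/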
/**
   @}
   @name Tuple Iterator
   @{
*/

/** Get an iterator pointing to the first element in @p tup. */
static inline LV2_Atom*
lv2_atom_tuple_begin(const LV2_Atom_Tuple* tup)
{
  return (LV2_Atom*)(LV2_ATOM_BODY(tup));
}
/** Return true iff @p i has reached the end of @p body. */
static inline bool
lv2_atom_tuple_is_end(const void* body, uint32_t size, const LV2_Atom* i)
{
  return (const uint8_t*)i >= ((const uint8_t*)body + size);
}
/** Return an iterator to the element following @p i. */
static inline LV2_Atom*
lv2_atom_tuple_next(const LV2_Atom* i)
{
  return (LV2_Atom*)(
    (const uint8_t*)i + sizeof(LV2_Atom) + lv2_atom_pad_size(i->size));
}
/**
   A macro for iterating over all elements of a Tuple.
   @param tuple The tuple to iterate over
   @param iter  The name of the iterator

   This macro is used similarly to a for loop (which it expands to), e.g.:
   @code
   LV2_ATOM_TUPLE_FOREACH(tuple, elem) {
       // Do something with elem (an LV2_Atom*) here...
   }
   @endcode
*/
#define LV2_ATOM_TUPLE_FOREACH(tuple, iter) \
  for (LV2_Atom* (iter) = lv2_atom_tuple_begin(tuple); \
       !lv2_atom_tuple_is_end(LV2_ATOM_BODY(tuple), (tuple)->size, (iter)); \
       (iter) = lv2_atom_tuple_next(iter))

/** Like LV2_ATOM_TUPLE_FOREACH but for a headerless tuple body. */
#define LV2_ATOM_TUPLE_BODY_FOREACH(body, size, iter) \
  for (LV2_Atom* (iter) = (LV2_Atom*)body; \
       !lv2_atom_tuple_is_end(body, size, (iter)); \
       (iter) = lv2_atom_tuple_next(iter))
/**
   @}
   @name Object Iterator
   @{
*/
/** Return a pointer to the first property in @p body. */
static inline LV2_Atom_Property_Body*
lv2_atom_object_begin(const LV2_Atom_Object_Body* body)
{
  return (LV2_Atom_Property_Body*)(body + 1);
}
/** Return true iff @p i has reached the end of @p body. */
static inline bool
lv2_atom_object_is_end(const LV2_Atom_Object_Body*   body,
                       uint32_t                      size,
                       const LV2_Atom_Property_Body* i)
{
  return (const uint8_t*)i >= ((const uint8_t*)body + size);
}
/** Return an iterator to the property following @p i. */
static inline LV2_Atom_Property_Body*
lv2_atom_object_next(const LV2_Atom_Property_Body* i)
{
  const LV2_Atom* const value = (const LV2_Atom*)(
    (const uint8_t*)i + 2 * sizeof(uint32_t));
  return (LV2_Atom_Property_Body*)(
    (const uint8_t*)i + lv2_atom_pad_size(
      (uint32_t)sizeof(LV2_Atom_Property_Body) + value->size));
}
/**
   A macro for iterating over all properties of an Object.
   @param obj  The object to iterate over
   @param iter The name of the iterator

   This macro is used similarly to a for loop (which it expands to), e.g.:
   @code
   LV2_ATOM_OBJECT_FOREACH(object, prop) {
       // Do something with prop (an LV2_Atom_Property_Body*) here...
   }
   @endcode
*/
#define LV2_ATOM_OBJECT_FOREACH(obj, iter) \
  for (LV2_Atom_Property_Body* (iter) = lv2_atom_object_begin(&(obj)->body); \
       !lv2_atom_object_is_end(&(obj)->body, (obj)->atom.size, (iter)); \
       (iter) = lv2_atom_object_next(iter))

/** Like LV2_ATOM_OBJECT_FOREACH but for a headerless object body. */
#define LV2_ATOM_OBJECT_BODY_FOREACH(body, size, iter) \
  for (LV2_Atom_Property_Body* (iter) = lv2_atom_object_begin(body); \
       !lv2_atom_object_is_end(body, size, (iter)); \
       (iter) = lv2_atom_object_next(iter))
/**
   @}
   @name Object Query
   @{
*/

/** A single entry in an Object query. */
typedef struct {
  uint32_t         key;    /**< Key to query (input set by user) */
  const LV2_Atom** value;  /**< Found value (output set by query function) */
} LV2_Atom_Object_Query;

static const LV2_Atom_Object_Query LV2_ATOM_OBJECT_QUERY_END = { 0, NULL };
/**
   Get an object's values for various keys.

   The value pointer of each item in @p query will be set to the location of
   the corresponding value in @p object.  Every value pointer in @p query MUST
   be initialised to NULL.  This function reads @p object in a single linear
   sweep.  By allocating @p query on the stack, objects can be "queried"
   quickly without allocating any memory.  This function is realtime safe.

   This function can only do "flat" queries; it is not smart enough to match
   variables in nested objects.

   For example:
   @code
   const LV2_Atom* name = NULL;
   const LV2_Atom* age  = NULL;
   LV2_Atom_Object_Query q[] = {
       { urids.eg_name, &name },
       { urids.eg_age,  &age },
       LV2_ATOM_OBJECT_QUERY_END
   };
   lv2_atom_object_query(obj, q);
   // name and age are now set to the appropriate values in obj, or NULL.
   @endcode
*/
static inline int
lv2_atom_object_query(const LV2_Atom_Object* object,
                      LV2_Atom_Object_Query* query)
{
  int matches   = 0;
  int n_queries = 0;

  /* Count number of query keys so we can short-circuit when done */
  for (LV2_Atom_Object_Query* q = query; q->key; ++q) {
    ++n_queries;
  }

  LV2_ATOM_OBJECT_FOREACH(object, prop) {
    for (LV2_Atom_Object_Query* q = query; q->key; ++q) {
      if (q->key == prop->key && !*q->value) {
        *q->value = &prop->value;
        if (++matches == n_queries) {
          return matches;
        }
        break;
      }
    }
  }
  return matches;
}
/**
   Body-only version of lv2_atom_object_get().
*/
static inline int
lv2_atom_object_body_get(uint32_t size, const LV2_Atom_Object_Body* body, ...)
{
  int     matches   = 0;
  int     n_queries = 0;
  va_list args;

  /* Count number of keys so we can short-circuit when done */
  va_start(args, body);
  for (n_queries = 0; va_arg(args, uint32_t); ++n_queries) {
    if (!va_arg(args, const LV2_Atom**)) {
      va_end(args);
      return -1;
    }
  }
  va_end(args);

  LV2_ATOM_OBJECT_BODY_FOREACH(body, size, prop) {
    va_start(args, body);
    for (int i = 0; i < n_queries; ++i) {
      uint32_t         qkey = va_arg(args, uint32_t);
      const LV2_Atom** qval = va_arg(args, const LV2_Atom**);
      if (qkey == prop->key && !*qval) {
        *qval = &prop->value;
        if (++matches == n_queries) {
          va_end(args);
          return matches;
        }
        break;
      }
    }
    va_end(args);
  }
  return matches;
}
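/*
   Example (editor's sketch, not part of the original header): querying a
   property directly from an object body, for instance in a function that
   only receives the body and size of an atom:Object rather than the full
   atom.  The URID field name below (urids.eg_gain) is hypothetical.

     const LV2_Atom* gain = NULL;
     lv2_atom_object_body_get(size, body,
                              urids.eg_gain, &gain,
                              0);
     // gain now points to the property value in the object, or is NULL.
*/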
/**
   Variable argument version of lv2_atom_object_query().

   This is nicer-looking in code, but a bit more error-prone since it is not
   type safe and the argument list must be terminated.

   The arguments should be a series of uint32_t key and const LV2_Atom** value
   pairs, terminated by a zero key.  The value pointers MUST be initialised to
   NULL.  For example:

   @code
   const LV2_Atom* name = NULL;
   const LV2_Atom* age  = NULL;
   lv2_atom_object_get(obj,
                       uris.name_key, &name,
                       uris.age_key,  &age,
                       0);
   @endcode
*/
static inline int
lv2_atom_object_get(const LV2_Atom_Object* object, ...)
{
  int     matches   = 0;
  int     n_queries = 0;
  va_list args;

  /* Count number of keys so we can short-circuit when done */
  va_start(args, object);
  for (n_queries = 0; va_arg(args, uint32_t); ++n_queries) {
    if (!va_arg(args, const LV2_Atom**)) {
      va_end(args);
      return -1;
    }
  }
  va_end(args);

  LV2_ATOM_OBJECT_FOREACH(object, prop) {
    va_start(args, object);
    for (int i = 0; i < n_queries; ++i) {
      uint32_t         qkey = va_arg(args, uint32_t);
      const LV2_Atom** qval = va_arg(args, const LV2_Atom**);
      if (qkey == prop->key && !*qval) {
        *qval = &prop->value;
        if (++matches == n_queries) {
          va_end(args);
          return matches;
        }
        break;
      }
    }
    va_end(args);
  }
  return matches;
}

/**
   @}
*/
#endif /* LV2_ATOM_UTIL_H */