/* $NetBSD: regress.gen.c,v 1.1.1.2 2013/04/11 16:43:32 christos Exp $ */
/*
 * Automatically generated from ./regress.rpc
 * by event_rpcgen.py/0.1.  DO NOT EDIT THIS FILE.
 */

#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <event2/event-config.h>
#include <event2/event.h>
#include <event2/buffer.h>
#include <event2/tag.h>

#ifdef _EVENT___func__
#define __func__ _EVENT___func__
#endif

#include "regress.gen.h"

void event_warn(const char *fmt, ...);
void event_warnx(const char *fmt, ...);

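/*
 * The msg, kill, and run types below all follow the same generated
 * pattern: a <type>_new()/<type>_free() pair, per-field _assign/_get
 * accessors, and _marshal/_unmarshal plus evtag wrappers for tagged
 * buffers.  A typical caller-side round trip might look like the
 * following sketch (the tag value 0xdeaf is only a placeholder chosen
 * here for illustration):
 *
 *   struct evbuffer *buf = evbuffer_new();
 *   struct msg *out = msg_new(), *in = msg_new();
 *   msg_from_name_assign(out, "alice");
 *   msg_to_name_assign(out, "bob");
 *   evtag_marshal_msg(buf, 0xdeaf, out);
 *   if (evtag_unmarshal_msg(buf, 0xdeaf, in) == -1)
 *       event_warnx("%s: round trip failed", __func__);
 *   msg_free(out); msg_free(in); evbuffer_free(buf);
 */
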
/*
 * Implementation of msg
 */

static struct msg_access_ __msg_base = {
  msg_from_name_assign,
  msg_from_name_get,
  msg_to_name_assign,
  msg_to_name_get,
  msg_attack_assign,
  msg_attack_get,
  msg_run_assign,
  msg_run_get,
  msg_run_add,
};

struct msg *
msg_new(void)
{
  return msg_new_with_arg(NULL);
}

struct msg *
msg_new_with_arg(void *unused)
{
  struct msg *tmp;
  if ((tmp = malloc(sizeof(struct msg))) == NULL) {
    event_warn("%s: malloc", __func__);
    return (NULL);
  }
  tmp->base = &__msg_base;

  tmp->from_name_data = NULL;
  tmp->from_name_set = 0;

  tmp->to_name_data = NULL;
  tmp->to_name_set = 0;

  tmp->attack_data = NULL;
  tmp->attack_set = 0;

  tmp->run_data = NULL;
  tmp->run_length = 0;
  tmp->run_num_allocated = 0;
  tmp->run_set = 0;

  return (tmp);
}

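/*
 * The *_expand_to_hold_more() helpers below grow a repeated field's
 * backing array geometrically: the allocation starts at one element and
 * doubles each time it fills up, so repeated *_add() calls stay cheap.
 */
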
static int
msg_run_expand_to_hold_more(struct msg *msg)
{
  int tobe_allocated = msg->run_num_allocated;
  struct run** new_data = NULL;
  tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;
  new_data = (struct run**) realloc(msg->run_data,
      tobe_allocated * sizeof(struct run*));
  if (new_data == NULL)
    return -1;
  msg->run_data = new_data;
  msg->run_num_allocated = tobe_allocated;
  return 0;
}

struct run *
msg_run_add(struct msg *msg)
{
  if (++msg->run_length >= msg->run_num_allocated) {
    if (msg_run_expand_to_hold_more(msg) < 0)
      goto error;
  }
  msg->run_data[msg->run_length - 1] = run_new();
  if (msg->run_data[msg->run_length - 1] == NULL)
    goto error;
  msg->run_set = 1;
  return (msg->run_data[msg->run_length - 1]);
error:
  --msg->run_length;
  return (NULL);
}

int
msg_from_name_assign(struct msg *msg,
    const char * value)
{
  if (msg->from_name_data != NULL)
    free(msg->from_name_data);
  if ((msg->from_name_data = strdup(value)) == NULL)
    return (-1);
  msg->from_name_set = 1;
  return (0);
}

int
msg_to_name_assign(struct msg *msg,
    const char * value)
{
  if (msg->to_name_data != NULL)
    free(msg->to_name_data);
  if ((msg->to_name_data = strdup(value)) == NULL)
    return (-1);
  msg->to_name_set = 1;
  return (0);
}

int
msg_attack_assign(struct msg *msg,
    const struct kill* value)
{
  struct evbuffer *tmp = NULL;
  if (msg->attack_set) {
    kill_clear(msg->attack_data);
    msg->attack_set = 0;
  } else {
    msg->attack_data = kill_new();
    if (msg->attack_data == NULL) {
      event_warn("%s: kill_new()", __func__);
      goto error;
    }
  }
  if ((tmp = evbuffer_new()) == NULL) {
    event_warn("%s: evbuffer_new()", __func__);
    goto error;
  }
  kill_marshal(tmp, value);
  if (kill_unmarshal(msg->attack_data, tmp) == -1) {
    event_warnx("%s: kill_unmarshal", __func__);
    goto error;
  }
  msg->attack_set = 1;
  evbuffer_free(tmp);
  return (0);
error:
  if (tmp != NULL)
    evbuffer_free(tmp);
  if (msg->attack_data != NULL) {
    kill_free(msg->attack_data);
    msg->attack_data = NULL;
  }
  return (-1);
}

int
msg_run_assign(struct msg *msg, int off,
    const struct run* value)
{
  if (!msg->run_set || off < 0 || off >= msg->run_length)
    return (-1);

  {
    int had_error = 0;
    struct evbuffer *tmp = NULL;
    run_clear(msg->run_data[off]);
    if ((tmp = evbuffer_new()) == NULL) {
      event_warn("%s: evbuffer_new()", __func__);
      had_error = 1;
      goto done;
    }
    run_marshal(tmp, value);
    if (run_unmarshal(msg->run_data[off], tmp) == -1) {
      event_warnx("%s: run_unmarshal", __func__);
      had_error = 1;
      goto done;
    }
done:
    if (tmp != NULL)
      evbuffer_free(tmp);
    if (had_error) {
      run_clear(msg->run_data[off]);
      return (-1);
    }
  }
  return (0);
}

int
msg_from_name_get(struct msg *msg, char * *value)
{
  if (msg->from_name_set != 1)
    return (-1);
  *value = msg->from_name_data;
  return (0);
}

int
msg_to_name_get(struct msg *msg, char * *value)
{
  if (msg->to_name_set != 1)
    return (-1);
  *value = msg->to_name_data;
  return (0);
}

int
msg_attack_get(struct msg *msg, struct kill* *value)
{
  if (msg->attack_set != 1) {
    msg->attack_data = kill_new();
    if (msg->attack_data == NULL)
      return (-1);
    msg->attack_set = 1;
  }
  *value = msg->attack_data;
  return (0);
}

int
msg_run_get(struct msg *msg, int offset,
    struct run * *value)
{
  if (!msg->run_set || offset < 0 || offset >= msg->run_length)
    return (-1);
  *value = msg->run_data[offset];
  return (0);
}

void
msg_clear(struct msg *tmp)
{
  if (tmp->from_name_set == 1) {
    free(tmp->from_name_data);
    tmp->from_name_data = NULL;
    tmp->from_name_set = 0;
  }
  if (tmp->to_name_set == 1) {
    free(tmp->to_name_data);
    tmp->to_name_data = NULL;
    tmp->to_name_set = 0;
  }
  if (tmp->attack_set == 1) {
    kill_free(tmp->attack_data);
    tmp->attack_data = NULL;
    tmp->attack_set = 0;
  }
  if (tmp->run_set == 1) {
    int i;
    for (i = 0; i < tmp->run_length; ++i) {
      run_free(tmp->run_data[i]);
    }
    free(tmp->run_data);
    tmp->run_data = NULL;
    tmp->run_set = 0;
    tmp->run_length = 0;
    tmp->run_num_allocated = 0;
  }
}

void
msg_free(struct msg *tmp)
{
  if (tmp->from_name_data != NULL)
      free (tmp->from_name_data);
  if (tmp->to_name_data != NULL)
      free (tmp->to_name_data);
  if (tmp->attack_data != NULL)
      kill_free(tmp->attack_data);
  if (tmp->run_set == 1) {
    int i;
    for (i = 0; i < tmp->run_length; ++i) {
      run_free(tmp->run_data[i]);
    }
    free(tmp->run_data);
    tmp->run_data = NULL;
    tmp->run_set = 0;
    tmp->run_length = 0;
    tmp->run_num_allocated = 0;
  }
  free(tmp->run_data);
  free(tmp);
}

void
msg_marshal(struct evbuffer *evbuf, const struct msg *tmp){
  evtag_marshal_string(evbuf, MSG_FROM_NAME, tmp->from_name_data);
  evtag_marshal_string(evbuf, MSG_TO_NAME, tmp->to_name_data);
  if (tmp->attack_set) {
    evtag_marshal_kill(evbuf, MSG_ATTACK, tmp->attack_data);
  }
  if (tmp->run_set) {
    int i;
    for (i = 0; i < tmp->run_length; ++i) {
      evtag_marshal_run(evbuf, MSG_RUN, tmp->run_data[i]);
    }
  }
}

int
msg_unmarshal(struct msg *tmp, struct evbuffer *evbuf)
{
  ev_uint32_t tag;
  while (evbuffer_get_length(evbuf) > 0) {
    if (evtag_peek(evbuf, &tag) == -1)
      return (-1);
    switch (tag) {

      case MSG_FROM_NAME:
        if (tmp->from_name_set)
          return (-1);
        if (evtag_unmarshal_string(evbuf, MSG_FROM_NAME, &tmp->from_name_data) == -1) {
          event_warnx("%s: failed to unmarshal from_name", __func__);
          return (-1);
        }
        tmp->from_name_set = 1;
        break;

      case MSG_TO_NAME:
        if (tmp->to_name_set)
          return (-1);
        if (evtag_unmarshal_string(evbuf, MSG_TO_NAME, &tmp->to_name_data) == -1) {
          event_warnx("%s: failed to unmarshal to_name", __func__);
          return (-1);
        }
        tmp->to_name_set = 1;
        break;

      case MSG_ATTACK:
        if (tmp->attack_set)
          return (-1);
        tmp->attack_data = kill_new();
        if (tmp->attack_data == NULL)
          return (-1);
        if (evtag_unmarshal_kill(evbuf, MSG_ATTACK, tmp->attack_data) == -1) {
          event_warnx("%s: failed to unmarshal attack", __func__);
          return (-1);
        }
        tmp->attack_set = 1;
        break;

      case MSG_RUN:
        if (tmp->run_length >= tmp->run_num_allocated &&
            msg_run_expand_to_hold_more(tmp) < 0) {
          return (-1);
        }
        tmp->run_data[tmp->run_length] = run_new();
        if (tmp->run_data[tmp->run_length] == NULL)
          return (-1);
        if (evtag_unmarshal_run(evbuf, MSG_RUN, tmp->run_data[tmp->run_length]) == -1) {
          event_warnx("%s: failed to unmarshal run", __func__);
          return (-1);
        }
        ++tmp->run_length;
        tmp->run_set = 1;
        break;

      default:
        return -1;
    }
  }

  if (msg_complete(tmp) == -1)
    return (-1);
  return (0);
}

int
msg_complete(struct msg *msg)
{
  if (!msg->from_name_set)
    return (-1);
  if (!msg->to_name_set)
    return (-1);
  if (msg->attack_set && kill_complete(msg->attack_data) == -1)
    return (-1);
  {
    int i;
    for (i = 0; i < msg->run_length; ++i) {
      if (msg->run_set && run_complete(msg->run_data[i]) == -1)
        return (-1);
    }
  }
  return (0);
}

int
evtag_unmarshal_msg(struct evbuffer *evbuf, ev_uint32_t need_tag, struct msg *msg)
{
  ev_uint32_t tag;
  int res = -1;

  struct evbuffer *tmp = evbuffer_new();

  if (evtag_unmarshal(evbuf, &tag, tmp) == -1 || tag != need_tag)
    goto error;

  if (msg_unmarshal(msg, tmp) == -1)
    goto error;

  res = 0;

error:
  evbuffer_free(tmp);
  return (res);
}

void
evtag_marshal_msg(struct evbuffer *evbuf, ev_uint32_t tag, const struct msg *msg)
{
  struct evbuffer *_buf = evbuffer_new();
  assert(_buf != NULL);
  msg_marshal(_buf, msg);
  evtag_marshal_buffer(evbuf, tag, _buf);
  evbuffer_free(_buf);
}

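/*
 * The evtag_marshal_msg()/evtag_unmarshal_msg() wrappers above first
 * marshal the structure into a scratch evbuffer and then emit that
 * buffer under the caller's tag via evtag_marshal_buffer(); this is the
 * same framing that lets a kill or run structure nest inside a msg (see
 * msg_attack_assign() and msg_marshal()).
 */
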
/*
 * Implementation of kill
 */

static struct kill_access_ __kill_base = {
  kill_weapon_assign,
  kill_weapon_get,
  kill_action_assign,
  kill_action_get,
  kill_how_often_assign,
  kill_how_often_get,
  kill_how_often_add,
};

struct kill *
kill_new(void)
{
  return kill_new_with_arg(NULL);
}

struct kill *
kill_new_with_arg(void *unused)
{
  struct kill *tmp;
  if ((tmp = malloc(sizeof(struct kill))) == NULL) {
    event_warn("%s: malloc", __func__);
    return (NULL);
  }
  tmp->base = &__kill_base;

  tmp->weapon_data = NULL;
  tmp->weapon_set = 0;

  tmp->action_data = NULL;
  tmp->action_set = 0;

  tmp->how_often_data = NULL;
  tmp->how_often_length = 0;
  tmp->how_often_num_allocated = 0;
  tmp->how_often_set = 0;

  return (tmp);
}

static int
kill_how_often_expand_to_hold_more(struct kill *msg)
{
  int tobe_allocated = msg->how_often_num_allocated;
  ev_uint32_t* new_data = NULL;
  tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;
  new_data = (ev_uint32_t*) realloc(msg->how_often_data,
      tobe_allocated * sizeof(ev_uint32_t));
  if (new_data == NULL)
    return -1;
  msg->how_often_data = new_data;
  msg->how_often_num_allocated = tobe_allocated;
  return 0;
}

ev_uint32_t *
kill_how_often_add(struct kill *msg, const ev_uint32_t value)
{
  if (++msg->how_often_length >= msg->how_often_num_allocated) {
    if (kill_how_often_expand_to_hold_more(msg) < 0)
      goto error;
  }
  msg->how_often_data[msg->how_often_length - 1] = value;
  msg->how_often_set = 1;
  return &(msg->how_often_data[msg->how_often_length - 1]);
error:
  --msg->how_often_length;
  return (NULL);
}

int
kill_weapon_assign(struct kill *msg,
    const char * value)
{
  if (msg->weapon_data != NULL)
    free(msg->weapon_data);
  if ((msg->weapon_data = strdup(value)) == NULL)
    return (-1);
  msg->weapon_set = 1;
  return (0);
}

int
kill_action_assign(struct kill *msg,
    const char * value)
{
  if (msg->action_data != NULL)
    free(msg->action_data);
  if ((msg->action_data = strdup(value)) == NULL)
    return (-1);
  msg->action_set = 1;
  return (0);
}

int
kill_how_often_assign(struct kill *msg, int off,
    const ev_uint32_t value)
{
  if (!msg->how_often_set || off < 0 || off >= msg->how_often_length)
    return (-1);
  msg->how_often_data[off] = value;
  return (0);
}

int
kill_weapon_get(struct kill *msg, char * *value)
{
  if (msg->weapon_set != 1)
    return (-1);
  *value = msg->weapon_data;
  return (0);
}

int
kill_action_get(struct kill *msg, char * *value)
{
  if (msg->action_set != 1)
    return (-1);
  *value = msg->action_data;
  return (0);
}

int
kill_how_often_get(struct kill *msg, int offset,
    ev_uint32_t *value)
{
  if (!msg->how_often_set || offset < 0 || offset >= msg->how_often_length)
    return (-1);
  *value = msg->how_often_data[offset];
  return (0);
}

void
kill_clear(struct kill *tmp)
{
  if (tmp->weapon_set == 1) {
    free(tmp->weapon_data);
    tmp->weapon_data = NULL;
    tmp->weapon_set = 0;
  }
  if (tmp->action_set == 1) {
    free(tmp->action_data);
    tmp->action_data = NULL;
    tmp->action_set = 0;
  }
  if (tmp->how_often_set == 1) {
    free(tmp->how_often_data);
    tmp->how_often_data = NULL;
    tmp->how_often_set = 0;
    tmp->how_often_length = 0;
    tmp->how_often_num_allocated = 0;
  }
}

void
kill_free(struct kill *tmp)
{
  if (tmp->weapon_data != NULL)
      free (tmp->weapon_data);
  if (tmp->action_data != NULL)
      free (tmp->action_data);
  if (tmp->how_often_set == 1) {
    free(tmp->how_often_data);
    tmp->how_often_data = NULL;
    tmp->how_often_set = 0;
    tmp->how_often_length = 0;
    tmp->how_often_num_allocated = 0;
  }
  free(tmp->how_often_data);
  free(tmp);
}

void
kill_marshal(struct evbuffer *evbuf, const struct kill *tmp){
  evtag_marshal_string(evbuf, KILL_WEAPON, tmp->weapon_data);
  evtag_marshal_string(evbuf, KILL_ACTION, tmp->action_data);
  if (tmp->how_often_set) {
    int i;
    for (i = 0; i < tmp->how_often_length; ++i) {
      evtag_marshal_int(evbuf, KILL_HOW_OFTEN, tmp->how_often_data[i]);
    }
  }
}

int
kill_unmarshal(struct kill *tmp, struct evbuffer *evbuf)
{
  ev_uint32_t tag;
  while (evbuffer_get_length(evbuf) > 0) {
    if (evtag_peek(evbuf, &tag) == -1)
      return (-1);
    switch (tag) {

      case KILL_WEAPON:
        if (tmp->weapon_set)
          return (-1);
        if (evtag_unmarshal_string(evbuf, KILL_WEAPON, &tmp->weapon_data) == -1) {
          event_warnx("%s: failed to unmarshal weapon", __func__);
          return (-1);
        }
        tmp->weapon_set = 1;
        break;

      case KILL_ACTION:
        if (tmp->action_set)
          return (-1);
        if (evtag_unmarshal_string(evbuf, KILL_ACTION, &tmp->action_data) == -1) {
          event_warnx("%s: failed to unmarshal action", __func__);
          return (-1);
        }
        tmp->action_set = 1;
        break;

      case KILL_HOW_OFTEN:
        if (tmp->how_often_length >= tmp->how_often_num_allocated &&
            kill_how_often_expand_to_hold_more(tmp) < 0) {
          return (-1);
        }
        if (evtag_unmarshal_int(evbuf, KILL_HOW_OFTEN, &tmp->how_often_data[tmp->how_often_length]) == -1) {
          event_warnx("%s: failed to unmarshal how_often", __func__);
          return (-1);
        }
        ++tmp->how_often_length;
        tmp->how_often_set = 1;
        break;

      default:
        return -1;
    }
  }

  if (kill_complete(tmp) == -1)
    return (-1);
  return (0);
}

int
kill_complete(struct kill *msg)
{
  if (!msg->weapon_set)
    return (-1);
  if (!msg->action_set)
    return (-1);
  return (0);
}

int
evtag_unmarshal_kill(struct evbuffer *evbuf, ev_uint32_t need_tag, struct kill *msg)
{
  ev_uint32_t tag;
  int res = -1;

  struct evbuffer *tmp = evbuffer_new();

  if (evtag_unmarshal(evbuf, &tag, tmp) == -1 || tag != need_tag)
    goto error;

  if (kill_unmarshal(msg, tmp) == -1)
    goto error;

  res = 0;

error:
  evbuffer_free(tmp);
  return (res);
}

void
evtag_marshal_kill(struct evbuffer *evbuf, ev_uint32_t tag, const struct kill *msg)
{
  struct evbuffer *_buf = evbuffer_new();
  assert(_buf != NULL);
  kill_marshal(_buf, msg);
  evtag_marshal_buffer(evbuf, tag, _buf);
  evbuffer_free(_buf);
}

/*
 * Implementation of run
 */

static struct run_access_ __run_base = {
  run_how_assign,
  run_how_get,
  run_some_bytes_assign,
  run_some_bytes_get,
  run_fixed_bytes_assign,
  run_fixed_bytes_get,
  run_notes_assign,
  run_notes_get,
  run_notes_add,
  run_large_number_assign,
  run_large_number_get,
  run_other_numbers_assign,
  run_other_numbers_get,
  run_other_numbers_add,
};

struct run *
run_new(void)
{
  return run_new_with_arg(NULL);
}

struct run *
run_new_with_arg(void *unused)
{
  struct run *tmp;
  if ((tmp = malloc(sizeof(struct run))) == NULL) {
    event_warn("%s: malloc", __func__);
    return (NULL);
  }
  tmp->base = &__run_base;

  tmp->how_data = NULL;
  tmp->how_set = 0;

  tmp->some_bytes_data = NULL;
  tmp->some_bytes_length = 0;
  tmp->some_bytes_set = 0;

  memset(tmp->fixed_bytes_data, 0, sizeof(tmp->fixed_bytes_data));
  tmp->fixed_bytes_set = 0;

  tmp->notes_data = NULL;
  tmp->notes_length = 0;
  tmp->notes_num_allocated = 0;
  tmp->notes_set = 0;

  tmp->large_number_data = 0;
  tmp->large_number_set = 0;

  tmp->other_numbers_data = NULL;
  tmp->other_numbers_length = 0;
  tmp->other_numbers_num_allocated = 0;
  tmp->other_numbers_set = 0;

  return (tmp);
}

static int
run_notes_expand_to_hold_more(struct run *msg)
{
  int tobe_allocated = msg->notes_num_allocated;
  char ** new_data = NULL;
  tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;
  new_data = (char **) realloc(msg->notes_data,
      tobe_allocated * sizeof(char *));
  if (new_data == NULL)
    return -1;
  msg->notes_data = new_data;
  msg->notes_num_allocated = tobe_allocated;
  return 0;
}

char * *
run_notes_add(struct run *msg, const char * value)
{
  if (++msg->notes_length >= msg->notes_num_allocated) {
    if (run_notes_expand_to_hold_more(msg) < 0)
      goto error;
  }
  if (value != NULL) {
    msg->notes_data[msg->notes_length - 1] = strdup(value);
    if (msg->notes_data[msg->notes_length - 1] == NULL) {
      goto error;
    }
  } else {
    msg->notes_data[msg->notes_length - 1] = NULL;
  }
  msg->notes_set = 1;
  return &(msg->notes_data[msg->notes_length - 1]);
error:
  --msg->notes_length;
  return (NULL);
}

static int
run_other_numbers_expand_to_hold_more(struct run *msg)
{
  int tobe_allocated = msg->other_numbers_num_allocated;
  ev_uint32_t* new_data = NULL;
  tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;
  new_data = (ev_uint32_t*) realloc(msg->other_numbers_data,
      tobe_allocated * sizeof(ev_uint32_t));
  if (new_data == NULL)
    return -1;
  msg->other_numbers_data = new_data;
  msg->other_numbers_num_allocated = tobe_allocated;
  return 0;
}

ev_uint32_t *
run_other_numbers_add(struct run *msg, const ev_uint32_t value)
{
  if (++msg->other_numbers_length >= msg->other_numbers_num_allocated) {
    if (run_other_numbers_expand_to_hold_more(msg) < 0)
      goto error;
  }
  msg->other_numbers_data[msg->other_numbers_length - 1] = value;
  msg->other_numbers_set = 1;
  return &(msg->other_numbers_data[msg->other_numbers_length - 1]);
error:
  --msg->other_numbers_length;
  return (NULL);
}

int
run_how_assign(struct run *msg,
    const char * value)
{
  if (msg->how_data != NULL)
    free(msg->how_data);
  if ((msg->how_data = strdup(value)) == NULL)
    return (-1);
  msg->how_set = 1;
  return (0);
}

int
run_some_bytes_assign(struct run *msg, const ev_uint8_t * value, ev_uint32_t len)
{
  if (msg->some_bytes_data != NULL)
    free (msg->some_bytes_data);
  msg->some_bytes_data = malloc(len);
  if (msg->some_bytes_data == NULL)
    return (-1);
  msg->some_bytes_set = 1;
  msg->some_bytes_length = len;
  memcpy(msg->some_bytes_data, value, len);
  return (0);
}

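/*
 * some_bytes above is a variable-length byte field whose length is
 * tracked in some_bytes_length, while fixed_bytes below always copies
 * exactly 24 bytes into a fixed-size array.
 */
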
int
run_fixed_bytes_assign(struct run *msg, const ev_uint8_t *value)
{
  msg->fixed_bytes_set = 1;
  memcpy(msg->fixed_bytes_data, value, 24);
  return (0);
}

int
run_notes_assign(struct run *msg, int off,
    const char * value)
{
  if (!msg->notes_set || off < 0 || off >= msg->notes_length)
    return (-1);

  if (msg->notes_data[off] != NULL)
    free(msg->notes_data[off]);
  msg->notes_data[off] = strdup(value);
  if (msg->notes_data[off] == NULL) {
    event_warnx("%s: strdup", __func__);
    return (-1);
  }
  return (0);
}

int
run_large_number_assign(struct run *msg, const ev_uint64_t value)
{
  msg->large_number_set = 1;
  msg->large_number_data = value;
  return (0);
}

int
run_other_numbers_assign(struct run *msg, int off,
    const ev_uint32_t value)
{
  if (!msg->other_numbers_set || off < 0 || off >= msg->other_numbers_length)
    return (-1);
  msg->other_numbers_data[off] = value;
  return (0);
}

int
run_how_get(struct run *msg, char * *value)
{
  if (msg->how_set != 1)
    return (-1);
  *value = msg->how_data;
  return (0);
}

int
run_some_bytes_get(struct run *msg, ev_uint8_t * *value, ev_uint32_t *plen)
{
  if (msg->some_bytes_set != 1)
    return (-1);
  *value = msg->some_bytes_data;
  *plen = msg->some_bytes_length;
  return (0);
}

int
run_fixed_bytes_get(struct run *msg, ev_uint8_t **value)
{
  if (msg->fixed_bytes_set != 1)
    return (-1);
  *value = msg->fixed_bytes_data;
  return (0);
}

int
run_notes_get(struct run *msg, int offset,
    char * *value)
{
  if (!msg->notes_set || offset < 0 || offset >= msg->notes_length)
    return (-1);
  *value = msg->notes_data[offset];
  return (0);
}

int
run_large_number_get(struct run *msg, ev_uint64_t *value)
{
  if (msg->large_number_set != 1)
    return (-1);
  *value = msg->large_number_data;
  return (0);
}

int
run_other_numbers_get(struct run *msg, int offset,
    ev_uint32_t *value)
{
  if (!msg->other_numbers_set || offset < 0 || offset >= msg->other_numbers_length)
    return (-1);
  *value = msg->other_numbers_data[offset];
  return (0);
}

void
run_clear(struct run *tmp)
{
  if (tmp->how_set == 1) {
    free(tmp->how_data);
    tmp->how_data = NULL;
    tmp->how_set = 0;
  }
  if (tmp->some_bytes_set == 1) {
    free (tmp->some_bytes_data);
    tmp->some_bytes_data = NULL;
    tmp->some_bytes_length = 0;
    tmp->some_bytes_set = 0;
  }
  tmp->fixed_bytes_set = 0;
  memset(tmp->fixed_bytes_data, 0, sizeof(tmp->fixed_bytes_data));
  if (tmp->notes_set == 1) {
    int i;
    for (i = 0; i < tmp->notes_length; ++i) {
      if (tmp->notes_data[i] != NULL) free(tmp->notes_data[i]);
    }
    free(tmp->notes_data);
    tmp->notes_data = NULL;
    tmp->notes_set = 0;
    tmp->notes_length = 0;
    tmp->notes_num_allocated = 0;
  }
  tmp->large_number_set = 0;
  if (tmp->other_numbers_set == 1) {
    free(tmp->other_numbers_data);
    tmp->other_numbers_data = NULL;
    tmp->other_numbers_set = 0;
    tmp->other_numbers_length = 0;
    tmp->other_numbers_num_allocated = 0;
  }
}

void
run_free(struct run *tmp)
{
  if (tmp->how_data != NULL)
      free (tmp->how_data);
  if (tmp->some_bytes_data != NULL)
      free(tmp->some_bytes_data);
  if (tmp->notes_set == 1) {
    int i;
    for (i = 0; i < tmp->notes_length; ++i) {
      if (tmp->notes_data[i] != NULL) free(tmp->notes_data[i]);
    }
    free(tmp->notes_data);
    tmp->notes_data = NULL;
    tmp->notes_set = 0;
    tmp->notes_length = 0;
    tmp->notes_num_allocated = 0;
  }
  free(tmp->notes_data);
  if (tmp->other_numbers_set == 1) {
    free(tmp->other_numbers_data);
    tmp->other_numbers_data = NULL;
    tmp->other_numbers_set = 0;
    tmp->other_numbers_length = 0;
    tmp->other_numbers_num_allocated = 0;
  }
  free(tmp->other_numbers_data);
  free(tmp);
}

void
run_marshal(struct evbuffer *evbuf, const struct run *tmp){
  evtag_marshal_string(evbuf, RUN_HOW, tmp->how_data);
  if (tmp->some_bytes_set) {
    evtag_marshal(evbuf, RUN_SOME_BYTES, tmp->some_bytes_data, tmp->some_bytes_length);
  }
  evtag_marshal(evbuf, RUN_FIXED_BYTES, tmp->fixed_bytes_data, (24));
  if (tmp->notes_set) {
    int i;
    for (i = 0; i < tmp->notes_length; ++i) {
      evtag_marshal_string(evbuf, RUN_NOTES, tmp->notes_data[i]);
    }
  }
  if (tmp->large_number_set) {
    evtag_marshal_int64(evbuf, RUN_LARGE_NUMBER, tmp->large_number_data);
  }
  if (tmp->other_numbers_set) {
    int i;
    for (i = 0; i < tmp->other_numbers_length; ++i) {
      evtag_marshal_int(evbuf, RUN_OTHER_NUMBERS, tmp->other_numbers_data[i]);
    }
  }
}

int
run_unmarshal(struct run *tmp, struct evbuffer *evbuf)
{
  ev_uint32_t tag;
  while (evbuffer_get_length(evbuf) > 0) {
    if (evtag_peek(evbuf, &tag) == -1)
      return (-1);
    switch (tag) {

      case RUN_HOW:
        if (tmp->how_set)
          return (-1);
        if (evtag_unmarshal_string(evbuf, RUN_HOW, &tmp->how_data) == -1) {
          event_warnx("%s: failed to unmarshal how", __func__);
          return (-1);
        }
        tmp->how_set = 1;
        break;

      case RUN_SOME_BYTES:
        if (tmp->some_bytes_set)
          return (-1);
        if (evtag_payload_length(evbuf, &tmp->some_bytes_length) == -1)
          return (-1);
        if (tmp->some_bytes_length > evbuffer_get_length(evbuf))
          return (-1);
        if ((tmp->some_bytes_data = malloc(tmp->some_bytes_length)) == NULL)
          return (-1);
        if (evtag_unmarshal_fixed(evbuf, RUN_SOME_BYTES, tmp->some_bytes_data, tmp->some_bytes_length) == -1) {
          event_warnx("%s: failed to unmarshal some_bytes", __func__);
          return (-1);
        }
        tmp->some_bytes_set = 1;
        break;

      case RUN_FIXED_BYTES:
        if (tmp->fixed_bytes_set)
          return (-1);
        if (evtag_unmarshal_fixed(evbuf, RUN_FIXED_BYTES, tmp->fixed_bytes_data, (24)) == -1) {
          event_warnx("%s: failed to unmarshal fixed_bytes", __func__);
          return (-1);
        }
        tmp->fixed_bytes_set = 1;
        break;

      case RUN_NOTES:
        if (tmp->notes_length >= tmp->notes_num_allocated &&
            run_notes_expand_to_hold_more(tmp) < 0) {
          return (-1);
        }
        if (evtag_unmarshal_string(evbuf, RUN_NOTES, &tmp->notes_data[tmp->notes_length]) == -1) {
          event_warnx("%s: failed to unmarshal notes", __func__);
          return (-1);
        }
        ++tmp->notes_length;
        tmp->notes_set = 1;
        break;

      case RUN_LARGE_NUMBER:
        if (tmp->large_number_set)
          return (-1);
        if (evtag_unmarshal_int64(evbuf, RUN_LARGE_NUMBER, &tmp->large_number_data) == -1) {
          event_warnx("%s: failed to unmarshal large_number", __func__);
          return (-1);
        }
        tmp->large_number_set = 1;
        break;

      case RUN_OTHER_NUMBERS:
        if (tmp->other_numbers_length >= tmp->other_numbers_num_allocated &&
            run_other_numbers_expand_to_hold_more(tmp) < 0) {
          return (-1);
        }
        if (evtag_unmarshal_int(evbuf, RUN_OTHER_NUMBERS, &tmp->other_numbers_data[tmp->other_numbers_length]) == -1) {
          event_warnx("%s: failed to unmarshal other_numbers", __func__);
          return (-1);
        }
        ++tmp->other_numbers_length;
        tmp->other_numbers_set = 1;
        break;

      default:
        return -1;
    }
  }

  if (run_complete(tmp) == -1)
    return (-1);
  return (0);
}

int
run_complete(struct run *msg)
{
  if (!msg->how_set)
    return (-1);
  if (!msg->fixed_bytes_set)
    return (-1);
  return (0);
}

int
evtag_unmarshal_run(struct evbuffer *evbuf, ev_uint32_t need_tag, struct run *msg)
{
  ev_uint32_t tag;
  int res = -1;

  struct evbuffer *tmp = evbuffer_new();

  if (evtag_unmarshal(evbuf, &tag, tmp) == -1 || tag != need_tag)
    goto error;

  if (run_unmarshal(msg, tmp) == -1)
    goto error;

  res = 0;

error:
  evbuffer_free(tmp);
  return (res);
}

void
evtag_marshal_run(struct evbuffer *evbuf, ev_uint32_t tag, const struct run *msg)
{
  struct evbuffer *_buf = evbuffer_new();
  assert(_buf != NULL);
  run_marshal(_buf, msg);
  evtag_marshal_buffer(evbuf, tag, _buf);
  evbuffer_free(_buf);
}