 * Automatically generated from ./regress.rpc
 * by event_rpcgen.py/0.1.  DO NOT EDIT THIS FILE.
7 #ifdef _EVENT_HAVE_SYS_TIME_H
13 #define EVENT_NO_STRUCT
16 #ifdef _EVENT___func__
17 #define __func__ _EVENT___func__
20 #include "./regress.gen.h"
/* Forward declarations of libevent's logging helpers; declared here so this
 * generated file does not depend on the internal log header. */
void event_err(int eval, const char *fmt, ...);
void event_warn(const char *fmt, ...);
void event_errx(int eval, const char *fmt, ...);
void event_warnx(const char *fmt, ...);
29 * Implementation of msg
32 static struct msg_access_ __msg_base
= {
48 if ((tmp
= malloc(sizeof(struct msg
))) == NULL
) {
49 event_warn("%s: malloc", __func__
);
52 tmp
->base
= &__msg_base
;
54 tmp
->from_name_data
= NULL
;
55 tmp
->from_name_set
= 0;
57 tmp
->to_name_data
= NULL
;
60 tmp
->attack_data
= NULL
;
65 tmp
->run_num_allocated
= 0;
75 msg_run_add(struct msg
*msg
)
77 if (++msg
->run_length
>= msg
->run_num_allocated
) {
78 int tobe_allocated
= msg
->run_num_allocated
;
79 struct run
** new_data
= NULL
;
80 tobe_allocated
= !tobe_allocated
? 1 : tobe_allocated
<< 1;
81 new_data
= (struct run
**) realloc(msg
->run_data
,
82 tobe_allocated
* sizeof(struct run
*));
85 msg
->run_data
= new_data
;
86 msg
->run_num_allocated
= tobe_allocated
;
88 msg
->run_data
[msg
->run_length
- 1] = run_new();
89 if (msg
->run_data
[msg
->run_length
- 1] == NULL
)
92 return (msg
->run_data
[msg
->run_length
- 1]);
100 msg_from_name_assign(struct msg
*msg
,
103 if (msg
->from_name_data
!= NULL
)
104 free(msg
->from_name_data
);
105 if ((msg
->from_name_data
= strdup(value
)) == NULL
)
107 msg
->from_name_set
= 1;
112 msg_to_name_assign(struct msg
*msg
,
115 if (msg
->to_name_data
!= NULL
)
116 free(msg
->to_name_data
);
117 if ((msg
->to_name_data
= strdup(value
)) == NULL
)
119 msg
->to_name_set
= 1;
124 msg_attack_assign(struct msg
*msg
,
125 const struct kill
* value
)
127 struct evbuffer
*tmp
= NULL
;
128 if (msg
->attack_set
) {
129 kill_clear(msg
->attack_data
);
132 msg
->attack_data
= kill_new();
133 if (msg
->attack_data
== NULL
) {
134 event_warn("%s: kill_new()", __func__
);
138 if ((tmp
= evbuffer_new()) == NULL
) {
139 event_warn("%s: evbuffer_new()", __func__
);
142 kill_marshal(tmp
, value
);
143 if (kill_unmarshal(msg
->attack_data
, tmp
) == -1) {
144 event_warnx("%s: kill_unmarshal", __func__
);
153 if (msg
->attack_data
!= NULL
) {
154 kill_free(msg
->attack_data
);
155 msg
->attack_data
= NULL
;
161 msg_run_assign(struct msg
*msg
, int off
,
162 const struct run
* value
)
164 struct evbuffer
*tmp
= NULL
;
165 if (!msg
->run_set
|| off
< 0 || off
>= msg
->run_length
)
167 run_clear(msg
->run_data
[off
]);
168 if ((tmp
= evbuffer_new()) == NULL
) {
169 event_warn("%s: evbuffer_new()", __func__
);
172 run_marshal(tmp
, value
);
173 if (run_unmarshal(msg
->run_data
[off
], tmp
) == -1) {
174 event_warnx("%s: run_unmarshal", __func__
);
182 run_clear(msg
->run_data
[off
]);
187 msg_from_name_get(struct msg
*msg
, char * *value
)
189 if (msg
->from_name_set
!= 1)
191 *value
= msg
->from_name_data
;
196 msg_to_name_get(struct msg
*msg
, char * *value
)
198 if (msg
->to_name_set
!= 1)
200 *value
= msg
->to_name_data
;
205 msg_attack_get(struct msg
*msg
, struct kill
* *value
)
207 if (msg
->attack_set
!= 1) {
208 msg
->attack_data
= kill_new();
209 if (msg
->attack_data
== NULL
)
213 *value
= msg
->attack_data
;
218 msg_run_get(struct msg
*msg
, int offset
,
221 if (!msg
->run_set
|| offset
< 0 || offset
>= msg
->run_length
)
223 *value
= msg
->run_data
[offset
];
228 msg_clear(struct msg
*tmp
)
230 if (tmp
->from_name_set
== 1) {
231 free (tmp
->from_name_data
);
232 tmp
->from_name_data
= NULL
;
233 tmp
->from_name_set
= 0;
235 if (tmp
->to_name_set
== 1) {
236 free (tmp
->to_name_data
);
237 tmp
->to_name_data
= NULL
;
238 tmp
->to_name_set
= 0;
240 if (tmp
->attack_set
== 1) {
241 kill_free(tmp
->attack_data
);
242 tmp
->attack_data
= NULL
;
245 if (tmp
->run_set
== 1) {
247 for (i
= 0; i
< tmp
->run_length
; ++i
) {
248 run_free(tmp
->run_data
[i
]);
251 tmp
->run_data
= NULL
;
254 tmp
->run_num_allocated
= 0;
259 msg_free(struct msg
*tmp
)
261 if (tmp
->from_name_data
!= NULL
)
262 free (tmp
->from_name_data
);
263 if (tmp
->to_name_data
!= NULL
)
264 free (tmp
->to_name_data
);
265 if (tmp
->attack_data
!= NULL
)
266 kill_free(tmp
->attack_data
);
267 if (tmp
->run_data
!= NULL
) {
269 for (i
= 0; i
< tmp
->run_length
; ++i
) {
270 run_free(tmp
->run_data
[i
]);
271 tmp
->run_data
[i
] = NULL
;
274 tmp
->run_data
= NULL
;
276 tmp
->run_num_allocated
= 0;
282 msg_marshal(struct evbuffer
*evbuf
, const struct msg
*tmp
){
283 evtag_marshal_string(evbuf
, MSG_FROM_NAME
, tmp
->from_name_data
);
284 evtag_marshal_string(evbuf
, MSG_TO_NAME
, tmp
->to_name_data
);
285 if (tmp
->attack_set
) {
286 evtag_marshal_kill(evbuf
, MSG_ATTACK
, tmp
->attack_data
);
290 for (i
= 0; i
< tmp
->run_length
; ++i
) {
291 evtag_marshal_run(evbuf
, MSG_RUN
, tmp
->run_data
[i
]);
297 msg_unmarshal(struct msg
*tmp
, struct evbuffer
*evbuf
)
300 while (EVBUFFER_LENGTH(evbuf
) > 0) {
301 if (evtag_peek(evbuf
, &tag
) == -1)
307 if (tmp
->from_name_set
)
309 if (evtag_unmarshal_string(evbuf
, MSG_FROM_NAME
, &tmp
->from_name_data
) == -1) {
310 event_warnx("%s: failed to unmarshal from_name", __func__
);
313 tmp
->from_name_set
= 1;
318 if (tmp
->to_name_set
)
320 if (evtag_unmarshal_string(evbuf
, MSG_TO_NAME
, &tmp
->to_name_data
) == -1) {
321 event_warnx("%s: failed to unmarshal to_name", __func__
);
324 tmp
->to_name_set
= 1;
331 tmp
->attack_data
= kill_new();
332 if (tmp
->attack_data
== NULL
)
334 if (evtag_unmarshal_kill(evbuf
, MSG_ATTACK
, tmp
->attack_data
) == -1) {
335 event_warnx("%s: failed to unmarshal attack", __func__
);
343 if (msg_run_add(tmp
) == NULL
)
345 if (evtag_unmarshal_run(evbuf
, MSG_RUN
,
346 tmp
->run_data
[tmp
->run_length
- 1]) == -1) {
348 event_warnx("%s: failed to unmarshal run", __func__
);
359 if (msg_complete(tmp
) == -1)
365 msg_complete(struct msg
*msg
)
367 if (!msg
->from_name_set
)
369 if (!msg
->to_name_set
)
371 if (msg
->attack_set
&& kill_complete(msg
->attack_data
) == -1)
375 for (i
= 0; i
< msg
->run_length
; ++i
) {
376 if (run_complete(msg
->run_data
[i
]) == -1)
384 evtag_unmarshal_msg(struct evbuffer
*evbuf
, ev_uint32_t need_tag
, struct msg
*msg
)
389 struct evbuffer
*tmp
= evbuffer_new();
391 if (evtag_unmarshal(evbuf
, &tag
, tmp
) == -1 || tag
!= need_tag
)
394 if (msg_unmarshal(msg
, tmp
) == -1)
405 evtag_marshal_msg(struct evbuffer
*evbuf
, ev_uint32_t tag
, const struct msg
*msg
)
407 struct evbuffer
*_buf
= evbuffer_new();
408 assert(_buf
!= NULL
);
409 evbuffer_drain(_buf
, -1);
410 msg_marshal(_buf
, msg
);
411 evtag_marshal(evbuf
, tag
, EVBUFFER_DATA(_buf
), EVBUFFER_LENGTH(_buf
));
416 * Implementation of kill
419 static struct kill_access_ __kill_base
= {
424 kill_how_often_assign
,
432 if ((tmp
= malloc(sizeof(struct kill
))) == NULL
) {
433 event_warn("%s: malloc", __func__
);
436 tmp
->base
= &__kill_base
;
438 tmp
->weapon_data
= NULL
;
441 tmp
->action_data
= NULL
;
444 tmp
->how_often_data
= 0;
445 tmp
->how_often_set
= 0;
454 kill_weapon_assign(struct kill
*msg
,
457 if (msg
->weapon_data
!= NULL
)
458 free(msg
->weapon_data
);
459 if ((msg
->weapon_data
= strdup(value
)) == NULL
)
466 kill_action_assign(struct kill
*msg
,
469 if (msg
->action_data
!= NULL
)
470 free(msg
->action_data
);
471 if ((msg
->action_data
= strdup(value
)) == NULL
)
478 kill_how_often_assign(struct kill
*msg
, const ev_uint32_t value
)
480 msg
->how_often_set
= 1;
481 msg
->how_often_data
= value
;
486 kill_weapon_get(struct kill
*msg
, char * *value
)
488 if (msg
->weapon_set
!= 1)
490 *value
= msg
->weapon_data
;
495 kill_action_get(struct kill
*msg
, char * *value
)
497 if (msg
->action_set
!= 1)
499 *value
= msg
->action_data
;
504 kill_how_often_get(struct kill
*msg
, ev_uint32_t
*value
)
506 if (msg
->how_often_set
!= 1)
508 *value
= msg
->how_often_data
;
513 kill_clear(struct kill
*tmp
)
515 if (tmp
->weapon_set
== 1) {
516 free (tmp
->weapon_data
);
517 tmp
->weapon_data
= NULL
;
520 if (tmp
->action_set
== 1) {
521 free (tmp
->action_data
);
522 tmp
->action_data
= NULL
;
525 tmp
->how_often_set
= 0;
529 kill_free(struct kill
*tmp
)
531 if (tmp
->weapon_data
!= NULL
)
532 free (tmp
->weapon_data
);
533 if (tmp
->action_data
!= NULL
)
534 free (tmp
->action_data
);
539 kill_marshal(struct evbuffer
*evbuf
, const struct kill
*tmp
){
540 evtag_marshal_string(evbuf
, KILL_WEAPON
, tmp
->weapon_data
);
541 evtag_marshal_string(evbuf
, KILL_ACTION
, tmp
->action_data
);
542 if (tmp
->how_often_set
) {
543 evtag_marshal_int(evbuf
, KILL_HOW_OFTEN
, tmp
->how_often_data
);
548 kill_unmarshal(struct kill
*tmp
, struct evbuffer
*evbuf
)
551 while (EVBUFFER_LENGTH(evbuf
) > 0) {
552 if (evtag_peek(evbuf
, &tag
) == -1)
560 if (evtag_unmarshal_string(evbuf
, KILL_WEAPON
, &tmp
->weapon_data
) == -1) {
561 event_warnx("%s: failed to unmarshal weapon", __func__
);
571 if (evtag_unmarshal_string(evbuf
, KILL_ACTION
, &tmp
->action_data
) == -1) {
572 event_warnx("%s: failed to unmarshal action", __func__
);
580 if (tmp
->how_often_set
)
582 if (evtag_unmarshal_int(evbuf
, KILL_HOW_OFTEN
, &tmp
->how_often_data
) == -1) {
583 event_warnx("%s: failed to unmarshal how_often", __func__
);
586 tmp
->how_often_set
= 1;
594 if (kill_complete(tmp
) == -1)
600 kill_complete(struct kill
*msg
)
602 if (!msg
->weapon_set
)
604 if (!msg
->action_set
)
610 evtag_unmarshal_kill(struct evbuffer
*evbuf
, ev_uint32_t need_tag
, struct kill
*msg
)
615 struct evbuffer
*tmp
= evbuffer_new();
617 if (evtag_unmarshal(evbuf
, &tag
, tmp
) == -1 || tag
!= need_tag
)
620 if (kill_unmarshal(msg
, tmp
) == -1)
631 evtag_marshal_kill(struct evbuffer
*evbuf
, ev_uint32_t tag
, const struct kill
*msg
)
633 struct evbuffer
*_buf
= evbuffer_new();
634 assert(_buf
!= NULL
);
635 evbuffer_drain(_buf
, -1);
636 kill_marshal(_buf
, msg
);
637 evtag_marshal(evbuf
, tag
, EVBUFFER_DATA(_buf
), EVBUFFER_LENGTH(_buf
));
642 * Implementation of run
645 static struct run_access_ __run_base
= {
648 run_some_bytes_assign
,
650 run_fixed_bytes_assign
,
658 if ((tmp
= malloc(sizeof(struct run
))) == NULL
) {
659 event_warn("%s: malloc", __func__
);
662 tmp
->base
= &__run_base
;
664 tmp
->how_data
= NULL
;
667 tmp
->some_bytes_data
= NULL
;
668 tmp
->some_bytes_length
= 0;
669 tmp
->some_bytes_set
= 0;
671 memset(tmp
->fixed_bytes_data
, 0, sizeof(tmp
->fixed_bytes_data
));
672 tmp
->fixed_bytes_set
= 0;
681 run_how_assign(struct run
*msg
,
684 if (msg
->how_data
!= NULL
)
686 if ((msg
->how_data
= strdup(value
)) == NULL
)
693 run_some_bytes_assign(struct run
*msg
, const ev_uint8_t
* value
, ev_uint32_t len
)
695 if (msg
->some_bytes_data
!= NULL
)
696 free (msg
->some_bytes_data
);
697 msg
->some_bytes_data
= malloc(len
);
698 if (msg
->some_bytes_data
== NULL
)
700 msg
->some_bytes_set
= 1;
701 msg
->some_bytes_length
= len
;
702 memcpy(msg
->some_bytes_data
, value
, len
);
707 run_fixed_bytes_assign(struct run
*msg
, const ev_uint8_t
*value
)
709 msg
->fixed_bytes_set
= 1;
710 memcpy(msg
->fixed_bytes_data
, value
, 24);
715 run_how_get(struct run
*msg
, char * *value
)
717 if (msg
->how_set
!= 1)
719 *value
= msg
->how_data
;
724 run_some_bytes_get(struct run
*msg
, ev_uint8_t
* *value
, ev_uint32_t
*plen
)
726 if (msg
->some_bytes_set
!= 1)
728 *value
= msg
->some_bytes_data
;
729 *plen
= msg
->some_bytes_length
;
734 run_fixed_bytes_get(struct run
*msg
, ev_uint8_t
**value
)
736 if (msg
->fixed_bytes_set
!= 1)
738 *value
= msg
->fixed_bytes_data
;
743 run_clear(struct run
*tmp
)
745 if (tmp
->how_set
== 1) {
746 free (tmp
->how_data
);
747 tmp
->how_data
= NULL
;
750 if (tmp
->some_bytes_set
== 1) {
751 free (tmp
->some_bytes_data
);
752 tmp
->some_bytes_data
= NULL
;
753 tmp
->some_bytes_length
= 0;
754 tmp
->some_bytes_set
= 0;
756 tmp
->fixed_bytes_set
= 0;
757 memset(tmp
->fixed_bytes_data
, 0, sizeof(tmp
->fixed_bytes_data
));
761 run_free(struct run
*tmp
)
763 if (tmp
->how_data
!= NULL
)
764 free (tmp
->how_data
);
765 if (tmp
->some_bytes_data
!= NULL
)
766 free (tmp
->some_bytes_data
);
771 run_marshal(struct evbuffer
*evbuf
, const struct run
*tmp
){
772 evtag_marshal_string(evbuf
, RUN_HOW
, tmp
->how_data
);
773 if (tmp
->some_bytes_set
) {
774 evtag_marshal(evbuf
, RUN_SOME_BYTES
, tmp
->some_bytes_data
, tmp
->some_bytes_length
);
776 evtag_marshal(evbuf
, RUN_FIXED_BYTES
, tmp
->fixed_bytes_data
, sizeof(tmp
->fixed_bytes_data
));
780 run_unmarshal(struct run
*tmp
, struct evbuffer
*evbuf
)
783 while (EVBUFFER_LENGTH(evbuf
) > 0) {
784 if (evtag_peek(evbuf
, &tag
) == -1)
792 if (evtag_unmarshal_string(evbuf
, RUN_HOW
, &tmp
->how_data
) == -1) {
793 event_warnx("%s: failed to unmarshal how", __func__
);
801 if (tmp
->some_bytes_set
)
803 if (evtag_payload_length(evbuf
, &tmp
->some_bytes_length
) == -1)
805 if (tmp
->some_bytes_length
> EVBUFFER_LENGTH(evbuf
))
807 if ((tmp
->some_bytes_data
= malloc(tmp
->some_bytes_length
)) == NULL
)
809 if (evtag_unmarshal_fixed(evbuf
, RUN_SOME_BYTES
, tmp
->some_bytes_data
, tmp
->some_bytes_length
) == -1) {
810 event_warnx("%s: failed to unmarshal some_bytes", __func__
);
813 tmp
->some_bytes_set
= 1;
816 case RUN_FIXED_BYTES
:
818 if (tmp
->fixed_bytes_set
)
820 if (evtag_unmarshal_fixed(evbuf
, RUN_FIXED_BYTES
, tmp
->fixed_bytes_data
, sizeof(tmp
->fixed_bytes_data
)) == -1) {
821 event_warnx("%s: failed to unmarshal fixed_bytes", __func__
);
824 tmp
->fixed_bytes_set
= 1;
832 if (run_complete(tmp
) == -1)
838 run_complete(struct run
*msg
)
842 if (!msg
->fixed_bytes_set
)
848 evtag_unmarshal_run(struct evbuffer
*evbuf
, ev_uint32_t need_tag
, struct run
*msg
)
853 struct evbuffer
*tmp
= evbuffer_new();
855 if (evtag_unmarshal(evbuf
, &tag
, tmp
) == -1 || tag
!= need_tag
)
858 if (run_unmarshal(msg
, tmp
) == -1)
869 evtag_marshal_run(struct evbuffer
*evbuf
, ev_uint32_t tag
, const struct run
*msg
)
871 struct evbuffer
*_buf
= evbuffer_new();
872 assert(_buf
!= NULL
);
873 evbuffer_drain(_buf
, -1);
874 run_marshal(_buf
, msg
);
875 evtag_marshal(evbuf
, tag
, EVBUFFER_DATA(_buf
), EVBUFFER_LENGTH(_buf
));