/*
 * Automatically generated from ./regress.rpc
 * by event_rpcgen.py/0.1.  DO NOT EDIT THIS FILE.
 */

#include <sys/types.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <event.h>

#include "./regress.gen.h"

void event_err(int eval, const char *fmt, ...);
void event_warn(const char *fmt, ...);
void event_errx(int eval, const char *fmt, ...);
void event_warnx(const char *fmt, ...);

/*
 * Implementation of msg
 */

static struct msg_access_ __msg_base = {
  msg_from_name_assign,
  msg_from_name_get,
  msg_to_name_assign,
  msg_to_name_get,
  msg_attack_assign,
  msg_attack_get,
  msg_run_assign,
  msg_run_get,
  msg_run_add,
};

struct msg *
msg_new(void)
{
  struct msg *tmp;
  if ((tmp = malloc(sizeof(struct msg))) == NULL) {
    event_warn("%s: malloc", __func__);
    return (NULL);
  }
  tmp->base = &__msg_base;

  tmp->from_name_data = NULL;
  tmp->from_name_set = 0;

  tmp->to_name_data = NULL;
  tmp->to_name_set = 0;

  tmp->attack_data = NULL;
  tmp->attack_set = 0;

  tmp->run_data = NULL;
  tmp->run_length = 0;
  tmp->run_num_allocated = 0;
  tmp->run_set = 0;

  return (tmp);
}

struct run *
msg_run_add(struct msg *msg)
{
  if (++msg->run_length >= msg->run_num_allocated) {
    /* Grow the run array by doubling its allocation. */
    int tobe_allocated = msg->run_num_allocated;
    struct run** new_data = NULL;
    tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;
    new_data = (struct run**) realloc(msg->run_data,
        tobe_allocated * sizeof(struct run *));
    if (new_data == NULL)
      goto error;
    msg->run_data = new_data;
    msg->run_num_allocated = tobe_allocated;
  }
  msg->run_data[msg->run_length - 1] = run_new();
  if (msg->run_data[msg->run_length - 1] == NULL)
    goto error;
  msg->run_set = 1;
  return (msg->run_data[msg->run_length - 1]);
error:
  --msg->run_length;
  return (NULL);
}

int
msg_from_name_assign(struct msg *msg,
    const char * value)
{
  if (msg->from_name_data != NULL)
    free(msg->from_name_data);
  if ((msg->from_name_data = strdup(value)) == NULL)
    return (-1);
  msg->from_name_set = 1;
  return (0);
}

int
msg_to_name_assign(struct msg *msg,
    const char * value)
{
  if (msg->to_name_data != NULL)
    free(msg->to_name_data);
  if ((msg->to_name_data = strdup(value)) == NULL)
    return (-1);
  msg->to_name_set = 1;
  return (0);
}

int
msg_attack_assign(struct msg *msg,
    const struct kill* value)
{
  struct evbuffer *tmp = NULL;
  if (msg->attack_set) {
    kill_clear(msg->attack_data);
    msg->attack_set = 0;
  } else {
    msg->attack_data = kill_new();
    if (msg->attack_data == NULL) {
      event_warn("%s: kill_new()", __func__);
      goto error;
    }
  }
  if ((tmp = evbuffer_new()) == NULL) {
    event_warn("%s: evbuffer_new()", __func__);
    goto error;
  }
  /* Deep-copy value by marshalling it through a temporary buffer. */
  kill_marshal(tmp, value);
  if (kill_unmarshal(msg->attack_data, tmp) == -1) {
    event_warnx("%s: kill_unmarshal", __func__);
    goto error;
  }
  msg->attack_set = 1;
  evbuffer_free(tmp);
  return (0);
error:
  if (tmp != NULL)
    evbuffer_free(tmp);
  if (msg->attack_data != NULL) {
    kill_free(msg->attack_data);
    msg->attack_data = NULL;
  }
  return (-1);
}

int
msg_run_assign(struct msg *msg, int off,
    const struct run* value)
{
  struct evbuffer *tmp = NULL;
  if (!msg->run_set || off < 0 || off >= msg->run_length)
    return (-1);
  run_clear(msg->run_data[off]);
  if ((tmp = evbuffer_new()) == NULL) {
    event_warn("%s: evbuffer_new()", __func__);
    goto error;
  }
  run_marshal(tmp, value);
  if (run_unmarshal(msg->run_data[off], tmp) == -1) {
    event_warnx("%s: run_unmarshal", __func__);
    goto error;
  }
  evbuffer_free(tmp);
  return (0);
error:
  if (tmp != NULL)
    evbuffer_free(tmp);
  run_clear(msg->run_data[off]);
  return (-1);
}

int
msg_from_name_get(struct msg *msg, char * *value)
{
  if (msg->from_name_set != 1)
    return (-1);
  *value = msg->from_name_data;
  return (0);
}

int
msg_to_name_get(struct msg *msg, char * *value)
{
  if (msg->to_name_set != 1)
    return (-1);
  *value = msg->to_name_data;
  return (0);
}

int
msg_attack_get(struct msg *msg, struct kill* *value)
{
  if (msg->attack_set != 1) {
    /* Allocate the nested struct on first access. */
    msg->attack_data = kill_new();
    if (msg->attack_data == NULL)
      return (-1);
    msg->attack_set = 1;
  }
  *value = msg->attack_data;
  return (0);
}

int
msg_run_get(struct msg *msg, int offset,
    struct run* *value)
{
  if (!msg->run_set || offset < 0 || offset >= msg->run_length)
    return (-1);
  *value = msg->run_data[offset];
  return (0);
}

void
msg_clear(struct msg *tmp)
{
  if (tmp->from_name_set == 1) {
    free (tmp->from_name_data);
    tmp->from_name_data = NULL;
    tmp->from_name_set = 0;
  }
  if (tmp->to_name_set == 1) {
    free (tmp->to_name_data);
    tmp->to_name_data = NULL;
    tmp->to_name_set = 0;
  }
  if (tmp->attack_set == 1) {
    kill_free(tmp->attack_data);
    tmp->attack_data = NULL;
    tmp->attack_set = 0;
  }
  if (tmp->run_set == 1) {
    int i;
    for (i = 0; i < tmp->run_length; ++i) {
      run_free(tmp->run_data[i]);
    }
    free(tmp->run_data);
    tmp->run_data = NULL;
    tmp->run_set = 0;
    tmp->run_length = 0;
    tmp->run_num_allocated = 0;
  }
}

void
msg_free(struct msg *tmp)
{
  if (tmp->from_name_data != NULL)
      free (tmp->from_name_data);
  if (tmp->to_name_data != NULL)
      free (tmp->to_name_data);
  if (tmp->attack_data != NULL)
      kill_free(tmp->attack_data);
  if (tmp->run_data != NULL) {
    int i;
    for (i = 0; i < tmp->run_length; ++i) {
      run_free(tmp->run_data[i]);
      tmp->run_data[i] = NULL;
    }
    free(tmp->run_data);
    tmp->run_data = NULL;
    tmp->run_length = 0;
    tmp->run_num_allocated = 0;
  }
  free(tmp);
}

void
msg_marshal(struct evbuffer *evbuf, const struct msg *tmp){
  evtag_marshal_string(evbuf, MSG_FROM_NAME, tmp->from_name_data);
  evtag_marshal_string(evbuf, MSG_TO_NAME, tmp->to_name_data);
  if (tmp->attack_set) {
    evtag_marshal_kill(evbuf, MSG_ATTACK, tmp->attack_data);
  }
  {
    int i;
    for (i = 0; i < tmp->run_length; ++i) {
      evtag_marshal_run(evbuf, MSG_RUN, tmp->run_data[i]);
    }
  }
}

int
msg_unmarshal(struct msg *tmp, struct evbuffer *evbuf)
{
  uint32_t tag;
  while (EVBUFFER_LENGTH(evbuf) > 0) {
    if (evtag_peek(evbuf, &tag) == -1)
      return (-1);

    switch (tag) {

      case MSG_FROM_NAME:

        if (tmp->from_name_set)
          return (-1);
        if (evtag_unmarshal_string(evbuf, MSG_FROM_NAME, &tmp->from_name_data) == -1) {
          event_warnx("%s: failed to unmarshal from_name", __func__);
          return (-1);
        }
        tmp->from_name_set = 1;
        break;

      case MSG_TO_NAME:

        if (tmp->to_name_set)
          return (-1);
        if (evtag_unmarshal_string(evbuf, MSG_TO_NAME, &tmp->to_name_data) == -1) {
          event_warnx("%s: failed to unmarshal to_name", __func__);
          return (-1);
        }
        tmp->to_name_set = 1;
        break;

      case MSG_ATTACK:

        if (tmp->attack_set)
          return (-1);
        tmp->attack_data = kill_new();
        if (tmp->attack_data == NULL)
          return (-1);
        if (evtag_unmarshal_kill(evbuf, MSG_ATTACK, tmp->attack_data) == -1) {
          event_warnx("%s: failed to unmarshal attack", __func__);
          return (-1);
        }
        tmp->attack_set = 1;
        break;

      case MSG_RUN:

        if (msg_run_add(tmp) == NULL)
          return (-1);
        if (evtag_unmarshal_run(evbuf, MSG_RUN,
            tmp->run_data[tmp->run_length - 1]) == -1) {
          event_warnx("%s: failed to unmarshal run", __func__);
          return (-1);
        }
        tmp->run_set = 1;
        break;

      default:
        return (-1);
    }
  }

  if (msg_complete(tmp) == -1)
    return (-1);
  return (0);
}

int
msg_complete(struct msg *msg)
{
  if (!msg->from_name_set)
    return (-1);
  if (!msg->to_name_set)
    return (-1);
  if (msg->attack_set && kill_complete(msg->attack_data) == -1)
    return (-1);
  {
    int i;
    for (i = 0; i < msg->run_length; ++i) {
      if (run_complete(msg->run_data[i]) == -1)
        return (-1);
    }
  }
  return (0);
}

int
evtag_unmarshal_msg(struct evbuffer *evbuf, uint32_t need_tag, struct msg *msg)
{
  uint32_t tag;
  int res = -1;

  struct evbuffer *tmp = evbuffer_new();

  if (evtag_unmarshal(evbuf, &tag, tmp) == -1 || tag != need_tag)
    goto error;

  if (msg_unmarshal(msg, tmp) == -1)
    goto error;

  res = 0;

 error:
  evbuffer_free(tmp);
  return (res);
}

void
evtag_marshal_msg(struct evbuffer *evbuf, uint32_t tag, const struct msg *msg)
{
  struct evbuffer *_buf = evbuffer_new();
  assert(_buf != NULL);
  evbuffer_drain(_buf, -1);
  msg_marshal(_buf, msg);
  evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf), EVBUFFER_LENGTH(_buf));
  evbuffer_free(_buf);
}

/*
 * Implementation of kill
 */

static struct kill_access_ __kill_base = {
  kill_weapon_assign,
  kill_weapon_get,
  kill_action_assign,
  kill_action_get,
  kill_how_often_assign,
  kill_how_often_get,
};

struct kill *
kill_new(void)
{
  struct kill *tmp;
  if ((tmp = malloc(sizeof(struct kill))) == NULL) {
    event_warn("%s: malloc", __func__);
    return (NULL);
  }
  tmp->base = &__kill_base;

  tmp->weapon_data = NULL;
  tmp->weapon_set = 0;

  tmp->action_data = NULL;
  tmp->action_set = 0;

  tmp->how_often_data = 0;
  tmp->how_often_set = 0;

  return (tmp);
}

int
kill_weapon_assign(struct kill *msg,
    const char * value)
{
  if (msg->weapon_data != NULL)
    free(msg->weapon_data);
  if ((msg->weapon_data = strdup(value)) == NULL)
    return (-1);
  msg->weapon_set = 1;
  return (0);
}

int
kill_action_assign(struct kill *msg,
    const char * value)
{
  if (msg->action_data != NULL)
    free(msg->action_data);
  if ((msg->action_data = strdup(value)) == NULL)
    return (-1);
  msg->action_set = 1;
  return (0);
}

int
kill_how_often_assign(struct kill *msg, const uint32_t value)
{
  msg->how_often_set = 1;
  msg->how_often_data = value;
  return (0);
}

int
kill_weapon_get(struct kill *msg, char * *value)
{
  if (msg->weapon_set != 1)
    return (-1);
  *value = msg->weapon_data;
  return (0);
}

int
kill_action_get(struct kill *msg, char * *value)
{
  if (msg->action_set != 1)
    return (-1);
  *value = msg->action_data;
  return (0);
}

int
kill_how_often_get(struct kill *msg, uint32_t *value)
{
  if (msg->how_often_set != 1)
    return (-1);
  *value = msg->how_often_data;
  return (0);
}

void
kill_clear(struct kill *tmp)
{
  if (tmp->weapon_set == 1) {
    free (tmp->weapon_data);
    tmp->weapon_data = NULL;
    tmp->weapon_set = 0;
  }
  if (tmp->action_set == 1) {
    free (tmp->action_data);
    tmp->action_data = NULL;
    tmp->action_set = 0;
  }
  tmp->how_often_set = 0;
}

void
kill_free(struct kill *tmp)
{
  if (tmp->weapon_data != NULL)
      free (tmp->weapon_data);
  if (tmp->action_data != NULL)
      free (tmp->action_data);
  free(tmp);
}

void
kill_marshal(struct evbuffer *evbuf, const struct kill *tmp){
  evtag_marshal_string(evbuf, KILL_WEAPON, tmp->weapon_data);
  evtag_marshal_string(evbuf, KILL_ACTION, tmp->action_data);
  if (tmp->how_often_set) {
    evtag_marshal_int(evbuf, KILL_HOW_OFTEN, tmp->how_often_data);
  }
}

int
kill_unmarshal(struct kill *tmp, struct evbuffer *evbuf)
{
  uint32_t tag;
  while (EVBUFFER_LENGTH(evbuf) > 0) {
    if (evtag_peek(evbuf, &tag) == -1)
      return (-1);

    switch (tag) {

      case KILL_WEAPON:

        if (tmp->weapon_set)
          return (-1);
        if (evtag_unmarshal_string(evbuf, KILL_WEAPON, &tmp->weapon_data) == -1) {
          event_warnx("%s: failed to unmarshal weapon", __func__);
          return (-1);
        }
        tmp->weapon_set = 1;
        break;

      case KILL_ACTION:

        if (tmp->action_set)
          return (-1);
        if (evtag_unmarshal_string(evbuf, KILL_ACTION, &tmp->action_data) == -1) {
          event_warnx("%s: failed to unmarshal action", __func__);
          return (-1);
        }
        tmp->action_set = 1;
        break;

      case KILL_HOW_OFTEN:

        if (tmp->how_often_set)
          return (-1);
        if (evtag_unmarshal_int(evbuf, KILL_HOW_OFTEN, &tmp->how_often_data) == -1) {
          event_warnx("%s: failed to unmarshal how_often", __func__);
          return (-1);
        }
        tmp->how_often_set = 1;
        break;

      default:
        return (-1);
    }
  }

  if (kill_complete(tmp) == -1)
    return (-1);
  return (0);
}

int
kill_complete(struct kill *msg)
{
  if (!msg->weapon_set)
    return (-1);
  if (!msg->action_set)
    return (-1);
  return (0);
}

int
evtag_unmarshal_kill(struct evbuffer *evbuf, uint32_t need_tag, struct kill *msg)
{
  uint32_t tag;
  int res = -1;

  struct evbuffer *tmp = evbuffer_new();

  if (evtag_unmarshal(evbuf, &tag, tmp) == -1 || tag != need_tag)
    goto error;

  if (kill_unmarshal(msg, tmp) == -1)
    goto error;

  res = 0;

 error:
  evbuffer_free(tmp);
  return (res);
}

void
evtag_marshal_kill(struct evbuffer *evbuf, uint32_t tag, const struct kill *msg)
{
  struct evbuffer *_buf = evbuffer_new();
  assert(_buf != NULL);
  evbuffer_drain(_buf, -1);
  kill_marshal(_buf, msg);
  evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf), EVBUFFER_LENGTH(_buf));
  evbuffer_free(_buf);
}

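/*
 * Editor's illustrative sketch, not produced by event_rpcgen.py:
 * msg_attack_assign() copies its argument by marshalling it through a
 * temporary evbuffer, so the caller keeps ownership of the kill it passes in.
 * The function name and field values below are hypothetical; the sketch is
 * fenced off with #if 0 so it does not affect the build.
 */
#if 0
static int
example_msg_attack(struct msg *m)
{
  struct kill *k = kill_new();

  if (k == NULL)
    return (-1);
  /* weapon and action are required fields; kill_complete() checks them. */
  if (kill_weapon_assign(k, "feather") == -1 ||
      kill_action_assign(k, "tickle") == -1) {
    kill_free(k);
    return (-1);
  }
  kill_how_often_assign(k, 1);

  /* m now holds its own deep copy of k; free the local one either way. */
  if (msg_attack_assign(m, k) == -1) {
    kill_free(k);
    return (-1);
  }
  kill_free(k);
  return (0);
}
#endif
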
/*
 * Implementation of run
 */

static struct run_access_ __run_base = {
  run_how_assign,
  run_how_get,
  run_some_bytes_assign,
  run_some_bytes_get,
  run_fixed_bytes_assign,
  run_fixed_bytes_get,
};

struct run *
run_new(void)
{
  struct run *tmp;
  if ((tmp = malloc(sizeof(struct run))) == NULL) {
    event_warn("%s: malloc", __func__);
    return (NULL);
  }
  tmp->base = &__run_base;

  tmp->how_data = NULL;
  tmp->how_set = 0;

  tmp->some_bytes_data = NULL;
  tmp->some_bytes_length = 0;
  tmp->some_bytes_set = 0;

  memset(tmp->fixed_bytes_data, 0, sizeof(tmp->fixed_bytes_data));
  tmp->fixed_bytes_set = 0;

  return (tmp);
}

int
run_how_assign(struct run *msg,
    const char * value)
{
  if (msg->how_data != NULL)
    free(msg->how_data);
  if ((msg->how_data = strdup(value)) == NULL)
    return (-1);
  msg->how_set = 1;
  return (0);
}

int
run_some_bytes_assign(struct run *msg, const uint8_t * value, uint32_t len)
{
  if (msg->some_bytes_data != NULL)
    free (msg->some_bytes_data);
  msg->some_bytes_data = malloc(len);
  if (msg->some_bytes_data == NULL)
    return (-1);
  msg->some_bytes_set = 1;
  msg->some_bytes_length = len;
  memcpy(msg->some_bytes_data, value, len);
  return (0);
}

int
run_fixed_bytes_assign(struct run *msg, const uint8_t *value)
{
  msg->fixed_bytes_set = 1;
  memcpy(msg->fixed_bytes_data, value, 24);
  return (0);
}

int
run_how_get(struct run *msg, char * *value)
{
  if (msg->how_set != 1)
    return (-1);
  *value = msg->how_data;
  return (0);
}

int
run_some_bytes_get(struct run *msg, uint8_t * *value, uint32_t *plen)
{
  if (msg->some_bytes_set != 1)
    return (-1);
  *value = msg->some_bytes_data;
  *plen = msg->some_bytes_length;
  return (0);
}

int
run_fixed_bytes_get(struct run *msg, uint8_t **value)
{
  if (msg->fixed_bytes_set != 1)
    return (-1);
  *value = msg->fixed_bytes_data;
  return (0);
}

void
run_clear(struct run *tmp)
{
  if (tmp->how_set == 1) {
    free (tmp->how_data);
    tmp->how_data = NULL;
    tmp->how_set = 0;
  }
  if (tmp->some_bytes_set == 1) {
    free (tmp->some_bytes_data);
    tmp->some_bytes_data = NULL;
    tmp->some_bytes_length = 0;
    tmp->some_bytes_set = 0;
  }
  tmp->fixed_bytes_set = 0;
  memset(tmp->fixed_bytes_data, 0, sizeof(tmp->fixed_bytes_data));
}

void
run_free(struct run *tmp)
{
  if (tmp->how_data != NULL)
      free (tmp->how_data);
  if (tmp->some_bytes_data != NULL)
      free (tmp->some_bytes_data);
  free(tmp);
}

void
run_marshal(struct evbuffer *evbuf, const struct run *tmp){
  evtag_marshal_string(evbuf, RUN_HOW, tmp->how_data);
  if (tmp->some_bytes_set) {
    evtag_marshal(evbuf, RUN_SOME_BYTES, tmp->some_bytes_data, tmp->some_bytes_length);
  }
  evtag_marshal(evbuf, RUN_FIXED_BYTES, tmp->fixed_bytes_data, sizeof(tmp->fixed_bytes_data));
}

int
run_unmarshal(struct run *tmp, struct evbuffer *evbuf)
{
  uint32_t tag;
  while (EVBUFFER_LENGTH(evbuf) > 0) {
    if (evtag_peek(evbuf, &tag) == -1)
      return (-1);

    switch (tag) {

      case RUN_HOW:

        if (tmp->how_set)
          return (-1);
        if (evtag_unmarshal_string(evbuf, RUN_HOW, &tmp->how_data) == -1) {
          event_warnx("%s: failed to unmarshal how", __func__);
          return (-1);
        }
        tmp->how_set = 1;
        break;

      case RUN_SOME_BYTES:

        if (tmp->some_bytes_set)
          return (-1);
        if (evtag_payload_length(evbuf, &tmp->some_bytes_length) == -1)
          return (-1);
        if (tmp->some_bytes_length > EVBUFFER_LENGTH(evbuf))
          return (-1);
        if ((tmp->some_bytes_data = malloc(tmp->some_bytes_length)) == NULL)
          return (-1);
        if (evtag_unmarshal_fixed(evbuf, RUN_SOME_BYTES, tmp->some_bytes_data, tmp->some_bytes_length) == -1) {
          event_warnx("%s: failed to unmarshal some_bytes", __func__);
          return (-1);
        }
        tmp->some_bytes_set = 1;
        break;

      case RUN_FIXED_BYTES:

        if (tmp->fixed_bytes_set)
          return (-1);
        if (evtag_unmarshal_fixed(evbuf, RUN_FIXED_BYTES, tmp->fixed_bytes_data, sizeof(tmp->fixed_bytes_data)) == -1) {
          event_warnx("%s: failed to unmarshal fixed_bytes", __func__);
          return (-1);
        }
        tmp->fixed_bytes_set = 1;
        break;

      default:
        return (-1);
    }
  }

  if (run_complete(tmp) == -1)
    return (-1);
  return (0);
}

int
run_complete(struct run *msg)
{
  if (!msg->how_set)
    return (-1);
  if (!msg->fixed_bytes_set)
    return (-1);
  return (0);
}

int
evtag_unmarshal_run(struct evbuffer *evbuf, uint32_t need_tag, struct run *msg)
{
  uint32_t tag;
  int res = -1;

  struct evbuffer *tmp = evbuffer_new();

  if (evtag_unmarshal(evbuf, &tag, tmp) == -1 || tag != need_tag)
    goto error;

  if (run_unmarshal(msg, tmp) == -1)
    goto error;

  res = 0;

 error:
  evbuffer_free(tmp);
  return (res);
}

void
evtag_marshal_run(struct evbuffer *evbuf, uint32_t tag, const struct run *msg)
{
  struct evbuffer *_buf = evbuffer_new();
  assert(_buf != NULL);
  evbuffer_drain(_buf, -1);
  run_marshal(_buf, msg);
  evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf), EVBUFFER_LENGTH(_buf));
  evbuffer_free(_buf);
}
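
/*
 * Editor's illustrative sketch, not produced by event_rpcgen.py:
 * msg_run_add() grows msg->run_data (doubling run_num_allocated as needed)
 * and returns a freshly allocated run that the msg owns and frees in
 * msg_free().  fixed_bytes is a 24-byte array, so run_fixed_bytes_assign()
 * always copies exactly 24 bytes.  The function name and values below are
 * hypothetical; the sketch is fenced off with #if 0 so it does not affect
 * the build.
 */
#if 0
static int
example_msg_runs(struct msg *m)
{
  uint8_t fixed[24];
  struct run *r;
  int i;

  memset(fixed, 0xa5, sizeof(fixed));
  for (i = 0; i < 3; ++i) {
    if ((r = msg_run_add(m)) == NULL)
      return (-1);
    /* how and fixed_bytes are required fields; some_bytes is optional. */
    if (run_how_assign(r, "very fast") == -1 ||
        run_fixed_bytes_assign(r, fixed) == -1)
      return (-1);
  }
  return (0);
}
#endif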