virtproxy: integration, fixup connection checks
[qemu/mdroth.git] / virtproxy.c
blob55aa7e90d60af6b71fdccd1bfd7fcaf7ec73ed0d
/*
 * virt-proxy - host/guest communication layer
 *
 * Copyright IBM Corp. 2010
 *
 * Authors:
 *  Michael Roth <mdroth@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
14 #include "virtproxy.h"
15 #include "qemu_socket.h"
/* Debug/trace plumbing: TRACE() is compiled out unless DEBUG_VP is
 * defined; LOG() is always active. Both print to stderr with the
 * file/function (and, for TRACE, line) prepended. */
#define DEBUG_VP

#ifdef DEBUG_VP
#define TRACE(msg, ...) do { \
    fprintf(stderr, "%s:%s():L%d: " msg "\n", \
            __FILE__, __FUNCTION__, __LINE__, ## __VA_ARGS__); \
} while(0)
#else
#define TRACE(msg, ...) \
    do { } while (0)
#endif

#define LOG(msg, ...) do { \
    fprintf(stderr, "%s:%s(): " msg "\n", \
            __FILE__, __FUNCTION__, ## __VA_ARGS__); \
} while(0)

#define VP_SERVICE_ID_LEN 32    /* max length of service id string */
#define VP_PKT_DATA_LEN 1024    /* max proxied bytes per VPPacket */
#define VP_CONN_DATA_LEN 1024   /* max bytes conns can send at a time */
#define VP_CHAN_DATA_LEN 4096   /* max bytes channel can send at a time */
#define VP_MAGIC 0x1F374059     /* sanity marker stamped on every VPPacket */
40 /* listening fd, one for each service we're forwarding to remote end */
41 typedef struct VPOForward {
42 VPDriver *drv;
43 int listen_fd;
44 char service_id[VP_SERVICE_ID_LEN];
45 QLIST_ENTRY(VPOForward) next;
46 } VPOForward;
48 /* service_id->path/port mapping of each service forwarded from remote end */
49 typedef struct VPIForward {
50 VPDriver *drv;
51 char service_id[VP_SERVICE_ID_LEN];
52 QemuOpts *socket_opts;
53 QLIST_ENTRY(VPIForward) next;
54 } VPIForward;
56 /* proxied client/server connected states */
57 typedef struct VPConn {
58 VPDriver *drv;
59 int client_fd;
60 int server_fd;
61 enum {
62 VP_CONN_CLIENT = 1,
63 VP_CONN_SERVER,
64 } type;
65 enum {
66 VP_STATE_NEW = 1, /* accept()'d and registered fd */
67 VP_STATE_INIT, /* sent init pkt to remote end, waiting for ack */
68 VP_STATE_CONNECTED, /* client and server connected */
69 } state;
70 QLIST_ENTRY(VPConn) next;
71 } VPConn;
73 typedef struct VPControlMsg {
74 enum {
75 VP_CONTROL_CONNECT_INIT = 1,
76 VP_CONTROL_CONNECT_ACK,
77 VP_CONTROL_CLOSE,
78 } type;
79 union {
80 /* tell remote end connect to server and map client_fd to it */
81 struct {
82 int client_fd;
83 char service_id[VP_SERVICE_ID_LEN];
84 } connect_init;
85 /* tell remote end we've created the connection to the server,
86 * and give them the corresponding fd to use so we don't have
87 * to do a reverse lookup everytime
89 struct {
90 int client_fd;
91 int server_fd;
92 } connect_ack;
93 /* tell remote end to close fd in question, presumably because
94 * connection was closed on our end
96 struct {
97 int client_fd;
98 int server_fd;
99 } close;
100 } args;
101 } VPControlMsg;
103 typedef struct VPPacket {
104 enum {
105 VP_PKT_CONTROL = 1,
106 VP_PKT_CLIENT,
107 VP_PKT_SERVER,
108 } type;
109 union {
110 VPControlMsg msg;
111 struct {
112 int client_fd;
113 int server_fd;
114 int bytes;
115 char data[VP_PKT_DATA_LEN];
116 } proxied;
117 } payload;
118 int magic;
119 } __attribute__((__packed__)) VPPacket;
121 struct VPDriver {
122 enum vp_context ctx;
123 int channel_fd;
124 int listen_fd;
125 CharDriverState *chr;
126 char buf[sizeof(VPPacket)];
127 int buflen;
128 QLIST_HEAD(, VPOForward) oforwards;
129 QLIST_HEAD(, VPIForward) iforwards;
130 QLIST_HEAD(, VPConn) conns;
133 static QemuOptsList vp_socket_opts = {
134 .name = "vp_socket_opts",
135 .head = QTAILQ_HEAD_INITIALIZER(vp_socket_opts.head),
136 .desc = {
138 .name = "path",
139 .type = QEMU_OPT_STRING,
141 .name = "host",
142 .type = QEMU_OPT_STRING,
144 .name = "port",
145 .type = QEMU_OPT_STRING,
147 .name = "ipv4",
148 .type = QEMU_OPT_BOOL,
150 .name = "ipv6",
151 .type = QEMU_OPT_BOOL,
153 { /* end if list */ }
157 static void vp_channel_read(void *opaque);
159 static int vp_channel_send_all(VPDriver *drv, uint8_t *buf, int count)
161 int ret;
162 CharDriverState *chr = drv->chr;
164 if (drv->chr != NULL) {
165 /* send data to guest via channel device's read handler */
166 vp_chr_read(chr, buf, count);
167 /* TODO: we assume here the full buffer was written to device
168 * due to the dev write handler being a void function.
169 * can we confirm? Do we need to?
171 ret = count;
172 } else if (drv->channel_fd != -1) {
173 /* send data to host via channel fd */
174 ret = vp_send_all(drv->channel_fd, buf, count);
175 if (ret == -1) {
176 LOG("error sending data");
177 goto out_bad;
179 } else {
180 LOG("driver in unknown state");
181 goto out_bad;
184 return ret;
185 out_bad:
186 LOG("unable to send to channel");
187 return -1;
190 /* get VPConn by fd, "client" denotes whether to look for client or server */
191 static VPConn *get_conn(const VPDriver *drv, int fd, bool client)
193 VPConn *c = NULL;
194 int cur_fd;
196 QLIST_FOREACH(c, &drv->conns, next) {
197 cur_fd = client ? c->client_fd : c->server_fd;
198 if (cur_fd == fd) {
199 return c;
203 return NULL;
206 static void vp_channel_accept(void *opaque);
208 /* get VPOForward by service_id */
209 static VPOForward *get_oforward(const VPDriver *drv, const char *service_id)
211 VPOForward *f = NULL;
213 QLIST_FOREACH(f, &drv->oforwards, next) {
214 if (strncmp(f->service_id, service_id, VP_SERVICE_ID_LEN) == 0) {
215 return f;
219 return NULL;
222 /* get VPIForward by service_id */
223 static VPIForward *get_iforward(const VPDriver *drv, const char *service_id)
225 VPIForward *f = NULL;
227 QLIST_FOREACH(f, &drv->iforwards, next) {
228 if (strncmp(f->service_id, service_id, VP_SERVICE_ID_LEN) == 0) {
229 return f;
233 return NULL;
236 /* read handler for proxied connections */
237 static void vp_conn_read(void *opaque)
239 VPConn *conn = opaque;
240 VPDriver *drv = conn->drv;
241 VPPacket pkt;
242 char buf[VP_CONN_DATA_LEN];
243 int fd, count, ret;
244 bool client;
246 TRACE("called with opaque: %p, drv: %p", opaque, drv);
248 if (conn->state != VP_STATE_CONNECTED) {
249 LOG("invalid connection state");
250 return;
253 if (conn->type != VP_CONN_CLIENT && conn->type != VP_CONN_SERVER) {
254 LOG("invalid connection type");
255 return;
258 /* TODO: all fields should be explicitly set so we shouldn't
259 * need to memset. this might hurt if we beef up VPPacket size
261 memset(&pkt, 0, sizeof(VPPacket));
262 pkt.magic = VP_MAGIC;
264 if (conn->type == VP_CONN_CLIENT) {
265 client = true;
266 fd = conn->client_fd;
267 } else {
268 client = false;
269 fd = conn->server_fd;
272 count = read(fd, buf, VP_CONN_DATA_LEN);
273 if (count == -1) {
274 LOG("read() failed: %s", strerror(errno));
275 return;
276 } else if (count == 0) {
277 /* connection closed, tell remote end to clean up */
278 TRACE("connection closed");
279 pkt.type = VP_PKT_CONTROL;
280 pkt.payload.msg.type = VP_CONTROL_CLOSE;
281 if (client) {
282 /* we're closing the client, have remote close the server conn */
283 TRACE("closing connection for client fd %d", conn->client_fd);
284 pkt.payload.msg.args.close.client_fd = -1;
285 pkt.payload.msg.args.close.server_fd = conn->server_fd;
286 } else {
287 TRACE("closing connection for server fd %d", conn->server_fd);
288 pkt.payload.msg.args.close.server_fd = -1;
289 pkt.payload.msg.args.close.client_fd = conn->client_fd;;
291 /* clean up things on our end */
292 closesocket(fd);
293 vp_set_fd_handler(fd, NULL, NULL, NULL);
294 QLIST_REMOVE(conn, next);
295 qemu_free(conn);
296 } else {
297 TRACE("data read");
298 pkt.type = client ? VP_PKT_CLIENT : VP_PKT_SERVER;
299 pkt.payload.proxied.client_fd = conn->client_fd;
300 pkt.payload.proxied.server_fd = conn->server_fd;
301 memcpy(pkt.payload.proxied.data, buf, count);
302 pkt.payload.proxied.bytes = count;
305 ret = vp_channel_send_all(drv, (uint8_t*)&pkt, sizeof(VPPacket));
306 if (ret == -1) {
307 LOG("error sending data over channel");
308 return;
310 if (ret != sizeof(VPPacket)) {
311 TRACE("buffer full?");
312 return;
316 /* accept handler for communication channel
318 * accept()s connection to communication channel (for sockets), and sets
319 * up the read handler for resulting FD.
321 static void vp_channel_accept(void *opaque)
323 VPDriver *drv = opaque;
324 struct sockaddr_in saddr;
325 struct sockaddr *addr;
326 socklen_t len;
327 int fd;
329 TRACE("called with opaque: %p", drv);
331 for(;;) {
332 len = sizeof(saddr);
333 addr = (struct sockaddr *)&saddr;
334 fd = qemu_accept(drv->listen_fd, addr, &len);
336 if (fd < 0 && errno != EINTR) {
337 TRACE("accept() failed");
338 return;
339 } else if (fd >= 0) {
340 TRACE("accepted connection");
341 break;
345 drv->channel_fd = fd;
346 vp_set_fd_handler(drv->channel_fd, vp_channel_read, NULL, drv);
347 /* dont accept anymore connections until channel_fd is closed */
348 vp_set_fd_handler(drv->listen_fd, NULL, NULL, NULL);
351 /* handle control packets
353 * process VPPackets containing control messages
355 static int vp_handle_control_packet(VPDriver *drv, const VPPacket *pkt)
357 const VPControlMsg *msg = &pkt->payload.msg;
358 int ret;
360 TRACE("called with drv: %p", drv);
362 switch (msg->type) {
363 case VP_CONTROL_CONNECT_INIT: {
364 int client_fd = msg->args.connect_init.client_fd;
365 int server_fd;
366 char service_id[VP_SERVICE_ID_LEN];
367 VPPacket resp_pkt;
368 VPConn *new_conn;
369 VPIForward *iforward;
371 pstrcpy(service_id, VP_SERVICE_ID_LEN,
372 msg->args.connect_init.service_id);
373 TRACE("setting up connection for service id %s", service_id);
375 /* create server connection on behalf of remote end */
376 iforward = get_iforward(drv, service_id);
377 if (iforward == NULL) {
378 LOG("no forwarder configured for service id");
379 return -1;
382 qemu_opts_print(iforward->socket_opts, NULL);
383 if (qemu_opt_get(iforward->socket_opts, "host") != NULL) {
384 server_fd = inet_connect_opts(iforward->socket_opts);
385 } else if (qemu_opt_get(iforward->socket_opts, "path") != NULL) {
386 server_fd = unix_connect_opts(iforward->socket_opts);
387 } else {
388 LOG("unable to find listening socket host/addr info");
389 return -1;
392 if (server_fd == -1) {
393 LOG("failed to create connection to service with id %s",
394 service_id);
396 TRACE("server_fd: %d", server_fd);
398 new_conn = qemu_mallocz(sizeof(VPConn));
399 if (!new_conn) {
400 LOG("memory allocation failed");
401 return -1;
404 /* send a connect_ack back over the channel */
405 /* TODO: all fields should be explicitly set so we shouldn't
406 * need to memset. this might hurt if we beef up VPPacket size
408 memset(&resp_pkt, 0, sizeof(resp_pkt));
409 resp_pkt.type = VP_PKT_CONTROL;
410 resp_pkt.payload.msg.type = VP_CONTROL_CONNECT_ACK;
411 resp_pkt.payload.msg.args.connect_ack.server_fd = server_fd;
412 resp_pkt.payload.msg.args.connect_ack.client_fd = client_fd;
413 resp_pkt.magic = VP_MAGIC;
415 /* TODO: can this potentially block or cause a deadlock with
416 * the remote end? need to look into potentially buffering these
417 * if it looks like the remote end is waiting for us to read data
418 * off the channel.
420 if (!drv->chr && drv->channel_fd == -1) {
421 TRACE("channel no longer connected, ignoring packet");
422 return -1;
425 ret = vp_channel_send_all(drv, (void *)&resp_pkt, sizeof(resp_pkt));
426 if (ret == -1) {
427 LOG("error sending data over channel");
428 return -1;
430 if (ret != sizeof(resp_pkt)) {
431 TRACE("buffer full? %d bytes remaining", ret);
432 return -1;
435 /* add new VPConn to list and set a read handler for it */
436 new_conn->drv = drv;
437 new_conn->client_fd = client_fd;
438 new_conn->server_fd = server_fd;
439 new_conn->type = VP_CONN_SERVER;
440 new_conn->state = VP_STATE_CONNECTED;
441 QLIST_INSERT_HEAD(&drv->conns, new_conn, next);
442 vp_set_fd_handler(server_fd, vp_conn_read, NULL, new_conn);
444 break;
446 case VP_CONTROL_CONNECT_ACK: {
447 int client_fd = msg->args.connect_ack.client_fd;
448 int server_fd = msg->args.connect_ack.server_fd;
449 VPConn *conn;
451 TRACE("recieved ack from remote end for client fd %d", client_fd);
453 if (server_fd <= 0) {
454 LOG("remote end sent invalid server fd");
455 return -1;
458 conn = get_conn(drv, client_fd, true);
460 if (conn == NULL) {
461 LOG("failed to find connection with client_fd %d", client_fd);
462 return -1;
465 conn->server_fd = server_fd;
466 conn->state = VP_STATE_CONNECTED;
467 vp_set_fd_handler(client_fd, vp_conn_read, NULL, conn);
469 break;
471 case VP_CONTROL_CLOSE: {
472 int fd;
473 VPConn *conn;
475 TRACE("closing connection on behalf of remote end");
477 if (msg->args.close.client_fd >= 0) {
478 fd = msg->args.close.client_fd;
479 TRACE("recieved close msg from remote end for client fd %d", fd);
480 conn = get_conn(drv, fd, true);
481 } else if (msg->args.close.server_fd >= 0) {
482 fd = msg->args.close.server_fd;
483 TRACE("recieved close msg from remote end for server fd %d", fd);
484 conn = get_conn(drv, fd, false);
485 } else {
486 LOG("invalid fd");
487 return -1;
490 if (conn == NULL) {
491 LOG("failed to find conn with specified fd %d", fd);
492 return -1;
495 closesocket(fd);
496 vp_set_fd_handler(fd, NULL, NULL, conn);
497 QLIST_REMOVE(conn, next);
498 qemu_free(conn);
499 break;
502 return 0;
505 /* handle data packets
507 * process VPPackets containing data and send them to the corresponding
508 * FDs
510 static int vp_handle_data_packet(void *drv, const VPPacket *pkt)
512 int fd, ret;
514 TRACE("called with drv: %p", drv);
516 if (pkt->type == VP_PKT_CLIENT) {
517 TRACE("recieved client packet, client fd: %d, server fd: %d",
518 pkt->payload.proxied.client_fd, pkt->payload.proxied.server_fd);
519 fd = pkt->payload.proxied.server_fd;
520 } else if (pkt->type == VP_PKT_SERVER) {
521 TRACE("recieved server packet, client fd: %d, server fd: %d",
522 pkt->payload.proxied.client_fd, pkt->payload.proxied.server_fd);
523 fd = pkt->payload.proxied.client_fd;
524 } else {
525 TRACE("unknown packet type");
526 return -1;
529 /* TODO: proxied in non-blocking mode can causes us to spin here
530 * for slow servers/clients. need to use write()'s and maintain
531 * a per-conn write queue that we clear out before sending any
532 * more data to the fd
534 ret = vp_send_all(fd, (void *)pkt->payload.proxied.data,
535 pkt->payload.proxied.bytes);
536 if (ret == -1) {
537 LOG("error sending data over channel");
538 return -1;
539 } else if (ret != pkt->payload.proxied.bytes) {
540 TRACE("buffer full?");
541 return -1;
544 return 0;
547 static inline int vp_handle_packet(VPDriver *drv, const VPPacket *pkt)
549 int ret;
551 TRACE("called with drv: %p", drv);
553 if (pkt->magic != VP_MAGIC) {
554 LOG("invalid packet magic field");
555 return -1;
558 if (pkt->type == VP_PKT_CONTROL) {
559 ret = vp_handle_control_packet(drv, pkt);
560 } else if (pkt->type == VP_PKT_CLIENT || pkt->type == VP_PKT_SERVER) {
561 ret = vp_handle_data_packet(drv, pkt);
562 } else {
563 LOG("invalid packet type");
564 return -1;
567 return ret;
570 /* process packets read from the channel */
571 int vp_handle_packet_buf(VPDriver *drv, const void *buf, int count)
573 VPPacket pkt;
574 int ret, buf_offset;
575 char *pkt_ptr;
576 const char *buf_ptr;
578 if (drv->buflen + count >= sizeof(VPPacket)) {
579 TRACE("initial packet, drv->buflen: %d", drv->buflen);
580 pkt_ptr = (char *)&pkt;
581 memcpy(pkt_ptr, drv->buf, drv->buflen);
582 pkt_ptr += drv->buflen;
583 memcpy(pkt_ptr, buf, sizeof(VPPacket) - drv->buflen);
584 /* handle first packet */
585 ret = vp_handle_packet(drv, &pkt);
586 if (ret != 0) {
587 LOG("error handling packet");
589 /* handle the rest of the buffer */
590 buf_offset = sizeof(VPPacket) - drv->buflen;
591 drv->buflen = 0;
592 buf_ptr = buf + buf_offset;
593 count -= buf_offset;
594 while (count > 0) {
595 if (count >= sizeof(VPPacket)) {
596 /* handle full packet */
597 TRACE("additional packet, drv->buflen: %d", drv->buflen);
598 memcpy((void *)&pkt, buf_ptr, sizeof(VPPacket));
599 ret = vp_handle_packet(drv, &pkt);
600 if (ret != 0) {
601 LOG("error handling packet");
603 count -= sizeof(VPPacket);
604 buf_ptr += sizeof(VPPacket);
605 } else {
606 /* buffer the remainder */
607 TRACE("buffering packet, drv->buflen: %d", drv->buflen);
608 memcpy(drv->buf, buf_ptr, count);
609 drv->buflen = count;
610 break;
613 } else {
614 /* haven't got a full VPPacket yet, buffer for later */
615 TRACE("buffering packet, drv->buflen: %d", drv->buflen);
616 memcpy(drv->buf + drv->buflen, buf, count);
617 drv->buflen += count;
619 return 0;
622 /* read handler for communication channel
624 * de-multiplexes data coming in over the channel. for control messages
625 * we process them here, for data destined for a service or client we
626 * send it to the appropriate FD.
628 static void vp_channel_read(void *opaque)
630 VPDriver *drv = opaque;
631 int count, ret;
632 char buf[VP_CHAN_DATA_LEN];
634 TRACE("called with opaque: %p", drv);
636 count = read(drv->channel_fd, buf, sizeof(buf));
638 if (count == -1) {
639 LOG("read() failed: %s", strerror(errno));
640 return;
641 } else if (count == 0) {
642 /* TODO: channel closed, this probably shouldn't happen for guest-side
643 * serial/virtio-serial connections, but need to confirm and consider
644 * what should happen in this case. as it stands this virtproxy instance
645 * is basically defunct at this point, same goes for "client" instances
646 * of virtproxy where the remote end has hung-up.
648 LOG("channel connection closed");
649 vp_set_fd_handler(drv->channel_fd, NULL, NULL, drv);
650 drv->channel_fd = -1;
651 if (drv->listen_fd) {
652 vp_set_fd_handler(drv->listen_fd, vp_channel_accept, NULL, drv);
654 /* TODO: should close/remove/delete all existing VPConns here */
657 ret = vp_handle_packet_buf(drv, buf, count);
658 if (ret != 0) {
659 LOG("error handling packet stream");
663 /* handler to accept() and init new client connections */
664 static void vp_oforward_accept(void *opaque)
666 VPOForward *f = opaque;
667 VPDriver *drv = f->drv;
669 struct sockaddr_in saddr;
670 struct sockaddr *addr;
671 socklen_t len;
672 int fd, ret;
673 VPConn *conn = NULL;
674 VPPacket pkt;
675 VPControlMsg msg;
677 TRACE("called with opaque: %p, drv: %p", f, drv);
679 for(;;) {
680 len = sizeof(saddr);
681 addr = (struct sockaddr *)&saddr;
682 fd = qemu_accept(f->listen_fd, addr, &len);
684 if (fd < 0 && errno != EINTR) {
685 TRACE("accept() failed");
686 return;
687 } else if (fd >= 0) {
688 TRACE("accepted connection");
689 break;
693 if (!drv->chr && drv->channel_fd == -1) {
694 TRACE("communication channel not open, closing connection");
695 closesocket(fd);
696 return;
699 /* send init packet over channel */
700 memset(&msg, 0, sizeof(VPControlMsg));
701 msg.type = VP_CONTROL_CONNECT_INIT;
702 msg.args.connect_init.client_fd = fd;
703 pstrcpy(msg.args.connect_init.service_id, VP_SERVICE_ID_LEN, f->service_id);
705 memset(&pkt, 0, sizeof(VPPacket));
706 pkt.type = VP_PKT_CONTROL;
707 pkt.payload.msg = msg;
708 pkt.magic = VP_MAGIC;
710 ret = vp_channel_send_all(drv, (uint8_t *)&pkt, sizeof(VPPacket));
711 if (ret == -1) {
712 LOG("vp_send_all() failed");
713 return;
716 /* create new VPConn for client */
717 conn = qemu_mallocz(sizeof(VPConn));
718 conn->drv = drv;
719 conn->client_fd = fd;
720 conn->type = VP_CONN_CLIENT;
721 conn->state = VP_STATE_NEW;
722 QLIST_INSERT_HEAD(&drv->conns, conn, next);
724 socket_set_nonblock(fd);
727 VPDriver *vp_new(enum vp_context ctx, CharDriverState *s, int fd, bool listen)
729 VPDriver *drv = NULL;
731 drv = qemu_mallocz(sizeof(VPDriver));
732 drv->listen_fd = -1;
733 drv->channel_fd = -1;
734 drv->chr = NULL;
735 drv->ctx = ctx;
736 QLIST_INIT(&drv->oforwards);
737 QLIST_INIT(&drv->conns);
739 if (ctx == VP_CTX_CHARDEV) {
740 if (drv->chr == NULL) {
741 LOG("invalid virtproxy chardev");
742 goto out_bad;
744 drv->chr = s;
745 } else if (ctx == VP_CTX_FD) {
746 if (fd <= 0) {
747 LOG("invalid FD");
748 goto out_bad;
749 } else if (listen) {
750 /* provided FD is to be listened on for channel connection */
751 drv->listen_fd = fd;
752 vp_set_fd_handler(drv->listen_fd, vp_channel_accept, NULL, drv);
753 } else {
754 drv->channel_fd = fd;
755 vp_set_fd_handler(drv->channel_fd, vp_channel_read, NULL, drv);
757 } else {
758 LOG("invalid context");
759 goto out_bad;
762 return drv;
763 out_bad:
764 qemu_free(drv);
765 return NULL;
768 /* set/modify/remove a service_id -> net/unix listening socket mapping
770 * "service_id" is a user-defined id for the service. this is what the
771 * client end will tag it's connections with so that the remote end can
772 * route it to the proper socket on the remote end.
774 * "fd" is a listen()'ing socket we want virtproxy to listen for new
775 * connections of this service type on. set "fd" to -1 to remove the
776 * existing listening socket for this "service_id"
778 int vp_set_oforward(VPDriver *drv, int fd, const char *service_id)
780 VPOForward *f = get_oforward(drv, service_id);
782 if (fd == -1) {
783 if (f != NULL) {
784 vp_set_fd_handler(f->listen_fd, NULL, NULL, NULL);
785 QLIST_REMOVE(f, next);
786 qemu_free(f);
788 return 0;
791 if (f == NULL) {
792 f = qemu_mallocz(sizeof(VPOForward));
793 f->drv = drv;
794 strncpy(f->service_id, service_id, VP_SERVICE_ID_LEN);
795 QLIST_INSERT_HEAD(&drv->oforwards, f, next);
796 } else {
797 closesocket(f->listen_fd);
800 f->listen_fd = fd;
801 vp_set_fd_handler(f->listen_fd, vp_oforward_accept, NULL, f);
803 return 0;
806 /* add/modify a service_id -> net/unix socket mapping
808 * "service_id" is a user-defined id for the service. this is what the
809 * remote end will use to proxy connections to a specific service on
810 * our end.
812 * if "port" is NULL, "addr" is the address of the net socket the
813 * service is running on. otherwise, addr is the path to the unix socket
814 * the service is running on.
816 * if "port" AND "addr" are NULL, find and remove the current iforward
817 * for this "service_id" if it exists.
819 * "ipv6" is a bool denoting whether or not to use ipv6
821 int vp_set_iforward(VPDriver *drv, const char *service_id, const char *addr,
822 const char *port, bool ipv6)
824 VPIForward *f = get_iforward(drv, service_id);
826 if (addr == NULL && port == NULL) {
827 if (f != NULL) {
828 qemu_opts_del(f->socket_opts);
829 QLIST_REMOVE(f, next);
830 qemu_free(f);
832 return 0;
835 if (f == NULL) {
836 f = qemu_mallocz(sizeof(VPIForward));
837 f->drv = drv;
838 strncpy(f->service_id, service_id, VP_SERVICE_ID_LEN);
839 QLIST_INSERT_HEAD(&drv->iforwards, f, next);
840 } else {
841 qemu_opts_del(f->socket_opts);
844 /* stick socket-related options in a QemuOpts so we can
845 * utilize qemu socket utility functions directly
847 f->socket_opts = qemu_opts_create(&vp_socket_opts, NULL, 0);
848 if (port == NULL) {
849 /* no port given, assume unix path */
850 qemu_opt_set(f->socket_opts, "path", addr);
851 } else {
852 qemu_opt_set(f->socket_opts, "host", addr);
853 qemu_opt_set(f->socket_opts, "port", port);
856 if (ipv6) {
857 qemu_opt_set(f->socket_opts, "ipv6", "on");
858 } else {
859 qemu_opt_set(f->socket_opts, "ipv4", "on");
862 return 0;