#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <minix/sysutil.h>

#include <sys/ioc_net.h>
#include <net/gen/in.h>
#include <net/gen/tcp.h>
#include <net/gen/tcp_io.h>

#include <lwip/tcp_impl.h>
#include <lwip/ip_addr.h>

#include <minix/netsock.h>
#define TCP_BUF_SIZE    (32 << 10)

#define sock_alloc_buf(s)   debug_malloc(s)
#define sock_free_buf(x)    debug_free(x)

static int do_tcp_debug;
#if 0
#define debug_tcp_print(str, ...) printf("LWIP %s:%d : " str "\n", \
        __func__, __LINE__, ##__VA_ARGS__)
#else
#define debug_tcp_print(...)    debug_print(__VA_ARGS__)
#endif
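/*
 * Write buffering: data copied in from userspace is kept per socket in a
 * chain of wbuf buffers (sock->buf points to a struct wbuf_chain). Each
 * buffer tracks how much of it has already been handed to lwIP and is
 * still unacknowledged (unacked), how much has been acknowledged
 * (written), and how much has not been passed to tcp_write() yet
 * (rem_len).
 */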
struct wbuf_chain {
    struct wbuf * head;
    struct wbuf * tail;
    struct wbuf * unsent;   /* points to the first buffer that contains unsent
                               data. It may point anywhere between head and
                               tail */
};
static void tcp_error_callback(void *arg, err_t err)
{
    int perr = EIO;
    struct socket * sock = (struct socket *) arg;

    debug_tcp_print("socket num %ld err %d", get_sock_num(sock), err);

    if (sock->flags & SOCK_FLG_OP_PENDING) {
        sock_reply(sock, perr);
        sock->flags &= ~SOCK_FLG_OP_PENDING;
    } else if (sock_select_set(sock))
        sock_select_notify(sock);
    /*
     * When error callback is called the tcb either does not exist anymore
     * or is going to be deallocated soon after. We must not use the pcb
     * anymore.
     */
    sock->pcb = NULL;
}
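/*
 * Attach driver state to a freshly created tcp_pcb: allocate an empty
 * write-buffer chain for the socket, register the error callback and
 * disable the Nagle algorithm so small writes are pushed out immediately.
 */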
static int tcp_fill_new_socket(struct socket * sock, struct tcp_pcb * pcb)
{
    struct wbuf_chain * wc;

    if (!(wc = malloc(sizeof(struct wbuf_chain))))
        return ENOMEM;

    wc->head = wc->tail = wc->unsent = NULL;
    sock->buf = wc;
    sock->buf_size = 0;

    sock->pcb = pcb;
    tcp_arg(pcb, sock);
    tcp_err(pcb, tcp_error_callback);
    tcp_nagle_disable(pcb);

    return OK;
}
static int tcp_op_open(struct socket * sock, __unused message * m)
{
    struct tcp_pcb * pcb;
    int ret;

    debug_tcp_print("socket num %ld", get_sock_num(sock));

    if (!(pcb = tcp_new()))
        return ENOMEM;
    debug_tcp_print("new tcp pcb %p\n", pcb);

    if ((ret = tcp_fill_new_socket(sock, pcb)) != OK)
        tcp_abandon(pcb, 0);

    return ret;
}
static void tcp_recv_free(__unused void * data)
{
    pbuf_free((struct pbuf *) data);
}

static void tcp_backlog_free(void * data)
{
    tcp_abort((struct tcp_pcb *) data);
}
static void free_wbuf_chain(struct wbuf_chain * wc)
{
    struct wbuf * wb;

    assert(wc != NULL);

    wb = wc->head;
    while (wb) {
        struct wbuf * w = wb;
        debug_tcp_print("freeing wbuf %p", wb);
        wb = wb->next;
        debug_free(w);
    }

    free(wc);
}
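/*
 * Closing a socket: drop whatever is still queued on it (queued
 * connections for a listening socket, received pbufs otherwise), detach
 * all lwIP callbacks so none can fire into freed state, close the pcb
 * and release the TX buffer chain.
 */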
static void tcp_op_close(struct socket * sock, __unused message * m)
{
    debug_tcp_print("socket num %ld", get_sock_num(sock));

    if (sock->flags & SOCK_FLG_OP_LISTENING)
        sock_dequeue_data_all(sock, tcp_backlog_free);
    else
        sock_dequeue_data_all(sock, tcp_recv_free);
    debug_tcp_print("dequeued RX data");

    if (sock->pcb) {
        int err;

        /* we are not able to handle any callback anymore */
        tcp_arg((struct tcp_pcb *)sock->pcb, NULL);
        tcp_err((struct tcp_pcb *)sock->pcb, NULL);
        tcp_sent((struct tcp_pcb *)sock->pcb, NULL);
        tcp_recv((struct tcp_pcb *)sock->pcb, NULL);

        err = tcp_close(sock->pcb);
        assert(err == ERR_OK);
        sock->pcb = NULL;
    }
    debug_tcp_print("freed pcb");

    if (sock->buf) {
        free_wbuf_chain((struct wbuf_chain *) sock->buf);
        sock->buf = NULL;
    }
    debug_tcp_print("freed TX data");

    sock_reply_close(sock, OK);
    debug_tcp_print("socket unused");

    /* mark it as unused */
}
__unused static void print_tcp_payload(unsigned char * buf, int len)
{
    int i;

    printf("LWIP tcp payload (%d) :\n", len);
    for (i = 0; i < len; i++, buf++) {
        printf("%02x ", buf[0]);
    }
    printf("\n");
}
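/*
 * Copy received data from the queued pbufs to the caller of read().
 * A pbuf that is consumed completely is freed and dequeued; a partially
 * consumed pbuf only has its payload pointer advanced (pbuf_header()
 * with a negative offset) so the next read continues where this one
 * stopped. tcp_recved() re-opens the receive window for everything
 * delivered.
 */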
static int read_from_tcp(struct socket * sock, message * m)
{
    unsigned rem_buf, written = 0;
    struct pbuf * p;
    int err;

    assert(!(sock->flags & SOCK_FLG_OP_LISTENING) && sock->recv_head);

    rem_buf = m->COUNT;
    debug_tcp_print("socket num %ld recv buff sz %d", get_sock_num(sock), rem_buf);

    p = (struct pbuf *)sock->recv_head->data;
    while (rem_buf) {
        if (rem_buf >= p->len) {
            struct pbuf * np;

            /*
             * FIXME perhaps copy this to a local buffer and do a
             * single copy to user then
             */
            print_tcp_payload(p->payload, p->len);
            err = copy_to_user(m->m_source, p->payload, p->len,
                    (cp_grant_id_t) m->IO_GRANT, written);
            if (err != OK)
                goto cp_error;
            sock->recv_data_size -= p->len;
            written += p->len;
            rem_buf -= p->len;
            debug_tcp_print("whole pbuf copied (%d bytes)", p->len);

            if ((np = p->next)) {
                pbuf_ref(np);
                if (pbuf_free(p) != 1)
                    panic("LWIP : pbuf_free != 1");
                /*
                 * Mark where we are going to continue if an
                 * error occurs
                 */
                sock->recv_head->data = np;
            } else
                sock_dequeue_data(sock);

            if (sock->recv_head == NULL)
                break;
            p = (struct pbuf *)sock->recv_head->data;
        } else {
            /*
             * It must be PBUF_RAM for us to be able to shift the
             * payload pointer
             */
            assert(p->type == PBUF_RAM);

            print_tcp_payload(p->payload, rem_buf);
            err = copy_to_user(m->m_source, p->payload, rem_buf,
                    (cp_grant_id_t) m->IO_GRANT, written);
            if (err != OK)
                goto cp_error;
            sock->recv_data_size -= rem_buf;
            written += rem_buf;
            debug_tcp_print("partial pbuf copied (%d bytes)", rem_buf);
            /*
             * The whole pbuf hasn't been copied out, we only shift
             * the payload pointer to remember where to continue
             * next time.
             */
            pbuf_header(p, -rem_buf);
            break;
        }
    }

    debug_tcp_print("%d bytes written to userspace", written);
    //printf("%d wr, queue %d\n", written, sock->recv_data_size);
    tcp_recved((struct tcp_pcb *) sock->pcb, written);
    return written;

cp_error:
    if (written) {
        debug_tcp_print("%d bytes written to userspace", written);
        return written;
    } else
        return EFAULT;
}
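/*
 * read(): deliver queued data immediately if there is any; otherwise
 * either fail with EAGAIN (non-blocking) or suspend the caller by
 * marking the operation pending until the receive callback delivers.
 */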
static void tcp_op_read(struct socket * sock, message * m, int blk)
{
    debug_tcp_print("socket num %ld", get_sock_num(sock));

    if (!sock->pcb || ((struct tcp_pcb *) sock->pcb)->state !=
                ESTABLISHED) {
        debug_tcp_print("Connection not established\n");
        sock_reply(sock, ENOTCONN);
        return;
    }

    if (sock->recv_head) {
        /* data available, receive immediately */
        int ret = read_from_tcp(sock, m);
        debug_tcp_print("read op finished");
        sock_reply(sock, ret);
    } else {
        if (sock->flags & SOCK_FLG_CLOSED) {
            printf("socket %ld already closed!!! call from %d\n",
                    get_sock_num(sock), m->USER_ENDPT);
            sock_reply(sock, 0);
            return;
        }
        if (!blk) {
            debug_tcp_print("reading would block -> EAGAIN");
            sock_reply(sock, EAGAIN);
            return;
        }
        /* operation is being processed */
        debug_tcp_print("no data to read, suspending");
        sock->flags |= SOCK_FLG_OP_PENDING | SOCK_FLG_OP_READING;
    }
}
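/*
 * wbuf_add() appends a new buffer of sz bytes to the socket's write
 * chain and accounts for it in sock->buf_size; wbuf_ack_sent() below
 * releases buffers once lwIP acknowledges the corresponding bytes.
 */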
static struct wbuf * wbuf_add(struct socket * sock, unsigned sz)
{
    struct wbuf * wbuf;
    struct wbuf_chain * wc = (struct wbuf_chain *)sock->buf;

    wbuf = debug_malloc(sizeof(struct wbuf) + sz);
    if (wbuf == NULL)
        return NULL;

    wbuf->len = sz;
    wbuf->written = wbuf->unacked = 0;
    wbuf->next = NULL;

    if (wc->head == NULL)
        wc->head = wc->tail = wbuf;
    else {
        wc->tail->next = wbuf;
        wc->tail = wbuf;
    }

    sock->buf_size += sz;
    debug_tcp_print("buffer %p size %d\n", wbuf, sock->buf_size);

    return wbuf;
}
static struct wbuf * wbuf_ack_sent(struct socket * sock, unsigned sz)
{
    struct wbuf_chain * wc = (struct wbuf_chain *) sock->buf;
    struct wbuf ** wb;

    wb = &wc->head;
    while (*wb && sz > 0) {
        if ((*wb)->unacked <= sz) {
            struct wbuf * w = *wb;

            assert((*wb)->rem_len == 0);

            *wb = w->next;
            sz -= w->unacked;
            sock->buf_size -= w->len;
            debug_tcp_print("whole buffer acked (%d / %d), removed",
                    w->unacked, w->len);
            debug_free(w);
        } else {
            (*wb)->unacked -= sz;
            (*wb)->written += sz;
            debug_tcp_print("acked %d / %d bytes", sz, (*wb)->len);
            sz = 0;
        }
    }

    /* did we write out more than we had? */
    assert(sz == 0);

    if (wc->head == NULL)
        wc->tail = NULL;
    debug_tcp_print("buffer size %d\n", sock->buf_size);

    return wc->head;
}
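/*
 * write(): copy the user data into a new wbuf and reply right away.
 * If no write is in progress, as much as tcp_sndbuf() allows is passed
 * to tcp_write()/tcp_output() immediately; the remainder (rem_len) is
 * sent from the tcp_sent callback as acknowledgements free send space.
 */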
static void tcp_op_write(struct socket * sock, message * m, __unused int blk)
{
    int ret;
    struct wbuf * wbuf;
    unsigned snd_buf_len, usr_buf_len;
    u8_t flgs = 0;

    if (!sock->pcb || ((struct tcp_pcb *) sock->pcb)->state != ESTABLISHED) {
        sock_reply(sock, ENOTCONN);
        return;
    }

    usr_buf_len = m->COUNT;
    debug_tcp_print("socket num %ld data size %d",
            get_sock_num(sock), usr_buf_len);

    /*
     * Let at most one buffer grow beyond TCP_BUF_SIZE. This is to minimize
     * small writes from userspace if only a few bytes were sent before.
     */
    if (sock->buf_size >= TCP_BUF_SIZE) {
        /* FIXME do not block for now */
        debug_tcp_print("WARNING : tcp buffers too large, cannot allocate more");
        sock_reply(sock, ENOMEM);
        return;
    }
    /*
     * Never let the allocated buffers grow to more than 2x TCP_BUF_SIZE and
     * never copy more than the space available.
     */
    usr_buf_len = (usr_buf_len > TCP_BUF_SIZE ? TCP_BUF_SIZE : usr_buf_len);
    wbuf = wbuf_add(sock, usr_buf_len);
    if (!wbuf) {
        debug_tcp_print("cannot allocate new buffer of %d bytes", usr_buf_len);
        sock_reply(sock, ENOMEM);
        return;
    }
    debug_tcp_print("new wbuf for %d bytes", wbuf->len);

    if ((ret = copy_from_user(m->m_source, wbuf->data, usr_buf_len,
                (cp_grant_id_t) m->IO_GRANT, 0)) != OK) {
        sock_reply(sock, ret);
        return;
    }

    wbuf->rem_len = usr_buf_len;

    /*
     * If a writing operation is already in progress, we just enqueue the
     * new buffer; it is sent from the sent callback once the data in front
     * of it has been acknowledged.
     */
    if (sock->flags & SOCK_FLG_OP_WRITING) {
        struct wbuf_chain * wc = (struct wbuf_chain *)sock->buf;
        /*
         * We are adding a buffer with unsent data. If we don't have any
         * other unsent data, set the pointer to this buffer.
         */
        if (wc->unsent == NULL) {
            wc->unsent = wbuf;
            debug_tcp_print("unsent %p remains %d\n", wbuf, wbuf->rem_len);
        }
        debug_tcp_print("returns %d\n", usr_buf_len);
        sock_reply(sock, usr_buf_len);
        /*
         * We cannot accept new (write) operations. We set the flag after
         * sending the reply, not before, otherwise we could deadlock.
         */
        if (sock->buf_size >= TCP_BUF_SIZE)
            sock->flags |= SOCK_FLG_OP_PENDING;

        return;
    }

    /*
     * Start sending data if the operation is not in progress yet. The
     * current buffer is the only one we have, we cannot send more.
     */
    snd_buf_len = tcp_sndbuf((struct tcp_pcb *)sock->pcb);
    debug_tcp_print("tcp can accept %d bytes", snd_buf_len);

    wbuf->unacked = (snd_buf_len < wbuf->rem_len ? snd_buf_len : wbuf->rem_len);
    wbuf->rem_len -= wbuf->unacked;

    if (wbuf->rem_len) {
        flgs = TCP_WRITE_FLAG_MORE;
        /*
         * Remember that this buffer has some data which we didn't pass
         * to tcp yet.
         */
        ((struct wbuf_chain *)sock->buf)->unsent = wbuf;
        debug_tcp_print("unsent %p remains %d\n", wbuf, wbuf->rem_len);
    }

    ret = tcp_write((struct tcp_pcb *)sock->pcb, wbuf->data,
            wbuf->unacked, flgs);
    tcp_output((struct tcp_pcb *)sock->pcb);
    debug_tcp_print("%d bytes to tcp", wbuf->unacked);

    if (ret == ERR_OK) {
        /*
         * Operation is being processed; no need to remember the message
         * in this case, we are going to reply immediately.
         */
        debug_tcp_print("returns %d\n", usr_buf_len);
        sock_reply(sock, usr_buf_len);
        sock->flags |= SOCK_FLG_OP_WRITING;
        if (sock->buf_size >= TCP_BUF_SIZE)
            sock->flags |= SOCK_FLG_OP_PENDING;
    } else
        sock_reply(sock, EIO);
}
static void tcp_set_conf(struct socket * sock, message * m)
{
    int err;
    nwio_tcpconf_t tconf;
    struct tcp_pcb * pcb = (struct tcp_pcb *) sock->pcb;

    debug_tcp_print("socket num %ld", get_sock_num(sock));

    err = copy_from_user(m->m_source, &tconf, sizeof(tconf),
                (cp_grant_id_t) m->IO_GRANT, 0);
    if (err != OK) {
        sock_reply(sock, err);
        return;
    }

    debug_tcp_print("tconf.nwtc_flags = 0x%lx", tconf.nwtc_flags);
    debug_tcp_print("tconf.nwtc_remaddr = 0x%x",
                (unsigned int) tconf.nwtc_remaddr);
    debug_tcp_print("tconf.nwtc_remport = 0x%x", ntohs(tconf.nwtc_remport));
    debug_tcp_print("tconf.nwtc_locaddr = 0x%x",
                (unsigned int) tconf.nwtc_locaddr);
    debug_tcp_print("tconf.nwtc_locport = 0x%x", ntohs(tconf.nwtc_locport));

    sock->usr_flags = tconf.nwtc_flags;

    if (sock->usr_flags & NWTC_SET_RA)
        pcb->remote_ip.addr = tconf.nwtc_remaddr;
    if (sock->usr_flags & NWTC_SET_RP)
        pcb->remote_port = ntohs(tconf.nwtc_remport);

    if (sock->usr_flags & NWTC_LP_SET) {
        /* FIXME the user library can only bind to ANY anyway */
        if (tcp_bind(pcb, IP_ADDR_ANY, ntohs(tconf.nwtc_locport)) == ERR_USE) {
            sock_reply(sock, EADDRINUSE);
            return;
        }
    }

    sock_reply(sock, OK);
}
static void tcp_get_conf(struct socket * sock, message * m)
{
    int err;
    nwio_tcpconf_t tconf;
    struct tcp_pcb * pcb = (struct tcp_pcb *) sock->pcb;

    debug_tcp_print("socket num %ld", get_sock_num(sock));

    tconf.nwtc_locaddr = pcb->local_ip.addr;
    tconf.nwtc_locport = htons(pcb->local_port);
    tconf.nwtc_remaddr = pcb->remote_ip.addr;
    tconf.nwtc_remport = htons(pcb->remote_port);
    tconf.nwtc_flags = sock->usr_flags;

    debug_tcp_print("tconf.nwtc_flags = 0x%lx", tconf.nwtc_flags);
    debug_tcp_print("tconf.nwtc_remaddr = 0x%x",
                (unsigned int) tconf.nwtc_remaddr);
    debug_tcp_print("tconf.nwtc_remport = 0x%x", ntohs(tconf.nwtc_remport));
    debug_tcp_print("tconf.nwtc_locaddr = 0x%x",
                (unsigned int) tconf.nwtc_locaddr);
    debug_tcp_print("tconf.nwtc_locport = 0x%x", ntohs(tconf.nwtc_locport));

    if ((unsigned) m->COUNT < sizeof(tconf)) {
        sock_reply(sock, EINVAL);
        return;
    }

    err = copy_to_user(m->m_source, &tconf, sizeof(tconf),
                (cp_grant_id_t) m->IO_GRANT, 0);
    if (err != OK) {
        sock_reply(sock, err);
        return;
    }

    sock_reply(sock, OK);
}
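/*
 * Receive path helper: append a pbuf (and its total length) to the
 * socket's receive queue so a later read can consume it.
 */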
static int enqueue_rcv_data(struct socket * sock, struct pbuf * pbuf)
{
    /* Do not enqueue more data than allowed */
    if (0 && sock->recv_data_size > 4 * TCP_BUF_SIZE)
        return ERR_MEM;

    if (sock_enqueue_data(sock, pbuf, pbuf->tot_len) != OK) {
        debug_tcp_print("data enqueueing failed");
        return ERR_MEM;
    }
    debug_tcp_print("enqueued %d bytes", pbuf->tot_len);
    //printf("enqueued %d bytes, queue %d\n", pbuf->tot_len, sock->recv_data_size);

    return ERR_OK;
}
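/*
 * lwIP receive callback. A NULL pbuf means the remote side closed the
 * stream: a pending reader is woken up with EOF and undelivered data is
 * dropped. Otherwise the pbuf is enqueued and either a pending read is
 * completed or select() is notified.
 */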
static err_t tcp_recv_callback(void *arg,
                struct tcp_pcb *tpcb,
                struct pbuf *pbuf,
                err_t err)
{
    int ret, enqueued = 0;
    struct socket * sock = (struct socket *) arg;

    debug_tcp_print("socket num %ld", get_sock_num(sock));

    if (sock->pcb == NULL) {
        if (sock_select_set(sock))
            sock_select_notify(sock);
        return ERR_OK;
    }

    assert((struct tcp_pcb *) sock->pcb == tpcb);

    if (pbuf == NULL) {
        debug_tcp_print("tcp stream closed on the remote side");
        // sock->flags |= SOCK_FLG_CLOSED;

        /* wake up the reader and report EOF */
        if (sock->flags & SOCK_FLG_OP_PENDING &&
                sock->flags & SOCK_FLG_OP_READING) {
            sock_reply(sock, 0);
            sock->flags &= ~(SOCK_FLG_OP_PENDING |
                    SOCK_FLG_OP_READING);
        }
        /* if there are any undelivered data, drop them */
        sock_dequeue_data_all(sock, tcp_recv_free);
        tcp_abandon(tpcb, 0);
        sock->pcb = NULL;

        return ERR_ABRT;
    }

    /*
     * FIXME we always enqueue the data first. If the head is empty and read
     * operation is pending we could try to deliver immediately without
     * enqueueing.
     */
    if (enqueue_rcv_data(sock, pbuf) == ERR_OK)
        enqueued = 1;

    /*
     * Deliver data if there is a pending read operation, otherwise notify
     * select if the socket is being monitored.
     */
    if (sock->flags & SOCK_FLG_OP_PENDING) {
        if (sock->flags & SOCK_FLG_OP_READING) {
            ret = read_from_tcp(sock, &sock->mess);
            debug_tcp_print("read op finished");
            sock_reply(sock, ret);
            sock->flags &= ~(SOCK_FLG_OP_PENDING |
                    SOCK_FLG_OP_READING);
        }
    } else if (!(sock->flags & SOCK_FLG_OP_WRITING) &&
            sock_select_rw_set(sock))
        sock_select_notify(sock);

    /* perhaps we have delivered some data to the user, try to enqueue again */
    if (!enqueued)
        return enqueue_rcv_data(sock, pbuf);

    return ERR_OK;
}
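/*
 * lwIP sent callback: acknowledge len bytes against the write-buffer
 * chain, wake up writers waiting for buffer space, and feed more unsent
 * data to tcp_write() now that the send buffer has room again.
 */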
static err_t tcp_sent_callback(void *arg, struct tcp_pcb *tpcb, u16_t len)
{
    struct socket * sock = (struct socket *) arg;
    struct wbuf * wbuf;
    struct wbuf_chain * wc = (struct wbuf_chain *) sock->buf;
    unsigned snd_buf_len;

    debug_tcp_print("socket num %ld", get_sock_num(sock));

    /* an error might have happened */
    if (sock->pcb == NULL) {
        if (sock_select_set(sock))
            sock_select_notify(sock);
        return ERR_OK;
    }

    assert((struct tcp_pcb *)sock->pcb == tpcb);

    /* operation must have been canceled, do not send any other data */
    if (!(sock->flags & SOCK_FLG_OP_PENDING))
        return ERR_OK;

    wbuf = wbuf_ack_sent(sock, len);

    if (wbuf == NULL) {
        debug_tcp_print("all data acked, nothing more to send");
        sock->flags &= ~SOCK_FLG_OP_WRITING;
        if (!(sock->flags & SOCK_FLG_OP_READING))
            sock->flags &= ~SOCK_FLG_OP_PENDING;
        /* no reviving, we must notify. Write and read possible */
        if (sock_select_rw_set(sock))
            sock_select_notify(sock);
        return ERR_OK;
    }

    /* we have just freed some space, write will be accepted */
    if (sock->buf_size < TCP_BUF_SIZE && sock_select_rw_set(sock)) {
        if (!(sock->flags & SOCK_FLG_OP_READING)) {
            sock->flags &= ~SOCK_FLG_OP_PENDING;
            sock_select_notify(sock);
        }
    }

    /*
     * Check if there is some space for new data, there should be, we just
     * got a confirmation that some data reached the other end of the
     * connection.
     */
    snd_buf_len = tcp_sndbuf(tpcb);
    assert(snd_buf_len > 0);
    debug_tcp_print("tcp can accept %d bytes", snd_buf_len);

    if (wc->unsent == NULL) {
        debug_tcp_print("nothing to send");
        return ERR_OK;
    }

    wbuf = wc->unsent;
    while (wbuf) {
        unsigned towrite;
        u8_t flgs = 0;
        err_t ret;

        towrite = (snd_buf_len < wbuf->rem_len ?
                snd_buf_len : wbuf->rem_len);
        wbuf->rem_len -= towrite;
        debug_tcp_print("data to send, sending %d", towrite);

        if (wbuf->rem_len || wbuf->next)
            flgs = TCP_WRITE_FLAG_MORE;
        ret = tcp_write(tpcb, wbuf->data + wbuf->written + wbuf->unacked,
                towrite, flgs);
        debug_tcp_print("%d bytes to tcp", towrite);

        /* tcp_output() is called once we return from this callback */

        if (ret != ERR_OK) {
            debug_print("tcp_write() failed (%d), written %d"
                    , ret, wbuf->written);
            sock->flags &= ~(SOCK_FLG_OP_PENDING | SOCK_FLG_OP_WRITING);
            /* no reviving, we must notify. Write and read possible */
            if (sock_select_rw_set(sock))
                sock_select_notify(sock);
            return ERR_OK;
        }

        wbuf->unacked += towrite;
        snd_buf_len -= towrite;
        debug_tcp_print("tcp still accepts %d bytes\n", snd_buf_len);

        if (snd_buf_len == 0)
            break;
        assert(wbuf->rem_len == 0);
        wbuf = wbuf->next;
    }

    wc->unsent = wbuf;
    if (wbuf)
        debug_tcp_print("unsent %p remains %d\n",
                wbuf, wbuf->rem_len);
    else
        debug_tcp_print("nothing to send");

    return ERR_OK;
}
static err_t tcp_connected_callback(void *arg,
                struct tcp_pcb *tpcb,
                err_t err)
{
    struct socket * sock = (struct socket *) arg;

    debug_tcp_print("socket num %ld err %d", get_sock_num(sock), err);

    if (sock->pcb == NULL) {
        if (sock_select_set(sock))
            sock_select_notify(sock);
        return ERR_OK;
    }

    assert((struct tcp_pcb *)sock->pcb == tpcb);

    tcp_sent(tpcb, tcp_sent_callback);
    tcp_recv(tpcb, tcp_recv_callback);
    sock_reply(sock, OK);
    sock->flags &= ~(SOCK_FLG_OP_PENDING | SOCK_FLG_OP_CONNECTING);

    /* revive does the sock_select_notify() for us */

    return ERR_OK;
}
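/*
 * Start an active connect using the remote address and port previously
 * set through NWIOSTCPCONF; the reply to the caller is sent from
 * tcp_connected_callback() once the handshake finishes.
 */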
static void tcp_op_connect(struct socket * sock)
{
    ip_addr_t remaddr;
    struct tcp_pcb * pcb;
    err_t err;

    debug_tcp_print("socket num %ld", get_sock_num(sock));
    /*
     * Connecting is going to send some packets. Unless an immediate error
     * occurs this operation is going to block.
     */
    sock->flags |= SOCK_FLG_OP_PENDING | SOCK_FLG_OP_CONNECTING;

    /* try to connect now */
    pcb = (struct tcp_pcb *) sock->pcb;
    remaddr = pcb->remote_ip;
    err = tcp_connect(pcb, &remaddr, pcb->remote_port,
                tcp_connected_callback);
    if (err == ERR_VAL)
        panic("Wrong tcp_connect arguments");
    if (err != ERR_OK)
        panic("Other tcp_connect error %d\n", err);
}
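/*
 * Accept path: the caller identifies an already opened socket by the
 * cookie obtained via NWIOGTCPCOOKIE (see tcp_op_get_cookie below) and
 * passes it to NWIOTCPACCEPTTO on the listening socket. tcp_do_accept()
 * looks that socket up, abandons its placeholder pcb and attaches the
 * newly accepted pcb to it instead.
 */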
static int tcp_do_accept(struct socket * listen_sock,
            message * m,
            struct tcp_pcb * newpcb)
{
    struct socket * newsock;
    unsigned sock_num;
    int ret;

    debug_tcp_print("socket num %ld", get_sock_num(listen_sock));

    if ((ret = copy_from_user(m->m_source, &sock_num, sizeof(sock_num),
                (cp_grant_id_t) m->IO_GRANT, 0)) != OK)
        return EFAULT;
    if (!is_valid_sock_num(sock_num))
        return EBADF;

    newsock = get_sock(sock_num);
    assert(newsock->pcb); /* because of previous open() */

    /* we really want to forget about this socket */
    tcp_err((struct tcp_pcb *)newsock->pcb, NULL);
    tcp_abandon((struct tcp_pcb *)newsock->pcb, 0);

    tcp_arg(newpcb, newsock);
    tcp_err(newpcb, tcp_error_callback);
    tcp_sent(newpcb, tcp_sent_callback);
    tcp_recv(newpcb, tcp_recv_callback);
    tcp_nagle_disable(newpcb);
    tcp_accepted(((struct tcp_pcb *)(listen_sock->pcb)));
    newsock->pcb = newpcb;

    debug_tcp_print("Accepted new connection using socket %d\n", sock_num);

    return OK;
}
static err_t tcp_accept_callback(void *arg, struct tcp_pcb *newpcb, err_t err)
{
    struct socket * sock = (struct socket *) arg;

    debug_tcp_print("socket num %ld", get_sock_num(sock));

    assert(err == ERR_OK && newpcb);
    assert(sock->flags & SOCK_FLG_OP_LISTENING);

    if (sock->flags & SOCK_FLG_OP_PENDING) {
        int ret;

        ret = tcp_do_accept(sock, &sock->mess, newpcb);
        sock_reply(sock, ret);
        sock->flags &= ~SOCK_FLG_OP_PENDING;
        if (ret == OK)
            return ERR_OK;
        /* in case of an error fall through */
    }

    /* If we cannot accept right away we enqueue the connection for later */

    debug_tcp_print("Enqueue connection sock %ld pcb %p\n",
            get_sock_num(sock), newpcb);
    if (sock_enqueue_data(sock, newpcb, 1) != OK) {
        tcp_abort(newpcb);
        return ERR_ABRT;
    }

    if (sock_select_read_set(sock))
        sock_select_notify(sock);

    return ERR_OK;
}
static void tcp_op_listen(struct socket * sock, message * m)
{
    int backlog, err;
    struct tcp_pcb * new_pcb;

    debug_tcp_print("socket num %ld", get_sock_num(sock));

    err = copy_from_user(m->m_source, &backlog, sizeof(backlog),
                (cp_grant_id_t) m->IO_GRANT, 0);

    new_pcb = tcp_listen_with_backlog((struct tcp_pcb *) sock->pcb,
                        (u8_t) backlog);
    debug_tcp_print("listening pcb %p", new_pcb);

    if (new_pcb == NULL) {
        debug_tcp_print("Cannot listen on socket %ld", get_sock_num(sock));
        sock_reply(sock, EGENERIC);
        return;
    }
    sock->pcb = new_pcb;

    /* advertise that this socket is willing to accept connections */
    tcp_accept(new_pcb, tcp_accept_callback);
    sock->flags |= SOCK_FLG_OP_LISTENING;

    sock_reply(sock, OK);
}
static void tcp_op_accept(struct socket * sock, message * m)
{
    debug_tcp_print("socket num %ld", get_sock_num(sock));

    if (!(sock->flags & SOCK_FLG_OP_LISTENING)) {
        debug_tcp_print("socket %ld does not listen\n", get_sock_num(sock));
        sock_reply(sock, EINVAL);
        return;
    }

    /* there is a connection ready to be accepted */
    if (sock->recv_head) {
        int ret;
        struct tcp_pcb * pcb;

        pcb = (struct tcp_pcb *) sock->recv_head->data;

        ret = tcp_do_accept(sock, m, pcb);
        sock_reply(sock, ret);
        if (ret == OK)
            sock_dequeue_data(sock);
        return;
    }

    debug_tcp_print("no ready connection, suspending\n");

    sock->flags |= SOCK_FLG_OP_PENDING;
}
static void tcp_op_shutdown_tx(struct socket * sock)
{
    err_t err;

    debug_tcp_print("socket num %ld", get_sock_num(sock));

    err = tcp_shutdown((struct tcp_pcb *) sock->pcb, 0, 1);

    switch (err) {
    case ERR_OK:
        sock_reply(sock, OK);
        break;
    case ERR_CONN:
        sock_reply(sock, ENOTCONN);
        break;
    default:
        sock_reply(sock, EGENERIC);
    }
}
static void tcp_op_get_cookie(struct socket * sock, message * m)
{
    tcp_cookie_t cookie;    /* type assumed; the original declaration is elided in this excerpt */
    unsigned sock_num;

    assert(sizeof(cookie) >= sizeof(sock));

    sock_num = get_sock_num(sock);
    memcpy(&cookie, &sock_num, sizeof(sock_num));

    if (copy_to_user(m->m_source, &cookie, sizeof(sock),
            (cp_grant_id_t) m->IO_GRANT, 0) == OK)
        sock_reply(sock, OK);
    else
        sock_reply(sock, EFAULT);
}
static void tcp_get_opt(struct socket * sock, message * m)
{
    int err;
    nwio_tcpopt_t tcpopt;
    struct tcp_pcb * pcb = (struct tcp_pcb *) sock->pcb;

    debug_tcp_print("socket num %ld", get_sock_num(sock));

    assert(pcb);

    if ((unsigned) m->COUNT < sizeof(tcpopt)) {
        sock_reply(sock, EINVAL);
        return;
    }

    /* FIXME : not used by the userspace library */
    tcpopt.nwto_flags = 0;

    err = copy_to_user(m->m_source, &tcpopt, sizeof(tcpopt),
                (cp_grant_id_t) m->IO_GRANT, 0);
    if (err != OK) {
        sock_reply(sock, err);
        return;
    }

    sock_reply(sock, OK);
}
static void tcp_set_opt(struct socket * sock, message * m)
{
    int err;
    nwio_tcpopt_t tcpopt;
    struct tcp_pcb * pcb = (struct tcp_pcb *) sock->pcb;

    debug_tcp_print("socket num %ld", get_sock_num(sock));

    assert(pcb);

    err = copy_from_user(m->m_source, &tcpopt, sizeof(tcpopt),
                (cp_grant_id_t) m->IO_GRANT, 0);
    if (err != OK) {
        sock_reply(sock, err);
        return;
    }

    /* FIXME : The userspace library does not use this */

    sock_reply(sock, OK);
}
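/*
 * All TCP operations other than plain read/write arrive as ioctls and
 * are dispatched here based on m->REQUEST.
 */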
static void tcp_op_ioctl(struct socket * sock, message * m, __unused int blk)
{
    if (!sock->pcb) {
        sock_reply(sock, ENOTCONN);
        return;
    }

    debug_tcp_print("socket num %ld req %c %d %d",
        get_sock_num(sock),
        (m->REQUEST >> 8) & 0xff,
        m->REQUEST & 0xff,
        (m->REQUEST >> 16) & _IOCPARM_MASK);

    switch (m->REQUEST) {
    case NWIOGTCPCONF:
        tcp_get_conf(sock, m);
        break;
    case NWIOSTCPCONF:
        tcp_set_conf(sock, m);
        break;
    case NWIOTCPCONN:
        tcp_op_connect(sock);
        break;
    case NWIOTCPLISTENQ:
        tcp_op_listen(sock, m);
        break;
    case NWIOGTCPCOOKIE:
        tcp_op_get_cookie(sock, m);
        break;
    case NWIOTCPACCEPTTO:
        tcp_op_accept(sock, m);
        break;
    case NWIOTCPSHUTDOWN:
        tcp_op_shutdown_tx(sock);
        break;
    case NWIOGTCPOPT:
        tcp_get_opt(sock, m);
        break;
    case NWIOSTCPOPT:
        tcp_set_opt(sock, m);
        break;
    default:
        sock_reply(sock, EBADIOCTL);
    }
}
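/*
 * select(): report which operations would not block right now and, if
 * SEL_NOTIFY is set, remember what to watch so the callbacks can call
 * sock_select_notify() later.
 */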
static void tcp_op_select(struct socket * sock, __unused message * m)
{
    int retsel = 0, sel;

    sel = m->USER_ENDPT;
    debug_tcp_print("socket num %ld 0x%x", get_sock_num(sock), sel);

    /* in this case any operation would block, no error */
    if (sock->flags & SOCK_FLG_OP_PENDING) {
        debug_tcp_print("SOCK_FLG_OP_PENDING");
        if (sel & SEL_NOTIFY) {
            if (sel & SEL_RD) {
                sock->flags |= SOCK_FLG_SEL_READ;
                debug_tcp_print("monitor read");
            }
            if (sel & SEL_WR) {
                sock->flags |= SOCK_FLG_SEL_WRITE;
                debug_tcp_print("monitor write");
            }
            if (sel & SEL_ERR)
                sock->flags |= SOCK_FLG_SEL_ERROR;
        }
        sock_reply_select(sock, 0);
        return;
    }

    if (sel & SEL_RD) {
        /*
         * If recv_head is not NULL we can either read or accept a
         * connection, which is the same for select().
         */
        if (sock->pcb) {
            if (sock->recv_head &&
                    !(sock->flags & SOCK_FLG_OP_WRITING))
                retsel |= SEL_RD;
            else if (!(sock->flags & SOCK_FLG_OP_LISTENING) &&
                    ((struct tcp_pcb *) sock->pcb)->state != ESTABLISHED)
                retsel |= SEL_RD;
            else if (sel & SEL_NOTIFY) {
                sock->flags |= SOCK_FLG_SEL_READ;
                debug_tcp_print("monitor read");
            }
        } else
            retsel |= SEL_RD; /* not connected, read does not block */
    }

    if (sel & SEL_WR) {
        if (sock->pcb) {
            if (((struct tcp_pcb *) sock->pcb)->state == ESTABLISHED)
                retsel |= SEL_WR;
            else if (sel & SEL_NOTIFY) {
                sock->flags |= SOCK_FLG_SEL_WRITE;
                debug_tcp_print("monitor write");
            }
        } else
            retsel |= SEL_WR; /* not connected, write does not block */
    }

    if (retsel & SEL_RD) {
        debug_tcp_print("read won't block");
    }
    if (retsel & SEL_WR) {
        debug_tcp_print("write won't block");
    }

    /* we only monitor if errors will happen in the future */
    if (sel & SEL_ERR && sel & SEL_NOTIFY)
        sock->flags |= SOCK_FLG_SEL_ERROR;

    sock_reply_select(sock, retsel);
}
static void tcp_op_select_reply(struct socket * sock, message * m)
{
    assert(sock->select_ep != NONE);
    debug_tcp_print("socket num %ld", get_sock_num(sock));

    if (sock->flags & SOCK_FLG_OP_PENDING) {
        debug_tcp_print("WARNING socket still blocking!");
        return;
    }

    if (sock->flags & SOCK_FLG_SEL_READ) {
        if (sock->pcb == NULL || (sock->recv_head &&
                !(sock->flags & SOCK_FLG_OP_WRITING)) ||
                (!(sock->flags & SOCK_FLG_OP_LISTENING) &&
                ((struct tcp_pcb *) sock->pcb)->state !=
                ESTABLISHED)) {
            m->DEV_SEL_OPS |= SEL_RD;
            debug_tcp_print("read won't block");
        }
    }

    if (sock->flags & SOCK_FLG_SEL_WRITE &&
            (sock->pcb == NULL ||
            ((struct tcp_pcb *) sock->pcb)->state ==
            ESTABLISHED)) {
        m->DEV_SEL_OPS |= SEL_WR;
        debug_tcp_print("write won't block");
    }

    sock->flags &= ~(SOCK_FLG_SEL_WRITE | SOCK_FLG_SEL_READ |
            SOCK_FLG_SEL_ERROR);
}
struct sock_ops sock_tcp_ops = {
    .open           = tcp_op_open,
    .close          = tcp_op_close,
    .read           = tcp_op_read,
    .write          = tcp_op_write,
    .ioctl          = tcp_op_ioctl,
    .select         = tcp_op_select,
    .select_reply   = tcp_op_select_reply
};