2 * This file implements handling of socket-related requests from VFS
10 #include <minix/com.h>
11 #include <minix/callnr.h>
12 #include <minix/sysutil.h>
13 #include <minix/netsock.h>
17 #include <sys/ioc_net.h>
/*
 * Name under which this service identifies itself in debug/panic output.
 * NOTE(review): this whole region is garbled — statements are split across
 * lines and the embedded original line numbers jump, so text is missing;
 * recover from the upstream MINIX source before editing.
 */
19 char * netsock_user_name
= NULL
;
20 #define NETSOCK_USER_NAME (netsock_user_name ? netsock_user_name : "NETSOCK")
22 #define debug_print(str, ...) printf("%s : %s:%d : " str "\n", \
23 NETSOCK_USER_NAME, __func__, __LINE__, ##__VA_ARGS__)
/*
 * NOTE(review): two definitions of debug_sock_print (and of
 * debug_sock_select_print) appear back to back below — presumably the
 * surrounding #if DEBUG / #else / #endif lines were lost; confirm against
 * upstream.
 */
26 #define debug_sock_print(...) debug_print(__VA_ARGS__)
28 #define debug_sock_print(...)
32 #define debug_sock_select_print(...) debug_print(__VA_ARGS__)
34 #define debug_sock_select_print(...) debug_sock_print(__VA_ARGS__)
/*
 * NOTE(review): both macros below end in a line continuation whose
 * following line is missing here (likely "__LINE__, ##__VA_ARGS__)").
 */
37 #define netsock_panic(str, ...) panic("%s : " str, NETSOCK_USER_NAME, \
39 #define netsock_error(str, ...) printf("%s : " str, NETSOCK_USER_NAME, \
/* Global table of all sockets managed by this service. */
43 struct socket socket
[MAX_SOCKETS
];
/*
 * Forward declarations of the chardriver callback handlers below.
 * NOTE(review): several prototypes are truncated in this copy (e.g. the
 * netsock_write and netsock_select declarations end mid-parameter-list);
 * restore the missing parameter lines from upstream before building.
 */
45 static int netsock_open(devminor_t minor
, int access
, endpoint_t user_endpt
);
46 static int netsock_close(devminor_t minor
);
47 static ssize_t
netsock_read(devminor_t minor
, u64_t position
, endpoint_t endpt
,
48 cp_grant_id_t grant
, size_t size
, int flags
, cdev_id_t id
);
49 static ssize_t
netsock_write(devminor_t minor
, u64_t position
,
50 endpoint_t endpt
, cp_grant_id_t grant
, size_t size
, int flags
,
52 static int netsock_ioctl(devminor_t minor
, unsigned long request
,
53 endpoint_t endpt
, cp_grant_id_t grant
, int flags
,
54 endpoint_t user_endpt
, cdev_id_t id
);
55 static int netsock_cancel(devminor_t minor
, endpoint_t endpt
, cdev_id_t id
);
56 static int netsock_select(devminor_t minor
, unsigned int ops
,
/*
 * chardriver callback table: routes VFS character-device requests to the
 * netsock_* handlers defined in this file.
 * NOTE(review): the closing "};" of this initializer is missing here.
 */
59 static struct chardriver netsock_tab
= {
60 .cdr_open
= netsock_open
,
61 .cdr_close
= netsock_close
,
62 .cdr_read
= netsock_read
,
63 .cdr_write
= netsock_write
,
64 .cdr_ioctl
= netsock_ioctl
,
65 .cdr_cancel
= netsock_cancel
,
66 .cdr_select
= netsock_select
/* Allocation helpers for receive-queue and suspended-request-queue nodes;
 * they wrap the debug allocator so leaks can be tracked. */
69 #define recv_q_alloc() debug_malloc(sizeof(struct recv_q))
70 #define recv_q_free debug_free
78 #define mq_alloc() debug_malloc(sizeof(struct mq))
79 #define mq_free debug_free
/* Head and tail of the doubly-linked queue of suspended requests. */
81 static struct mq
* mq_head
, *mq_tail
;
/*
 * mq_enqueue: append a request that must be suspended to the pending queue.
 * NOTE(review): interior lines are missing here (original numbering jumps
 * 83 -> 87 -> 103): the node allocation and list-linking code is absent;
 * only the empty-queue assignment survives. Restore from upstream.
 */
83 int mq_enqueue(struct sock_req
* req
)
87 debug_sock_print("sock %d op %d", req
->minor
, req
->type
);
103 mq_head
= mq_tail
= mq
;
/*
 * mq_dequeue_head: detach and return the first node of the pending queue
 * (marked __unused, so it may have no callers in this file).
 * NOTE(review): fragmentary — the return statement and surrounding braces
 * are missing; the visible code unlinks the head for the multi-element and
 * single-element cases.
 */
109 __unused
static struct mq
* mq_dequeue_head(void)
118 if (mq_head
!= mq_tail
) {
119 mq_head
= mq_head
->next
;
120 mq_head
->prev
= NULL
;
122 mq_head
= mq_tail
= NULL
;
124 debug_sock_print("socket %d\n", ret
->req
.minor
);
/*
 * mq_dequeue: unlink an arbitrary node from the doubly-linked pending
 * queue, handling the only-element, head, tail and middle cases.
 * NOTE(review): fragmentary — several statements (e.g. the head/tail
 * reassignments paired with the NULL-pointer fixups) are missing between
 * the visible lines; verify against upstream before editing.
 */
129 static void mq_dequeue(struct mq
* mq
)
131 if (mq_head
== mq_tail
)
132 mq_head
= mq_tail
= NULL
;
134 if (mq
->prev
== NULL
) {
136 mq_head
->prev
= NULL
;
138 mq
->prev
->next
= mq
->next
;
139 if (mq
->next
== NULL
) {
141 mq_tail
->next
= NULL
;
143 mq
->next
->prev
= mq
->prev
;
/*
 * mq_cancel: scan the pending queue from the tail for the request matching
 * (minor, endpt, id) and remove it.
 * NOTE(review): fragmentary — the match condition is truncated (the id
 * comparison and the mq_dequeue/mq_free calls are missing), as is the
 * return statement the pre-existing FIXME below refers to.
 */
147 static int mq_cancel(devminor_t minor
, endpoint_t endpt
, cdev_id_t id
)
151 for (mq
= mq_tail
; mq
; mq
= mq
->prev
) {
152 if (minor
== mq
->req
.minor
&& endpt
== mq
->req
.endpt
&&
154 debug_sock_print("socket %d\n", minor
);
164 /* FIXME: shouldn't this return (!!mq) ? */
/*
 * sock_enqueue_data: append a received-data buffer to a socket's receive
 * queue and grow recv_data_size by the buffer size.
 * NOTE(review): fragmentary — the error return for a failed recv_q_alloc(),
 * the assignment of data into the new node, and the recv_tail update in the
 * non-empty case are missing between the visible lines.
 */
168 int sock_enqueue_data(struct socket
* sock
, void * data
, unsigned size
)
172 if (!(r
= recv_q_alloc()))
178 if (sock
->recv_head
) {
179 sock
->recv_tail
->next
= r
;
182 sock
->recv_head
= sock
->recv_tail
= r
;
186 sock
->recv_data_size
+= size
;
/*
 * sock_dequeue_data: pop the first buffer off the socket's receive queue;
 * clears recv_tail when the queue becomes empty.
 * NOTE(review): fragmentary — the extraction of the data pointer, the
 * recv_q_free of the node, and the return statement are missing.
 */
191 void * sock_dequeue_data(struct socket
* sock
)
196 if ((r
= sock
->recv_head
)) {
198 if (!(sock
->recv_head
= r
->next
))
199 sock
->recv_tail
= NULL
;
/*
 * sock_dequeue_data_all: drain the receive queue, releasing every buffer
 * via the caller-supplied data_free callback, then reset recv_data_size.
 * NOTE(review): fragmentary — the loop body invoking data_free(data) is
 * missing between the visible lines.
 */
208 void sock_dequeue_data_all(struct socket
* sock
,
209 recv_data_free_fn data_free
)
213 while ((data
= sock_dequeue_data(sock
)))
215 sock
->recv_data_size
= 0;
/*
 * send_req_reply: send the reply for a completed request back to the
 * requesting endpoint via chardriver_reply_task(); a status of EDONTREPLY
 * suppresses the reply.
 * NOTE(review): fragmentary — braces and the early return after the
 * EDONTREPLY check are missing.
 */
218 void send_req_reply(struct sock_req
* req
, int status
)
220 if (status
== EDONTREPLY
)
223 chardriver_reply_task(req
->endpt
, req
->id
, status
);
/*
 * sock_select_notify: ask the socket's ops->select_reply() which operations
 * are now ready and, presumably when any are, report them to the endpoint
 * waiting in select via chardriver_reply_select(), then clear the select
 * state. Requires an active select (select_ep != NONE) — see the assert.
 * NOTE(review): fragmentary — the check on the ops result between the
 * select_reply() call and the reply is missing.
 */
226 void sock_select_notify(struct socket
* sock
)
230 debug_sock_select_print("socket num %ld", get_sock_num(sock
));
231 assert(sock
->select_ep
!= NONE
);
233 ops
= sock
->ops
->select_reply(sock
);
235 debug_sock_select_print("called from %p sflags 0x%x TXsz %d RXsz %d\n",
236 __builtin_return_address(0), sock
->flags
,
237 sock
->buf_size
, sock
->recv_data_size
);
241 chardriver_reply_select(sock
->select_ep
, get_sock_num(sock
), ops
);
243 sock_clear_select(sock
);
244 sock
->select_ep
= NONE
;
/*
 * get_unused_sock: search the socket table (above the reserved
 * SOCK_TYPES + MAX_DEVS slots) for a free entry (ops == NULL), zero it and
 * presumably return it; returns NULL when the table is full — TODO confirm,
 * the return statements are missing from this copy.
 */
247 struct socket
* get_unused_sock(void)
251 for (i
= SOCK_TYPES
+ MAX_DEVS
; i
< MAX_SOCKETS
; i
++) {
252 if (socket
[i
].ops
== NULL
) {
254 memset(&socket
[i
], 0, sizeof(struct socket
));
/*
 * socket_request_socket: dispatch a request to the socket's operation
 * table. blocking is derived from the absence of CDEV_NONBLOCK in the
 * request flags. Reaching the final branch is a hard error (panic).
 * NOTE(review): fragmentary — the switch on req->type, the EINVAL/ENOTTY
 * fallbacks for missing ops, and the final return are missing between the
 * visible lines; the read/write/ioctl arms below are presumably the case
 * bodies of that switch.
 */
262 static int socket_request_socket(struct socket
* sock
, struct sock_req
* req
)
264 int r
, blocking
= (req
->flags
& CDEV_NONBLOCK
) ? 0 : 1;
268 if (sock
->ops
&& sock
->ops
->read
)
269 r
= sock
->ops
->read(sock
, req
, blocking
);
274 if (sock
->ops
&& sock
->ops
->write
)
275 r
= sock
->ops
->write(sock
, req
, blocking
);
280 if (sock
->ops
&& sock
->ops
->ioctl
)
281 r
= sock
->ops
->ioctl(sock
, req
, blocking
);
286 netsock_panic("cannot happen!");
/*
 * netsock_open: chardriver open hook. Delegates to socket_open(minor) and,
 * on success, returns the new minor combined with CDEV_CLONED so VFS
 * switches the caller to a per-socket clone device.
 * NOTE(review): fragmentary — the error return path after the
 * socket_open() check is missing.
 */
292 static int netsock_open(devminor_t minor
, int UNUSED(access
),
293 endpoint_t
UNUSED(user_endpt
))
297 if ((r
= socket_open(minor
)) < 0)
300 return CDEV_CLONED
| r
;
/*
 * netsock_close: chardriver close hook. Looks up the socket for the minor,
 * clears any pending-operation flag, and delegates to the socket's
 * ops->close when present.
 * NOTE(review): fragmentary — the error return when get_sock() fails and
 * the return value when there is no close op are missing.
 */
303 static int netsock_close(devminor_t minor
)
307 if (!(sock
= get_sock(minor
)))
310 if (sock
->ops
&& sock
->ops
->close
) {
311 sock
->flags
&= ~SOCK_FLG_OP_PENDING
;
313 return sock
->ops
->close(sock
);
/*
 * netsock_request: common entry for read/write/ioctl requests. If the
 * socket already has a blocking operation pending, or a write is in
 * progress and this is a read, the new request is suspended by queueing it
 * with mq_enqueue(); otherwise it is executed immediately via
 * socket_request_socket().
 * NOTE(review): fragmentary — the assignments of the debug string `o`, the
 * suspend return value, and the enqueue-failure return are missing.
 */
318 static int netsock_request(struct socket
*sock
, struct sock_req
*req
)
323 * If an operation is pending (blocking operation) or writing is
324 * still going on and we're reading, suspend the new operation
326 if ((sock
->flags
& SOCK_FLG_OP_PENDING
) ||
327 (req
->type
== SOCK_REQ_READ
&&
328 sock
->flags
& SOCK_FLG_OP_WRITING
)) {
329 if (sock
->flags
& SOCK_FLG_OP_READING
)
331 else if (sock
->flags
& SOCK_FLG_OP_WRITING
)
335 debug_sock_print("socket %ld is busy by %s flgs 0x%x\n",
336 get_sock_num(sock
), o
, sock
->flags
);
338 if (mq_enqueue(req
) != 0) {
339 debug_sock_print("Enqueuing suspended call failed");
346 return socket_request_socket(sock
, req
);
/*
 * netsock_read: chardriver read hook. Builds a sock_req of type
 * SOCK_REQ_READ for the socket and hands it to netsock_request().
 * NOTE(review): fragmentary — the trailing parameters (grant id parameter
 * list end), the error return when get_sock() fails, and the remaining
 * req field assignments (endpt/grant/size/flags/id) are missing.
 */
349 static ssize_t
netsock_read(devminor_t minor
, u64_t
UNUSED(position
),
350 endpoint_t endpt
, cp_grant_id_t grant
, size_t size
, int flags
,
356 if (!(sock
= get_sock(minor
)))
359 /* Build a request record for this request. */
360 req
.type
= SOCK_REQ_READ
;
368 /* Process the request. */
369 return netsock_request(sock
, &req
);
/*
 * netsock_write: chardriver write hook. Mirror of netsock_read(): builds a
 * SOCK_REQ_WRITE sock_req and hands it to netsock_request().
 * NOTE(review): fragmentary — same missing pieces as netsock_read (end of
 * parameter list, get_sock() error return, remaining req field
 * assignments).
 */
372 static ssize_t
netsock_write(devminor_t minor
, u64_t
UNUSED(position
),
373 endpoint_t endpt
, cp_grant_id_t grant
, size_t size
, int flags
,
379 if (!(sock
= get_sock(minor
)))
382 /* Build a request record for this request. */
383 req
.type
= SOCK_REQ_WRITE
;
391 /* Process the request. */
392 return netsock_request(sock
, &req
);
/*
 * netsock_ioctl: chardriver ioctl hook. Builds a SOCK_REQ_IOCTL sock_req
 * and hands it to netsock_request().
 * NOTE(review): fragmentary — the get_sock() error return and the
 * remaining req field assignments (request/endpt/grant/flags/id) are
 * missing.
 */
395 static int netsock_ioctl(devminor_t minor
, unsigned long request
,
396 endpoint_t endpt
, cp_grant_id_t grant
, int flags
,
397 endpoint_t
UNUSED(user_endpt
), cdev_id_t id
)
402 if (!(sock
= get_sock(minor
)))
405 /* Build a request record for this request. */
406 req
.type
= SOCK_REQ_IOCTL
;
414 /* Process the request. */
415 return netsock_request(sock
, &req
);
/*
 * netsock_cancel: chardriver cancel hook. First tries to remove a matching
 * still-queued request via mq_cancel(); otherwise, if the identified
 * request is the socket's pending blocked read, clears its pending flag.
 * An unmatched request is fine — per the comment below, no reply is sent.
 * NOTE(review): fragmentary — the return statements (e.g. EINTR for a
 * successful cancel, EDONTREPLY otherwise — TODO confirm against upstream)
 * are missing.
 */
418 static int netsock_cancel(devminor_t minor
, endpoint_t endpt
, cdev_id_t id
)
422 if (!(sock
= get_sock(minor
)))
425 debug_sock_print("socket num %ld", get_sock_num(sock
));
427 /* Cancel the last operation in the queue */
428 if (mq_cancel(minor
, endpt
, id
))
431 /* Cancel any ongoing blocked read */
432 if ((sock
->flags
& SOCK_FLG_OP_PENDING
) &&
433 (sock
->flags
& SOCK_FLG_OP_READING
) &&
434 endpt
== sock
->req
.endpt
&& id
== sock
->req
.id
) {
435 sock
->flags
&= ~SOCK_FLG_OP_PENDING
;
439 /* The request may not be found. This is OK. Do not reply. */
/*
 * netsock_select: chardriver select hook. Executes immediately (never
 * suspends): records the selecting endpoint, delegates to ops->select, and
 * drops select_ep again when no select flags remained set afterwards.
 * Asserts that at most one endpoint selects on a socket at a time.
 * NOTE(review): fragmentary — the get_sock() error return, the fallback
 * when there is no select op, and the final return of r are missing.
 */
443 static int netsock_select(devminor_t minor
, unsigned int ops
, endpoint_t endpt
)
449 * Select is always executed immediately and is never suspended.
450 * Although, it sets actions which must be monitored
452 if (!(sock
= get_sock(minor
)))
455 assert(sock
->select_ep
== NONE
|| sock
->select_ep
== endpt
);
457 if (sock
->ops
&& sock
->ops
->select
) {
458 sock
->select_ep
= endpt
;
459 r
= sock
->ops
->select(sock
, ops
);
460 if (!sock_select_set(sock
))
461 sock
->select_ep
= NONE
;
/*
 * socket_request: public entry point for an incoming VFS message. Simply
 * forwards it to chardriver_process(), which decodes the message and
 * invokes the appropriate handler from netsock_tab.
 */
468 void socket_request(message
* m
, int ipc_status
)
470 debug_sock_print("request %d", m
->m_type
);
472 /* Let the chardriver library decode the request for us. */
473 chardriver_process(&netsock_tab
, m
, ipc_status
);
/*
 * mq_process: walk the queue of suspended requests and resume each one
 * whose socket is no longer busy (no pending op, and not a read blocked
 * behind an in-progress write); the resumed request is executed via
 * socket_request_socket() and its reply sent with send_req_reply().
 * NOTE(review): fragmentary — the loop header over the queue, the copy of
 * mq->req into sock->req (implied by the &sock->req uses below — TODO
 * confirm), and the node removal/free are missing.
 */
476 void mq_process(void)
479 struct socket
* sock
;
485 struct mq
* next
= mq
->next
;
487 sock
= get_sock(mq
->req
.minor
);
488 if (!(sock
->flags
& SOCK_FLG_OP_PENDING
) &&
489 !(mq
->req
.type
== SOCK_REQ_READ
&&
490 sock
->flags
& SOCK_FLG_OP_WRITING
)) {
491 debug_sock_print("resuming op on sock %ld\n",
494 r
= socket_request_socket(sock
, &sock
->req
);
495 send_req_reply(&sock
->req
, r
);
/*
 * generic_op_select: default select implementation for generic packet
 * sockets. While an operation is pending nothing is ready, so requested
 * read/write interest is only latched into the SOCK_FLG_SEL_* flags when
 * CDEV_NOTIFY asks for later notification. Otherwise read readiness is
 * reported immediately (presumably when data is queued — the condition
 * line is missing) and writes are always reported ready (see FIXME).
 * Error conditions are not monitored (see FIXMEs).
 * NOTE(review): fragmentary — the retsel declaration/initialization, the
 * read-readiness condition, and the final return of retsel are missing.
 */
505 int generic_op_select(struct socket
* sock
, unsigned int sel
)
509 debug_sock_print("socket num %ld 0x%x", get_sock_num(sock
), sel
);
511 /* in this case any operation would block, no error */
512 if (sock
->flags
& SOCK_FLG_OP_PENDING
) {
513 if (sel
& CDEV_NOTIFY
) {
514 if (sel
& CDEV_OP_RD
)
515 sock
->flags
|= SOCK_FLG_SEL_READ
;
516 if (sel
& CDEV_OP_WR
)
517 sock
->flags
|= SOCK_FLG_SEL_WRITE
;
518 /* FIXME we do not monitor error */
523 if (sel
& CDEV_OP_RD
) {
525 retsel
|= CDEV_OP_RD
;
526 else if (sel
& CDEV_NOTIFY
)
527 sock
->flags
|= SOCK_FLG_SEL_READ
;
529 /* FIXME generic packet socket never blocks on write */
530 if (sel
& CDEV_OP_WR
)
531 retsel
|= CDEV_OP_WR
;
532 /* FIXME CDEV_OP_ERR is ignored, we do not generate exceptions */
/*
 * generic_op_select_reply: default select_reply implementation. Asserts an
 * active select and that write/error select flags are never latched for
 * generic packet sockets (see generic_op_select). Reports read readiness
 * when SOCK_FLG_SEL_READ is set and data is queued, then clears the select
 * flags.
 * NOTE(review): fragmentary and truncated at the end of this copy — the
 * early return while an op is pending, the sel |= CDEV_OP_RD assignment,
 * the remainder of the flag-clearing expression, and the final return are
 * missing.
 */
537 int generic_op_select_reply(struct socket
* sock
)
539 unsigned int sel
= 0;
541 assert(sock
->select_ep
!= NONE
);
542 debug_sock_print("socket num %ld", get_sock_num(sock
));
544 /* unused for generic packet socket, see generic_op_select() */
545 assert((sock
->flags
& (SOCK_FLG_SEL_WRITE
| SOCK_FLG_SEL_ERROR
)) == 0);
547 if (sock
->flags
& SOCK_FLG_OP_PENDING
) {
548 debug_sock_print("WARNING socket still blocking!");
552 if (sock
->flags
& SOCK_FLG_SEL_READ
&& sock
->recv_head
)
556 sock
->flags
&= ~(SOCK_FLG_SEL_WRITE
| SOCK_FLG_SEL_READ
|