/*
 * This file implements handling of socket-related requests from VFS
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/sysutil.h>
#include <minix/netsock.h>

#include <sys/ioc_net.h>
char * netsock_user_name = NULL;
#define NETSOCK_USER_NAME (netsock_user_name ? netsock_user_name : "NETSOCK")
#define debug_print(str, ...) printf("%s : %s:%d : " str "\n", \
		NETSOCK_USER_NAME, __func__, __LINE__, ##__VA_ARGS__)
#if 0
#define debug_sock_print(...)	debug_print(__VA_ARGS__)
#else
#define debug_sock_print(...)
#endif

#if 0
#define debug_sock_select_print(...)	debug_print(__VA_ARGS__)
#else
#define debug_sock_select_print(...)	debug_sock_print(__VA_ARGS__)
#endif
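
/*
 * Note: the "#if 0" guards above compile both debug printers away by
 * default; flipping a guard to "#if 1" routes the corresponding macro
 * to debug_print() for verbose per-call logging.
 */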
#define netsock_panic(str, ...) panic("%s : " str, NETSOCK_USER_NAME, \
		##__VA_ARGS__)
#define netsock_error(str, ...) printf("%s : " str, NETSOCK_USER_NAME, \
		##__VA_ARGS__)
struct socket socket[MAX_SOCKETS];
#define recv_q_alloc()	debug_malloc(sizeof(struct recv_q))
#define recv_q_free	debug_free

/* Queue entry for a request suspended while its socket is busy; the
 * member layout is reconstructed from its uses below. */
struct mq {
	message		m;
	struct mq *	prev;
	struct mq *	next;
};

#define mq_alloc()	debug_malloc(sizeof(struct mq))
#define mq_free	debug_free
static struct mq * mq_head, *mq_tail;
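
/*
 * Requests that cannot be served right away (a blocking call is already
 * pending, or a read arrives while a write is in flight) are parked on
 * this doubly-linked queue as copied messages and replayed later by
 * mq_process(). New entries are appended at the tail.
 */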
int mq_enqueue(message * m)
{
	struct mq * mq;

	debug_sock_print("sock %d op %d", m->DEVICE, m->m_type);
	mq = mq_alloc();

	if (mq == NULL)
		return -1;

	mq->next = NULL;
	mq->m = *m;

	if (mq_head) {
		mq->prev = mq_tail;
		mq_tail->next = mq;
		mq_tail = mq;
	} else {
		mq->prev = NULL;
		mq_head = mq_tail = mq;
	}

	return 0;
}
__unused static struct mq * mq_dequeue_head(void)
{
	struct mq * ret;

	if (!mq_head)
		return NULL;

	ret = mq_head;

	if (mq_head != mq_tail) {
		mq_head = mq_head->next;
		mq_head->prev = NULL;
	} else
		mq_head = mq_tail = NULL;

	debug_sock_print("socket %d\n", ret->m.DEVICE);

	return ret;
}
static void mq_dequeue(struct mq * mq)
{
	if (mq_head == mq_tail)
		mq_head = mq_tail = NULL;
	else {
		if (mq->prev == NULL) {
			mq_head = mq->next;
			mq_head->prev = NULL;
		} else
			mq->prev->next = mq->next;
		if (mq->next == NULL) {
			mq_tail = mq->prev;
			mq_tail->next = NULL;
		} else
			mq->next->prev = mq->prev;
	}
}
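
/*
 * A cancel request from VFS identifies its victim by the (DEVICE,
 * USER_ENDPT, IO_GRANT) triple of the original message; the queue is
 * scanned from the tail so the most recently suspended match is found.
 */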
static int mq_cancel(message * m)
{
	struct mq * mq;

	for (mq = mq_tail; mq; mq = mq->prev) {
		if (m->DEVICE == mq->m.DEVICE &&
				m->USER_ENDPT == mq->m.USER_ENDPT &&
				m->IO_GRANT == mq->m.IO_GRANT) {
			debug_sock_print("socket %d\n", mq->m.DEVICE);
			break;
		}
	}

	if (mq) {
		mq_dequeue(mq);
		mq_free(mq);
		return 1;
	}

	return 0;
}
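
/*
 * Each socket keeps a FIFO of received buffers (recv_head/recv_tail)
 * plus a running byte count in recv_data_size; the buffers themselves
 * are presumably allocated by the protocol-specific code and are freed
 * through a caller-supplied recv_data_free_fn.
 */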
int sock_enqueue_data(struct socket * sock, void * data, unsigned size)
{
	struct recv_q * r;

	if (!(r = recv_q_alloc()))
		return ENOMEM;

	r->data = data;
	r->next = NULL;

	if (sock->recv_head) {
		sock->recv_tail->next = r;
		sock->recv_tail = r;
	} else
		sock->recv_head = sock->recv_tail = r;

	sock->recv_data_size += size;

	return OK;
}
void * sock_dequeue_data(struct socket * sock)
{
	void * data;
	struct recv_q * r;

	if ((r = sock->recv_head)) {
		data = r->data;
		if (!(sock->recv_head = r->next))
			sock->recv_tail = NULL;
		recv_q_free(r);

		return data;
	}

	return NULL;
}
void sock_dequeue_data_all(struct socket * sock,
				recv_data_free_fn data_free)
{
	void * data;

	while ((data = sock_dequeue_data(sock)))
		data_free(data);
	sock->recv_data_size = 0;
}
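
/*
 * Replies to VFS echo back identifying fields of the request: REP_STATUS
 * carries the result, and REP_IO_GRANT (and, presumably, REP_ENDPT) let
 * VFS match the reply to the suspended call.
 */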
static void set_reply_msg(message * m, int status)
{
	int proc, ref;

	proc = m->USER_ENDPT;
	ref = (int) m->IO_GRANT;

	m->REP_ENDPT = proc;
	m->REP_STATUS = status;
	m->REP_IO_GRANT = ref;
}
void send_reply_type(message * m, int type, int status)
{
	int result;

	set_reply_msg(m, status);

	m->m_type = type;
	result = send(m->m_source, m);
	if (result != OK)
		netsock_panic("unable to send (err %d)", result);
}
void send_reply(message * m, int status)
{
	debug_sock_print("status %d", status);
	send_reply_type(m, DEV_REVIVE, status);
}

void send_reply_open(message * m, int status)
{
	debug_sock_print("status %d", status);
	send_reply_type(m, DEV_OPEN_REPL, status);
}

void send_reply_close(message * m, int status)
{
	debug_sock_print("status %d", status);
	send_reply_type(m, DEV_CLOSE_REPL, status);
}
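
/*
 * Immediate answer to a DEV_SELECT request: DEV_SEL_REPL1 reports the
 * operations that are ready right now (possibly none).
 */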
void sock_reply_select(struct socket * sock, unsigned selops)
{
	int result;
	message msg;

	debug_sock_select_print("selops %d", selops);

	msg.m_type = DEV_SEL_REPL1;
	msg.DEV_MINOR = get_sock_num(sock);
	msg.DEV_SEL_OPS = selops;

	result = send(sock->select_ep, &msg);
	if (result != OK)
		netsock_panic("unable to send (err %d)", result);
}
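
/*
 * Deferred select notification: when a monitored event occurs, the
 * protocol's select_reply hook fills in DEV_SEL_OPS and a DEV_SEL_REPL2
 * message is sent to the endpoint that issued the original select; if
 * the hook reports nothing ready, the notification is suppressed.
 */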
void sock_select_notify(struct socket * sock)
{
	int result;
	message msg;

	debug_sock_select_print("socket num %ld", get_sock_num(sock));
	assert(sock->select_ep != NONE);

	msg.DEV_SEL_OPS = 0;
	sock->ops->select_reply(sock, &msg);
	if (msg.DEV_SEL_OPS == 0) {
		debug_sock_select_print("called from %p sflags 0x%x TXsz %d RXsz %d\n",
				__builtin_return_address(0), sock->flags,
				sock->buf_size, sock->recv_data_size);
		return;
	}

	msg.m_type = DEV_SEL_REPL2;
	msg.DEV_MINOR = get_sock_num(sock);

	debug_sock_select_print("socket num %d select result 0x%x sent",
			msg.DEV_MINOR, msg.DEV_SEL_OPS);
	result = send(sock->select_ep, &msg);
	if (result != OK)
		netsock_panic("unable to send (err %d)", result);

	sock_clear_select(sock);
	sock->select_ep = NONE;
}
static void sock_reply_type(struct socket * sock, int type, int status)
{
	sock->mess.m_type = type;

	send_reply_type(&sock->mess, type, status);
}
void sock_reply_close(struct socket * sock, int status)
{
	debug_sock_print("sock %ld status %d", get_sock_num(sock), status);
	sock_reply_type(sock, DEV_CLOSE_REPL, status);
}

void sock_reply(struct socket * sock, int status)
{
	debug_sock_print("sock %ld status %d", get_sock_num(sock), status);
	sock_reply_type(sock, DEV_REVIVE, status);
}
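
/*
 * Slots below SOCK_TYPES + MAX_DEVS are never handed out here; they are
 * apparently reserved for the fixed per-type/per-device entries, so
 * dynamically created sockets live above that index. A slot is free
 * when its ops pointer is NULL.
 */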
struct socket * get_unused_sock(void)
{
	int i;

	for (i = SOCK_TYPES + MAX_DEVS; i < MAX_SOCKETS; i++) {
		if (socket[i].ops == NULL) {
			/* clear it all */
			memset(&socket[i], 0, sizeof(struct socket));
			return &socket[i];
		}
	}

	return NULL;
}
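
/*
 * Dispatch a read/write/ioctl request to the protocol-specific handler,
 * honouring FLG_OP_NONBLOCK; a request for which the protocol provides
 * no handler is refused with EINVAL.
 */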
static void socket_request_socket(struct socket * sock, message * m)
{
	int blocking = m->FLAGS & FLG_OP_NONBLOCK ? 0 : 1;

	switch (m->m_type) {
	case DEV_READ_S:
		if (sock->ops && sock->ops->read)
			sock->ops->read(sock, m, blocking);
		else
			send_reply(m, EINVAL);
		return;
	case DEV_WRITE_S:
		if (sock->ops && sock->ops->write)
			sock->ops->write(sock, m, blocking);
		else
			send_reply(m, EINVAL);
		return;
	case DEV_IOCTL_S:
		if (sock->ops && sock->ops->ioctl)
			sock->ops->ioctl(sock, m, blocking);
		else
			send_reply(m, EINVAL);
		return;
	default:
		netsock_panic("cannot happen!");
	}
}
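
/*
 * Main entry point for socket requests from VFS: close and select are
 * handled immediately, read/write/ioctl may be suspended on the message
 * queue, and CANCEL unwinds either a queued or a blocked operation.
 */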
void socket_request(message * m)
{
	struct socket * sock;

	debug_sock_print("request %d", m->m_type);

	switch (m->m_type) {
	case DEV_CLOSE:
		sock = get_sock(m->DEVICE);
		if (sock->ops && sock->ops->close) {
			sock->flags &= ~SOCK_FLG_OP_PENDING;
			sock->mess = *m;
			sock->ops->close(sock, m);
		} else
			send_reply_close(m, EINVAL);
		return;
	case DEV_READ_S:
	case DEV_WRITE_S:
	case DEV_IOCTL_S:
		sock = get_sock(m->DEVICE);
		if (!sock->ops) {
			send_reply(m, EINVAL);
			return;
		}
		/*
		 * If an operation is pending (a blocking operation) or a
		 * write is still in progress and we want to read, suspend
		 * the new request
		 */
		if ((sock->flags & SOCK_FLG_OP_PENDING) ||
				(m->m_type == DEV_READ_S &&
				sock->flags & SOCK_FLG_OP_WRITING)) {
			char * o = "non R/W op";
			if (sock->flags & SOCK_FLG_OP_READING)
				o = "READ";
			else if (sock->flags & SOCK_FLG_OP_WRITING)
				o = "WRITE";
			debug_sock_print("socket %ld is busy by %s flgs 0x%x\n",
					get_sock_num(sock), o, sock->flags);
			if (mq_enqueue(m) != 0) {
				debug_sock_print("Enqueuing suspended "
							"call failed");
				send_reply(m, ENOMEM);
			}
			return;
		}
		sock->mess = *m;
		socket_request_socket(sock, m);
		return;
	case CANCEL:
		sock = get_sock(m->DEVICE);
		printf("socket num %ld\n", get_sock_num(sock));
		debug_sock_print("socket num %ld", get_sock_num(sock));
		/* Cancel the last operation in the queue */
		if (mq_cancel(m)) {
			send_reply(m, EINTR);
		/* ... or a blocked read */
		} else if (sock->flags & SOCK_FLG_OP_PENDING &&
				sock->flags & SOCK_FLG_OP_READING) {
			sock->flags &= ~SOCK_FLG_OP_PENDING;
			send_reply(m, EINTR);
		} else
			netsock_panic("no operation to cancel");
		return;
	case DEV_SELECT:
		/*
		 * Select is always executed immediately and is never
		 * suspended. It may, however, set up events which must be
		 * monitored and reported later.
		 */
		sock = get_sock(m->DEVICE);
		assert(sock->select_ep == NONE || sock->select_ep == m->m_source);

		if (sock->ops && sock->ops->select) {
			sock->select_ep = m->m_source;
			sock->ops->select(sock, m);
			if (!sock_select_set(sock))
				sock->select_ep = NONE;
		} else
			send_reply(m, EINVAL);
		return;
	default:
		netsock_error("unknown message from VFS, type %d\n",
							m->m_type);
	}

	send_reply(m, EGENERIC);
}
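
/*
 * Replay suspended requests: walk the queue and re-issue every message
 * whose socket is no longer busy. An entry is dequeued and freed before
 * its operation runs, which is why the next pointer is saved up front.
 */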
void mq_process(void)
{
	struct mq * mq;
	struct socket * sock;

	mq = mq_head;

	while (mq) {
		struct mq * next = mq->next;

		sock = get_sock(mq->m.DEVICE);
		if (!(sock->flags & SOCK_FLG_OP_PENDING) &&
				!(mq->m.m_type == DEV_READ_S &&
					sock->flags & SOCK_FLG_OP_WRITING)) {
			debug_sock_print("resuming op on sock %ld\n",
					get_sock_num(sock));
			sock->mess = mq->m;
			mq_dequeue(mq);
			mq_free(mq);
			socket_request_socket(sock, &sock->mess);
		}

		mq = next;
	}
}
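
/*
 * Generic select for packet-style sockets. The requested operations
 * bitmap (SEL_RD/SEL_WR plus SEL_NOTIFY) apparently travels in the
 * USER_ENDPT field of the request; writing never blocks for these
 * sockets, so SEL_WR is always reported ready.
 */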
void generic_op_select(struct socket * sock, message * m)
{
	int retsel = 0, sel;

	debug_sock_print("socket num %ld 0x%x", get_sock_num(sock), m->USER_ENDPT);

	sel = m->USER_ENDPT;

	/* in this case any operation would block, no error */
	if (sock->flags & SOCK_FLG_OP_PENDING) {
		if (sel & SEL_NOTIFY) {
			if (sel & SEL_RD)
				sock->flags |= SOCK_FLG_SEL_READ;
			if (sel & SEL_WR)
				sock->flags |= SOCK_FLG_SEL_WRITE;
			/* FIXME we do not monitor error */
		}
		sock_reply_select(sock, 0);
		return;
	}

	if (sel & SEL_RD) {
		if (sock->recv_head)
			retsel |= SEL_RD;
		else if (sel & SEL_NOTIFY)
			sock->flags |= SOCK_FLG_SEL_READ;
	}
	/* FIXME generic packet socket never blocks on write */
	if (sel & SEL_WR)
		retsel |= SEL_WR;
	/* FIXME SEL_ERR is ignored, we do not generate exceptions */

	sock_reply_select(sock, retsel);
}
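
/*
 * Called from sock_select_notify() to fill in DEV_SEL_OPS; only read
 * readiness is ever reported here, matching generic_op_select() above.
 */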
void generic_op_select_reply(struct socket * sock, __unused message * m)
{
	assert(sock->select_ep != NONE);
	debug_sock_print("socket num %ld", get_sock_num(sock));

	/* unused for generic packet socket, see generic_op_select() */
	assert((sock->flags & (SOCK_FLG_SEL_WRITE | SOCK_FLG_SEL_ERROR)) == 0);

	if (sock->flags & SOCK_FLG_OP_PENDING) {
		debug_sock_print("WARNING socket still blocking!");
		return;
	}

	if (sock->flags & SOCK_FLG_SEL_READ && sock->recv_head)
		m->DEV_SEL_OPS |= SEL_RD;

	sock->flags &= ~(SOCK_FLG_SEL_WRITE | SOCK_FLG_SEL_READ |
						SOCK_FLG_SEL_ERROR);
}