/*
 * lib/libnetsock/socket.c
 *
 * This file implements handling of socket-related requests from VFS.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>	/* memset() */
#include <assert.h>

#include <minix/ipc.h>
#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/sysutil.h>
#include <minix/netsock.h>

#include <lwip/tcp.h>

#include <sys/ioc_net.h>

char * netsock_user_name = NULL;
#define NETSOCK_USER_NAME (netsock_user_name ? netsock_user_name : "NETSOCK")

#define debug_print(str, ...) printf("%s : %s:%d : " str "\n", \
		NETSOCK_USER_NAME, __func__, __LINE__, ##__VA_ARGS__)

#if 0
#define debug_sock_print(...)	debug_print(__VA_ARGS__)
#else
#define debug_sock_print(...)
#endif

#if 0
#define debug_sock_select_print(...)	debug_print(__VA_ARGS__)
#else
#define debug_sock_select_print(...)	debug_sock_print(__VA_ARGS__)
#endif

#define netsock_panic(str, ...) panic("%s : " str, NETSOCK_USER_NAME, \
		##__VA_ARGS__)
#define netsock_error(str, ...) printf("%s : " str, NETSOCK_USER_NAME, \
		##__VA_ARGS__)

struct socket socket[MAX_SOCKETS];

#define recv_q_alloc()	debug_malloc(sizeof(struct recv_q))
#define recv_q_free	debug_free

struct mq {
	message		m;
	struct mq *	prev;
	struct mq *	next;
};

#define mq_alloc()	debug_malloc(sizeof(struct mq))
#define mq_free		debug_free

static struct mq * mq_head, *mq_tail;

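/*
 * Requests that cannot be served right away (e.g. a read on a socket that
 * is still busy with a blocking operation) are parked on this doubly linked
 * queue of messages and resumed later from mq_process().
 */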
int mq_enqueue(message * m)
{
	struct mq * mq;

	debug_sock_print("sock %d op %d", m->DEVICE, m->m_type);
	mq = mq_alloc();

	if (mq == NULL)
		return -1;

	mq->next = NULL;
	mq->m = *m;

	if (mq_head) {
		mq->prev = mq_tail;
		mq_tail->next = mq;
		mq_tail = mq;
	}
	else {
		mq->prev = NULL;
		mq_head = mq_tail = mq;
	}

	return 0;
}

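/* Detach and return the oldest queued message, or NULL if the queue is empty. */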
__unused static struct mq * mq_dequeue_head(void)
{
	struct mq * ret;

	if (!mq_head)
		return NULL;

	ret = mq_head;

	if (mq_head != mq_tail) {
		mq_head = mq_head->next;
		mq_head->prev = NULL;
	} else
		mq_head = mq_tail = NULL;

	debug_sock_print("socket %d\n", ret->m.DEVICE);

	return ret;
}

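/* Unlink an arbitrary entry from the queue; the caller still owns it. */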
static void mq_dequeue(struct mq * mq)
{
	if (mq_head == mq_tail)
		mq_head = mq_tail = NULL;
	else {
		if (mq->prev == NULL) {
			mq_head = mq->next;
			mq_head->prev = NULL;
		} else
			mq->prev->next = mq->next;
		if (mq->next == NULL) {
			mq_tail = mq->prev;
			mq_tail->next = NULL;
		} else
			mq->next->prev = mq->prev;
	}
}

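/*
 * Find the queued request matching this CANCEL message (same socket, caller
 * endpoint and grant) and drop it. Returns nonzero iff a match was removed,
 * which the caller uses to decide whether a blocked read must be cancelled
 * instead.
 */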
static int mq_cancel(message * m)
{
	struct mq * mq;

	for (mq = mq_tail; mq; mq = mq->prev) {
		if (m->DEVICE == mq->m.DEVICE &&
				m->USER_ENDPT == mq->m.USER_ENDPT &&
				m->IO_GRANT == mq->m.IO_GRANT) {
			debug_sock_print("socket %d\n", mq->m.DEVICE);
			break;
		}
	}

	if (mq) {
		mq_dequeue(mq);
		mq_free(mq);
		return 1;
	}

	return 0;
}

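/*
 * Append a received buffer to the socket's receive queue and account for
 * its size. The buffer is consumed later by sock_dequeue_data().
 */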
int sock_enqueue_data(struct socket * sock, void * data, unsigned size)
{
	struct recv_q * r;

	if (!(r = recv_q_alloc()))
		return ENOMEM;

	r->data = data;
	r->next = NULL;

	if (sock->recv_head) {
		sock->recv_tail->next = r;
		sock->recv_tail = r;
	} else {
		sock->recv_head = sock->recv_tail = r;
	}

	assert(size > 0);
	sock->recv_data_size += size;

	return OK;
}

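/* Pop the oldest buffer off the receive queue; NULL if nothing is queued. */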
void * sock_dequeue_data(struct socket * sock)
{
	void * data;
	struct recv_q * r;

	if ((r = sock->recv_head)) {
		data = r->data;
		if (!(sock->recv_head = r->next))
			sock->recv_tail = NULL;
		recv_q_free(r);

		return data;
	}

	return NULL;
}

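/* Drain the receive queue, freeing each buffer with the supplied callback. */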
void sock_dequeue_data_all(struct socket * sock,
				recv_data_free_fn data_free)
{
	void * data;

	while ((data = sock_dequeue_data(sock)))
		data_free(data);
	sock->recv_data_size = 0;
}

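/*
 * Fill in the reply fields VFS expects: the original caller endpoint, the
 * grant identifying the request, and the operation's status code.
 */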
static void set_reply_msg(message * m, int status)
{
	int proc, ref;

	proc = m->USER_ENDPT;
	ref = (int) m->IO_GRANT;

	m->REP_ENDPT = proc;
	m->REP_STATUS = status;
	m->REP_IO_GRANT = ref;
}

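/*
 * Reply helpers: stamp the message with a reply type and status and send it
 * back to the process that issued the request (normally VFS).
 */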
void send_reply_type(message * m, int type, int status)
{
	int result;

	set_reply_msg(m, status);

	m->m_type = type;
	result = send(m->m_source, m);
	if (result != OK)
		netsock_panic("unable to send (err %d)", result);
}

void send_reply(message * m, int status)
{
	debug_sock_print("status %d", status);
	send_reply_type(m, DEV_REVIVE, status);
}

void send_reply_open(message * m, int status)
{
	debug_sock_print("status %d", status);
	send_reply_type(m, DEV_OPEN_REPL, status);
}

void send_reply_close(message * m, int status)
{
	debug_sock_print("status %d", status);
	send_reply_type(m, DEV_CLOSE_REPL, status);
}

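/* Answer an immediate (non-deferred) select query with the ready operations. */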
void sock_reply_select(struct socket * sock, unsigned selops)
{
	int result;
	message msg;

	debug_sock_select_print("selops %d", selops);

	msg.m_type = DEV_SEL_REPL1;
	msg.DEV_MINOR = get_sock_num(sock);
	msg.DEV_SEL_OPS = selops;

	result = send(sock->select_ep, &msg);
	if (result != OK)
		netsock_panic("unable to send (err %d)", result);
}

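/*
 * Called when a socket's state changes while a select is outstanding: ask
 * the per-type select_reply hook which operations became ready and, if any
 * did, send a late DEV_SEL_REPL2 notification and clear the select state.
 */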
void sock_select_notify(struct socket * sock)
{
	int result;
	message msg;

	debug_sock_select_print("socket num %ld", get_sock_num(sock));
	assert(sock->select_ep != NONE);

	msg.DEV_SEL_OPS = 0;
	sock->ops->select_reply(sock, &msg);
	if (msg.DEV_SEL_OPS == 0) {
		debug_sock_select_print("called from %p sflags 0x%x TXsz %d RXsz %d\n",
				__builtin_return_address(0), sock->flags,
				sock->buf_size, sock->recv_data_size);
		return;
	}

	msg.m_type = DEV_SEL_REPL2;
	msg.DEV_MINOR = get_sock_num(sock);

	debug_sock_select_print("socket num %d select result 0x%x sent",
			msg.DEV_MINOR, msg.DEV_SEL_OPS);
	result = send(sock->select_ep, &msg);
	if (result != OK)
		netsock_panic("unable to send (err %d)", result);

	sock_clear_select(sock);
	sock->select_ep = NONE;
}

static void sock_reply_type(struct socket * sock, int type, int status)
{
	sock->mess.m_type = type;

	send_reply_type(&sock->mess, type, status);
}

void sock_reply_close(struct socket * sock, int status)
{
	debug_sock_print("sock %ld status %d", get_sock_num(sock), status);
	sock_reply_type(sock, DEV_CLOSE_REPL, status);
}

void sock_reply(struct socket * sock, int status)
{
	debug_sock_print("sock %ld status %d", get_sock_num(sock), status);
	sock_reply_type(sock, DEV_REVIVE, status);
}

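/*
 * Hand out a free slot from the socket table, skipping the first
 * SOCK_TYPES + MAX_DEVS entries, which are reserved.
 */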
struct socket * get_unused_sock(void)
{
	int i;

	for (i = SOCK_TYPES + MAX_DEVS; i < MAX_SOCKETS; i++) {
		if (socket[i].ops == NULL) {
			/* clear it all */
			memset(&socket[i], 0, sizeof(struct socket));
			return &socket[i];
		}
	}

	return NULL;
}

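/*
 * Dispatch a read, write or ioctl to the socket's per-type operation
 * table; a request without a matching handler is refused with EINVAL.
 */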
static void socket_request_socket(struct socket * sock, message * m)
{
	int blocking = m->FLAGS & FLG_OP_NONBLOCK ? 0 : 1;

	switch (m->m_type) {
	case DEV_READ_S:
		if (sock->ops && sock->ops->read)
			sock->ops->read(sock, m, blocking);
		else
			send_reply(m, EINVAL);
		return;
	case DEV_WRITE_S:
		if (sock->ops && sock->ops->write)
			sock->ops->write(sock, m, blocking);
		else
			send_reply(m, EINVAL);
		return;
	case DEV_IOCTL_S:
		if (sock->ops && sock->ops->ioctl)
			sock->ops->ioctl(sock, m, blocking);
		else
			send_reply(m, EINVAL);
		return;
	default:
		netsock_panic("cannot happen!");
	}
}

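/*
 * Main entry point for requests coming from VFS: open and close are served
 * directly, I/O is dispatched or suspended depending on the socket's state,
 * CANCEL withdraws a suspended or blocked operation, and DEV_SELECT is
 * always handled immediately.
 */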
void socket_request(message * m)
{
	struct socket * sock;

	debug_sock_print("request %d", m->m_type);
	switch (m->m_type) {
	case DEV_OPEN:
		socket_open(m);
		return;
	case DEV_CLOSE:
		sock = get_sock(m->DEVICE);
		if (sock->ops && sock->ops->close) {
			sock->flags &= ~SOCK_FLG_OP_PENDING;
			sock->mess = *m;
			sock->ops->close(sock, m);
		} else
			send_reply_close(m, EINVAL);
		return;
	case DEV_READ_S:
	case DEV_WRITE_S:
	case DEV_IOCTL_S:
		sock = get_sock(m->DEVICE);
		if (!sock) {
			send_reply(m, EINVAL);
			return;
		}
		/*
		 * If an operation is pending (blocking operation) or writing
		 * is still going on and we want to read, suspend the new
		 * operation.
		 */
		if ((sock->flags & SOCK_FLG_OP_PENDING) ||
				(m->m_type == DEV_READ_S &&
				sock->flags & SOCK_FLG_OP_WRITING)) {
			char * o;
			if (sock->flags & SOCK_FLG_OP_READING)
				o = "READ";
			else if (sock->flags & SOCK_FLG_OP_WRITING)
				o = "WRITE";
			else
				o = "non R/W op";
			debug_sock_print("socket %ld is busy by %s flgs 0x%x\n",
					get_sock_num(sock), o, sock->flags);
			if (mq_enqueue(m) != 0) {
				debug_sock_print("Enqueuing suspended "
						"call failed");
				send_reply(m, ENOMEM);
			}
			return;
		}
		sock->mess = *m;
		socket_request_socket(sock, m);
		return;
	case CANCEL:
		sock = get_sock(m->DEVICE);
		debug_sock_print("socket num %ld", get_sock_num(sock));
		/* Cancel the last operation in the queue */
		if (mq_cancel(m)) {
			send_reply(m, EINTR);
			return;
		/* ... or a blocked read */
		} else if (sock->flags & SOCK_FLG_OP_PENDING &&
				sock->flags & SOCK_FLG_OP_READING) {
			sock->flags &= ~SOCK_FLG_OP_PENDING;
			send_reply(m, EINTR);
			return;
		} else
			netsock_panic("no operation to cancel");

		return;
	case DEV_SELECT:
		/*
		 * Select is always executed immediately and is never
		 * suspended. However, it may set events which must be
		 * monitored afterwards.
		 */
		sock = get_sock(m->DEVICE);
		assert(sock->select_ep == NONE ||
				sock->select_ep == m->m_source);

		if (sock->ops && sock->ops->select) {
			sock->select_ep = m->m_source;
			sock->ops->select(sock, m);
			if (!sock_select_set(sock))
				sock->select_ep = NONE;
		} else
			send_reply(m, EINVAL);
		return;
	default:
		netsock_error("unknown message from VFS, type %d\n",
				m->m_type);
		send_reply(m, EGENERIC);
	}
}

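/*
 * Walk the suspended-request queue and resume the first request whose
 * socket is no longer busy; it is re-dispatched through
 * socket_request_socket() and removed from the queue.
 */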
void mq_process(void)
{
	struct mq * mq;
	struct socket * sock;

	mq = mq_head;

	while (mq) {
		struct mq * next = mq->next;

		sock = get_sock(mq->m.DEVICE);
		if (!(sock->flags & SOCK_FLG_OP_PENDING) &&
				!(mq->m.m_type == DEV_READ_S &&
				sock->flags & SOCK_FLG_OP_WRITING)) {
			debug_sock_print("resuming op on sock %ld\n",
					get_sock_num(sock));
			sock->mess = mq->m;
			socket_request_socket(sock, &sock->mess);
			mq_dequeue(mq);
			mq_free(mq);
			return;
		}

		mq = next;
	}
}

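/*
 * Default select implementation for packet-style sockets: reading is ready
 * when data is queued, writing never blocks, and SEL_NOTIFY arms deferred
 * notification via the SOCK_FLG_SEL_* flags.
 */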
void generic_op_select(struct socket * sock, message * m)
{
	int retsel = 0, sel;

	debug_sock_print("socket num %ld 0x%x",
			get_sock_num(sock), m->USER_ENDPT);

	sel = m->USER_ENDPT;

	/* in this case any operation would block, no error */
	if (sock->flags & SOCK_FLG_OP_PENDING) {
		if (sel & SEL_NOTIFY) {
			if (sel & SEL_RD)
				sock->flags |= SOCK_FLG_SEL_READ;
			if (sel & SEL_WR)
				sock->flags |= SOCK_FLG_SEL_WRITE;
			/* FIXME we do not monitor error */
		}
		sock_reply_select(sock, 0);
		return;
	}

	if (sel & SEL_RD) {
		if (sock->recv_head)
			retsel |= SEL_RD;
		else if (sel & SEL_NOTIFY)
			sock->flags |= SOCK_FLG_SEL_READ;
	}
	/* FIXME generic packet socket never blocks on write */
	if (sel & SEL_WR)
		retsel |= SEL_WR;
	/* FIXME SEL_ERR is ignored, we do not generate exceptions */

	sock_reply_select(sock, retsel);
}

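/*
 * Deferred-select counterpart of generic_op_select(): reports an armed
 * read as ready once data has arrived and clears the select flags.
 */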
void generic_op_select_reply(struct socket * sock, message * m)
{
	assert(sock->select_ep != NONE);
	debug_sock_print("socket num %ld", get_sock_num(sock));

	/* unused for generic packet socket, see generic_op_select() */
	assert((sock->flags & (SOCK_FLG_SEL_WRITE | SOCK_FLG_SEL_ERROR)) == 0);

	if (sock->flags & SOCK_FLG_OP_PENDING) {
		debug_sock_print("WARNING socket still blocking!");
		return;
	}

	if (sock->flags & SOCK_FLG_SEL_READ && sock->recv_head)
		m->DEV_SEL_OPS |= SEL_RD;

	if (m->DEV_SEL_OPS)
		sock->flags &= ~(SOCK_FLG_SEL_WRITE | SOCK_FLG_SEL_READ |
				SOCK_FLG_SEL_ERROR);
}