tools/llvm: Do not build with symbols
[minix3.git] / minix / lib / libnetsock / socket.c
blobbef626ec76b3f329e7e8e8895096843a3be36c65
1 /*
2 * This file implements handling of socket-related requests from VFS
3 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/ipc.h>
#include <minix/netsock.h>
#include <minix/sysutil.h>

#include <lwip/tcp.h>

#include <sys/ioc_net.h>
/* Display name of the service using this library, used as a log prefix.
 * NOTE(review): presumably assigned by the library user at init time --
 * confirm; it stays NULL otherwise and the fallback below is used. */
char * netsock_user_name = NULL;
#define NETSOCK_USER_NAME (netsock_user_name ? netsock_user_name : "NETSOCK")

/* Debug logging: tagged with the user name, function and line number. */
#define debug_print(str, ...) printf("%s : %s:%d : " str "\n", \
NETSOCK_USER_NAME, __func__, __LINE__, ##__VA_ARGS__)

/* Per-socket debug output; compiled out by default (flip the "#if 0"). */
#if 0
#define debug_sock_print(...) debug_print(__VA_ARGS__)
#else
#define debug_sock_print(...)
#endif

/* Select-specific debug output; follows debug_sock_print by default. */
#if 0
#define debug_sock_select_print(...) debug_print(__VA_ARGS__)
#else
#define debug_sock_select_print(...) debug_sock_print(__VA_ARGS__)
#endif

/* Fatal / non-fatal error reporting, tagged with the user name. */
#define netsock_panic(str, ...) panic("%s : " str, NETSOCK_USER_NAME, \
##__VA_ARGS__)
#define netsock_error(str, ...) printf("%s : " str, NETSOCK_USER_NAME, \
##__VA_ARGS__)

/* Global table of all sockets managed by this library. */
struct socket socket[MAX_SOCKETS];
/* Chardriver callback prototypes; installed in netsock_tab below. */
static int netsock_open(devminor_t minor, int access, endpoint_t user_endpt);
static int netsock_close(devminor_t minor);
static ssize_t netsock_read(devminor_t minor, u64_t position, endpoint_t endpt,
cp_grant_id_t grant, size_t size, int flags, cdev_id_t id);
static ssize_t netsock_write(devminor_t minor, u64_t position,
endpoint_t endpt, cp_grant_id_t grant, size_t size, int flags,
cdev_id_t id);
static int netsock_ioctl(devminor_t minor, unsigned long request,
endpoint_t endpt, cp_grant_id_t grant, int flags,
endpoint_t user_endpt, cdev_id_t id);
static int netsock_cancel(devminor_t minor, endpoint_t endpt, cdev_id_t id);
static int netsock_select(devminor_t minor, unsigned int ops,
endpoint_t endpt);
59 static struct chardriver netsock_tab = {
60 .cdr_open = netsock_open,
61 .cdr_close = netsock_close,
62 .cdr_read = netsock_read,
63 .cdr_write = netsock_write,
64 .cdr_ioctl = netsock_ioctl,
65 .cdr_cancel = netsock_cancel,
66 .cdr_select = netsock_select
69 #define recv_q_alloc() debug_malloc(sizeof(struct recv_q))
70 #define recv_q_free debug_free
72 struct mq {
73 struct sock_req req;
74 struct mq * prev;
75 struct mq * next;
78 #define mq_alloc() debug_malloc(sizeof(struct mq))
79 #define mq_free debug_free
81 static struct mq * mq_head, *mq_tail;
83 int mq_enqueue(struct sock_req * req)
85 struct mq * mq;
87 debug_sock_print("sock %d op %d", req->minor, req->type);
88 mq = mq_alloc();
90 if (mq == NULL)
91 return -1;
93 mq->next = NULL;
94 mq->req = *req;
96 if (mq_head) {
97 mq->prev = mq_tail;
98 mq_tail->next = mq;
99 mq_tail = mq;
101 else {
102 mq->prev = NULL;
103 mq_head = mq_tail = mq;
106 return 0;
109 __unused static struct mq * mq_dequeue_head(void)
111 struct mq * ret;
113 if (!mq_head)
114 return NULL;
116 ret = mq_head;
118 if (mq_head != mq_tail) {
119 mq_head = mq_head->next;
120 mq_head->prev = NULL;
121 } else
122 mq_head = mq_tail = NULL;
124 debug_sock_print("socket %d\n", ret->req.minor);
126 return ret;
129 static void mq_dequeue(struct mq * mq)
131 if (mq_head == mq_tail)
132 mq_head = mq_tail = NULL;
133 else {
134 if (mq->prev == NULL) {
135 mq_head = mq->next;
136 mq_head->prev = NULL;
137 } else
138 mq->prev->next = mq->next;
139 if (mq->next == NULL) {
140 mq_tail = mq->prev;
141 mq_tail->next = NULL;
142 } else
143 mq->next->prev = mq->prev;
147 static int mq_cancel(devminor_t minor, endpoint_t endpt, cdev_id_t id)
149 struct mq * mq;
151 for (mq = mq_tail; mq; mq = mq->prev) {
152 if (minor == mq->req.minor && endpt == mq->req.endpt &&
153 id == mq->req.id) {
154 debug_sock_print("socket %d\n", minor);
155 break;
159 if (mq) {
160 mq_dequeue(mq);
161 mq_free(mq);
164 /* FIXME: shouldn't this return (!!mq) ? */
165 return 1;
168 int sock_enqueue_data(struct socket * sock, void * data, unsigned size)
170 struct recv_q * r;
172 if (!(r = recv_q_alloc()))
173 return ENOMEM;
175 r->data = data;
176 r->next = NULL;
178 if (sock->recv_head) {
179 sock->recv_tail->next = r;
180 sock->recv_tail = r;
181 } else {
182 sock->recv_head = sock->recv_tail = r;
185 assert(size > 0);
186 sock->recv_data_size += size;
188 return OK;
191 void * sock_dequeue_data(struct socket * sock)
193 void * data;
194 struct recv_q * r;
196 if ((r = sock->recv_head)) {
197 data = r->data;
198 if (!(sock->recv_head = r->next))
199 sock->recv_tail = NULL;
200 recv_q_free(r);
202 return data;
205 return NULL;
208 void sock_dequeue_data_all(struct socket * sock,
209 recv_data_free_fn data_free)
211 void * data;
213 while ((data = sock_dequeue_data(sock)))
214 data_free(data);
215 sock->recv_data_size = 0;
218 void send_req_reply(struct sock_req * req, int status)
220 if (status == EDONTREPLY)
221 return;
223 chardriver_reply_task(req->endpt, req->id, status);
226 void sock_select_notify(struct socket * sock)
228 unsigned int ops;
230 debug_sock_select_print("socket num %ld", get_sock_num(sock));
231 assert(sock->select_ep != NONE);
233 ops = sock->ops->select_reply(sock);
234 if (ops == 0) {
235 debug_sock_select_print("called from %p sflags 0x%x TXsz %d RXsz %d\n",
236 __builtin_return_address(0), sock->flags,
237 sock->buf_size, sock->recv_data_size);
238 return;
241 chardriver_reply_select(sock->select_ep, get_sock_num(sock), ops);
243 sock_clear_select(sock);
244 sock->select_ep = NONE;
247 struct socket * get_unused_sock(void)
249 int i;
251 for (i = SOCK_TYPES + MAX_DEVS; i < MAX_SOCKETS; i++) {
252 if (socket[i].ops == NULL) {
253 /* clear it all */
254 memset(&socket[i], 0, sizeof(struct socket));
255 return &socket[i];
259 return NULL;
262 static int socket_request_socket(struct socket * sock, struct sock_req * req)
264 int r, blocking = (req->flags & CDEV_NONBLOCK) ? 0 : 1;
266 switch (req->type) {
267 case SOCK_REQ_READ:
268 if (sock->ops && sock->ops->read)
269 r = sock->ops->read(sock, req, blocking);
270 else
271 r = EINVAL;
272 break;
273 case SOCK_REQ_WRITE:
274 if (sock->ops && sock->ops->write)
275 r = sock->ops->write(sock, req, blocking);
276 else
277 r = EINVAL;
278 break;
279 case SOCK_REQ_IOCTL:
280 if (sock->ops && sock->ops->ioctl)
281 r = sock->ops->ioctl(sock, req, blocking);
282 else
283 r = EINVAL;
284 break;
285 default:
286 netsock_panic("cannot happen!");
289 return r;
292 static int netsock_open(devminor_t minor, int UNUSED(access),
293 endpoint_t UNUSED(user_endpt))
295 int r;
297 if ((r = socket_open(minor)) < 0)
298 return r;
300 return CDEV_CLONED | r;
303 static int netsock_close(devminor_t minor)
305 struct socket *sock;
307 if (!(sock = get_sock(minor)))
308 return EINVAL;
310 if (sock->ops && sock->ops->close) {
311 sock->flags &= ~SOCK_FLG_OP_PENDING;
313 return sock->ops->close(sock);
314 } else
315 return EINVAL;
318 static int netsock_request(struct socket *sock, struct sock_req *req)
320 char *o;
323 * If an operation is pending (blocking operation) or writing is
324 * still going on and we're reading, suspend the new operation
326 if ((sock->flags & SOCK_FLG_OP_PENDING) ||
327 (req->type == SOCK_REQ_READ &&
328 sock->flags & SOCK_FLG_OP_WRITING)) {
329 if (sock->flags & SOCK_FLG_OP_READING)
330 o = "READ";
331 else if (sock->flags & SOCK_FLG_OP_WRITING)
332 o = "WRITE";
333 else
334 o = "non R/W op";
335 debug_sock_print("socket %ld is busy by %s flgs 0x%x\n",
336 get_sock_num(sock), o, sock->flags);
338 if (mq_enqueue(req) != 0) {
339 debug_sock_print("Enqueuing suspended call failed");
340 return ENOMEM;
343 return EDONTREPLY;
346 return socket_request_socket(sock, req);
349 static ssize_t netsock_read(devminor_t minor, u64_t UNUSED(position),
350 endpoint_t endpt, cp_grant_id_t grant, size_t size, int flags,
351 cdev_id_t id)
353 struct socket *sock;
354 struct sock_req req;
356 if (!(sock = get_sock(minor)))
357 return EINVAL;
359 /* Build a request record for this request. */
360 req.type = SOCK_REQ_READ;
361 req.minor = minor;
362 req.endpt = endpt;
363 req.grant = grant;
364 req.size = size;
365 req.flags = flags;
366 req.id = id;
368 /* Process the request. */
369 return netsock_request(sock, &req);
372 static ssize_t netsock_write(devminor_t minor, u64_t UNUSED(position),
373 endpoint_t endpt, cp_grant_id_t grant, size_t size, int flags,
374 cdev_id_t id)
376 struct socket *sock;
377 struct sock_req req;
379 if (!(sock = get_sock(minor)))
380 return EINVAL;
382 /* Build a request record for this request. */
383 req.type = SOCK_REQ_WRITE;
384 req.minor = minor;
385 req.endpt = endpt;
386 req.grant = grant;
387 req.size = size;
388 req.flags = flags;
389 req.id = id;
391 /* Process the request. */
392 return netsock_request(sock, &req);
395 static int netsock_ioctl(devminor_t minor, unsigned long request,
396 endpoint_t endpt, cp_grant_id_t grant, int flags,
397 endpoint_t UNUSED(user_endpt), cdev_id_t id)
399 struct socket *sock;
400 struct sock_req req;
402 if (!(sock = get_sock(minor)))
403 return EINVAL;
405 /* Build a request record for this request. */
406 req.type = SOCK_REQ_IOCTL;
407 req.minor = minor;
408 req.req = request;
409 req.endpt = endpt;
410 req.grant = grant;
411 req.flags = flags;
412 req.id = id;
414 /* Process the request. */
415 return netsock_request(sock, &req);
418 static int netsock_cancel(devminor_t minor, endpoint_t endpt, cdev_id_t id)
420 struct socket *sock;
422 if (!(sock = get_sock(minor)))
423 return EDONTREPLY;
425 debug_sock_print("socket num %ld", get_sock_num(sock));
427 /* Cancel the last operation in the queue */
428 if (mq_cancel(minor, endpt, id))
429 return EINTR;
431 /* Cancel any ongoing blocked read */
432 if ((sock->flags & SOCK_FLG_OP_PENDING) &&
433 (sock->flags & SOCK_FLG_OP_READING) &&
434 endpt == sock->req.endpt && id == sock->req.id) {
435 sock->flags &= ~SOCK_FLG_OP_PENDING;
436 return EINTR;
439 /* The request may not be found. This is OK. Do not reply. */
440 return EDONTREPLY;
443 static int netsock_select(devminor_t minor, unsigned int ops, endpoint_t endpt)
445 struct socket *sock;
446 int r;
449 * Select is always executed immediately and is never suspended.
450 * Although, it sets actions which must be monitored
452 if (!(sock = get_sock(minor)))
453 return EBADF;
455 assert(sock->select_ep == NONE || sock->select_ep == endpt);
457 if (sock->ops && sock->ops->select) {
458 sock->select_ep = endpt;
459 r = sock->ops->select(sock, ops);
460 if (!sock_select_set(sock))
461 sock->select_ep = NONE;
462 } else
463 r = EINVAL;
465 return r;
468 void socket_request(message * m, int ipc_status)
470 debug_sock_print("request %d", m->m_type);
472 /* Let the chardriver library decode the request for us. */
473 chardriver_process(&netsock_tab, m, ipc_status);
476 void mq_process(void)
478 struct mq * mq;
479 struct socket * sock;
480 int r;
482 mq = mq_head;
484 while(mq) {
485 struct mq * next = mq->next;
487 sock = get_sock(mq->req.minor);
488 if (!(sock->flags & SOCK_FLG_OP_PENDING) &&
489 !(mq->req.type == SOCK_REQ_READ &&
490 sock->flags & SOCK_FLG_OP_WRITING)) {
491 debug_sock_print("resuming op on sock %ld\n",
492 get_sock_num(sock));
493 sock->req = mq->req;
494 r = socket_request_socket(sock, &sock->req);
495 send_req_reply(&sock->req, r);
496 mq_dequeue(mq);
497 mq_free(mq);
498 return;
501 mq = next;
505 int generic_op_select(struct socket * sock, unsigned int sel)
507 int retsel = 0;
509 debug_sock_print("socket num %ld 0x%x", get_sock_num(sock), sel);
511 /* in this case any operation would block, no error */
512 if (sock->flags & SOCK_FLG_OP_PENDING) {
513 if (sel & CDEV_NOTIFY) {
514 if (sel & CDEV_OP_RD)
515 sock->flags |= SOCK_FLG_SEL_READ;
516 if (sel & CDEV_OP_WR)
517 sock->flags |= SOCK_FLG_SEL_WRITE;
518 /* FIXME we do not monitor error */
520 return 0;
523 if (sel & CDEV_OP_RD) {
524 if (sock->recv_head)
525 retsel |= CDEV_OP_RD;
526 else if (sel & CDEV_NOTIFY)
527 sock->flags |= SOCK_FLG_SEL_READ;
529 /* FIXME generic packet socket never blocks on write */
530 if (sel & CDEV_OP_WR)
531 retsel |= CDEV_OP_WR;
532 /* FIXME CDEV_OP_ERR is ignored, we do not generate exceptions */
534 return retsel;
537 int generic_op_select_reply(struct socket * sock)
539 unsigned int sel = 0;
541 assert(sock->select_ep != NONE);
542 debug_sock_print("socket num %ld", get_sock_num(sock));
544 /* unused for generic packet socket, see generic_op_select() */
545 assert((sock->flags & (SOCK_FLG_SEL_WRITE | SOCK_FLG_SEL_ERROR)) == 0);
547 if (sock->flags & SOCK_FLG_OP_PENDING) {
548 debug_sock_print("WARNING socket still blocking!");
549 return 0;
552 if (sock->flags & SOCK_FLG_SEL_READ && sock->recv_head)
553 sel |= CDEV_OP_RD;
555 if (sel)
556 sock->flags &= ~(SOCK_FLG_SEL_WRITE | SOCK_FLG_SEL_READ |
557 SOCK_FLG_SEL_ERROR);
559 return sel;