tools/adflib: build only host variant which is used by Sam440 target
[AROS.git] / workbench / network / stacks / AROSTCP / bsdsocket / kern / uipc_socket.c
blob37bfc27ee8c1646d625ba9e9c15aa8584aae8a22
1 /*
2 * Copyright (C) 1993 AmiTCP/IP Group, <amitcp-group@hut.fi>
3 * Helsinki University of Technology, Finland.
4 * All rights reserved.
5 * Copyright (C) 2005 - 2007 The AROS Dev Team
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
19 * MA 02111-1307, USA.
23 /*
24 * Mach Operating System
25 * Copyright (c) 1992 Carnegie Mellon University
26 * All Rights Reserved.
28 * Permission to use, copy, modify and distribute this software and its
29 * documentation is hereby granted, provided that both the copyright
30 * notice and this permission notice appear in all copies of the
31 * software, derivative works or modified versions, and any portions
32 * thereof, and that both notices appear in supporting documentation.
34 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
35 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
36 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
38 * Carnegie Mellon requests users of this software to return to
40 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
41 * School of Computer Science
42 * Carnegie Mellon University
43 * Pittsburgh PA 15213-3890
45 * any improvements or extensions that they make and grant Carnegie Mellon
46 * the rights to redistribute these changes.
50 * Copyright (c) 1982, 1986, 1988, 1990 Regents of the University of California.
51 * All rights reserved.
53 * Redistribution and use in source and binary forms, with or without
54 * modification, are permitted provided that the following conditions
55 * are met:
56 * 1. Redistributions of source code must retain the above copyright
57 * notice, this list of conditions and the following disclaimer.
58 * 2. Redistributions in binary form must reproduce the above copyright
59 * notice, this list of conditions and the following disclaimer in the
60 * documentation and/or other materials provided with the distribution.
61 * 3. All advertising materials mentioning features or use of this software
62 * must display the following acknowledgement:
63 * This product includes software developed by the University of
64 * California, Berkeley and its contributors.
65 * 4. Neither the name of the University nor the names of its contributors
66 * may be used to endorse or promote products derived from this software
67 * without specific prior written permission.
69 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
70 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
71 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
72 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
73 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
74 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
75 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
76 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
77 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
78 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
79 * SUCH DAMAGE.
81 * @(#)uipc_socket.c 7.28 (Berkeley) 5/4/91
84 #include <conf.h>
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/malloc.h>
89 #include <sys/mbuf.h>
90 #include <sys/domain.h>
91 #include <sys/kernel.h>
92 #include <sys/protosw.h>
93 #include <sys/socket.h>
94 #include <sys/socketvar.h>
96 #include <sys/synch.h>
97 #include <api/amiga_api.h>
99 #include <kern/amiga_includes.h>
101 #include <kern/uipc_socket_protos.h>
102 #include <kern/uipc_socket2_protos.h>
103 #include <kern/uipc_domain_protos.h>
104 #include <kern/amiga_select_protos.h>
106 #include <sys/uio.h>
109 * Socket operation routines.
110 * These routines are called by the routines in
111 * sys_socket.c or from a system process, and
112 * implement the semantics of socket operations by
113 * switching out to the protocol specific routines.
116 socreate(dom, aso, type, proto)
117 int dom;
118 struct socket **aso;
119 register int type;
120 int proto;
122 #ifndef AMITCP
123 struct proc *p = (struct proc*)cthread_data(cthread_self());
124 #endif /* AMITCP */
125 register struct protosw *prp;
126 register struct socket *so;
127 register int error;
129 if (proto)
130 prp = pffindproto(dom, proto, type);
131 else
132 prp = pffindtype(dom, type);
133 if (prp == 0)
134 return (EPROTONOSUPPORT);
135 if (prp->pr_type != type)
136 return (EPROTOTYPE);
137 MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
138 aligned_bzero_const((caddr_t)so, sizeof(*so));
139 so->so_type = type;
140 #ifndef AMITCP
141 if (p->p_ucred->cr_uid == 0)
142 #endif /* AMITCP */
143 so->so_state = SS_PRIV; /* all sockets have the force now */
144 so->so_proto = prp;
145 error =
146 (*prp->pr_usrreq)(so, PRU_ATTACH,
147 (struct mbuf *)0, (struct mbuf *)(long)proto, (struct mbuf *)0);
148 if (error) {
149 so->so_state |= SS_NOFDREF;
150 sofree(so);
151 return (error);
153 *aso = so;
154 return (0);
158 sobind(so, nam)
159 struct socket *so;
160 struct mbuf *nam;
162 spl_t s = splnet();
163 int error;
165 error =
166 (*so->so_proto->pr_usrreq)(so, PRU_BIND,
167 (struct mbuf *)0, nam, (struct mbuf *)0);
168 splx(s);
169 return (error);
173 solisten(so, backlog)
174 register struct socket *so;
175 int backlog;
177 spl_t s = splnet(), error;
179 error =
180 (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
181 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
182 if (error) {
183 splx(s);
184 return (error);
186 if (so->so_q == 0)
187 so->so_options |= SO_ACCEPTCONN;
188 if (backlog < 0)
189 backlog = 0;
190 so->so_qlimit = MIN(backlog, SOMAXCONN);
191 splx(s);
192 return (0);
195 void
196 sofree(so)
197 register struct socket *so;
200 if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
201 return;
202 if (so->so_head) {
203 if (!soqremque(so, 0) && !soqremque(so, 1))
204 panic("sofree dq");
205 so->so_head = 0;
207 sbrelease(&so->so_snd);
208 sorflush(so);
209 FREE(so, M_SOCKET);
213 * Close a socket on last file table reference removal.
214 * Initiate disconnect if connected.
215 * Free socket when disconnect complete.
/*
 * Returns 0 or an errno value.  The entire close sequence runs at
 * splnet ("conservative") and always ends by marking SS_NOFDREF and
 * calling sofree().
 */
218 soclose(so)
219 register struct socket *so;
221 spl_t s = splnet(); /* conservative */
222 int error = 0;
223 struct SocketBase *socketBase;
224 struct timeval *timeo;
/* A listening socket first aborts every pending and completed
   connection still sitting on its queues. */
226 if (so->so_options & SO_ACCEPTCONN) {
227 while (so->so_q0)
228 (void) soabort(so->so_q0);
229 while (so->so_q)
230 (void) soabort(so->so_q);
232 if (so->so_pcb == 0)
233 goto discard;
234 if (so->so_state & SS_ISCONNECTED) {
235 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
236 error = sodisconnect(so);
237 if (error)
238 goto drop;
/* SO_LINGER: unless the socket is non-blocking, sleep until the
   disconnect completes or the linger time expires. */
240 if (so->so_options & SO_LINGER) {
241 if ((so->so_state & SS_ISDISCONNECTING) &&
242 (so->so_state & SS_NBIO))
243 goto drop;
245 * Find socket base for the tsleep()
247 if ((socketBase = FindSocketBase(FindTask(NULL)))
248 == NULL)
249 goto drop; /* couldn't find */
/* A zero linger time means wait indefinitely (NULL timeout). */
250 timeo = (so->so_linger.tv_sec
251 || so->so_linger.tv_usec) ?
252 &so->so_linger : NULL;
253 while (so->so_state & SS_ISCONNECTED)
254 if (error = tsleep(socketBase,
255 (caddr_t)&so->so_timeo,
256 netcls, timeo))
257 break;
/* Detach from the protocol; report the first error seen (a linger
   timeout/signal error takes precedence over a detach error). */
260 drop:
261 if (so->so_pcb) {
262 int error2 =
263 (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
264 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
265 if (error == 0)
266 error = error2;
268 discard:
269 if (so->so_state & SS_NOFDREF)
270 panic("soclose: NOFDREF");
271 so->so_state |= SS_NOFDREF;
272 sofree(so);
273 splx(s);
274 return (error);
278 * Must be called at splnet...
281 soabort(so)
282 struct socket *so;
285 return (
286 (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
287 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
291 soaccept(so, nam)
292 register struct socket *so;
293 struct mbuf *nam;
295 spl_t s = splnet();
296 int error;
298 if ((so->so_state & SS_NOFDREF) == 0)
299 panic("soaccept: !NOFDREF");
300 so->so_state &= ~SS_NOFDREF;
301 error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
302 (struct mbuf *)0, nam, (struct mbuf *)0);
303 splx(s);
304 return (error);
308 soconnect(so, nam)
309 register struct socket *so;
310 struct mbuf *nam;
312 spl_t s;
313 int error;
315 if (so->so_options & SO_ACCEPTCONN)
316 return (EOPNOTSUPP);
317 s = splnet();
319 * If protocol is connection-based, can only connect once.
320 * Otherwise, if connected, try to disconnect first.
321 * This allows user to disconnect by connecting to, e.g.,
322 * a null address.
324 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
325 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
326 (error = sodisconnect(so))))
327 error = EISCONN;
328 else
329 error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
330 (struct mbuf *)0, nam, (struct mbuf *)0);
331 splx(s);
332 return (error);
335 #ifndef AMITCP
336 soconnect2(so1, so2)
337 register struct socket *so1;
338 struct socket *so2;
340 spl_t s = splnet();
341 int error;
343 error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
344 (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
345 splx(s);
346 return (error);
348 #endif
351 sodisconnect(so)
352 register struct socket *so;
354 spl_t s = splnet();
355 int error;
357 if ((so->so_state & SS_ISCONNECTED) == 0) {
358 error = ENOTCONN;
359 goto bad;
361 if (so->so_state & SS_ISDISCONNECTING) {
362 error = EALREADY;
363 goto bad;
365 error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
366 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
367 bad:
368 splx(s);
369 return (error);
#ifdef AMITCP
/*
 * uioread() replaces uiomove() in sosend(): it gathers up to n bytes
 * from the caller's iovec array into the buffer at cp, advancing the
 * uio bookkeeping (iov_base, iov_len, uio_resid) as it goes.
 * NOTE(review): a historical comment here said "wrong direction //pp";
 * the copy below is user iovec -> cp, which matches sosend()'s use
 * (filling an mbuf) — confirm against uiowrite() if touched.
 */
static inline void uioread(caddr_t cp, int n, struct uio *uio)
{
	struct iovec *iov;
	u_int chunk;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* This vector element is spent; step to the next. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		chunk = iov->iov_len;
		if (chunk > n)
			chunk = n;

		bcopy(iov->iov_base, cp, chunk);

		iov->iov_base += chunk;
		iov->iov_len -= chunk;
		uio->uio_resid -= chunk;
		cp += chunk;
		n -= chunk;
	}
}
#endif /* AMITCP */
407 * Send on a socket.
408 * If send must go all at once and message is larger than
409 * send buffering, then hard error.
410 * Lock against other senders.
411 * If must go all at once and not enough room now, then
412 * inform user that this would block and do nothing.
413 * Otherwise, if nonblocking, send as much as possible.
414 * The data to be sent is described by "uio" if nonzero,
415 * otherwise by the mbuf chain "top" (which must be null
416 * if uio is not). Data provided in mbuf chain must be small
417 * enough to send all at once.
419 * Returns nonzero on error, timeout or signal; callers
420 * must check for short counts if EINTR/ERESTART are returned.
421 * Data and control buffers are freed on return.
424 sosend(so, addr, uio, top, control, flags)
425 register struct socket *so;
426 struct mbuf *addr;
427 struct uio *uio;
428 struct mbuf *top;
429 struct mbuf *control;
430 int flags;
432 #ifndef AMITCP /* proc not defined */
433 struct proc *p = (struct proc*)cthread_data(cthread_self());
434 #endif /* AMITCP */
435 struct mbuf **mp;
436 register struct mbuf *m;
437 register long space, len, resid;
438 int clen = 0, error, dontroute, mlen;
439 spl_t s;
/* 'atomic' means the whole message must be handed down in one piece:
   either the protocol demands it (sosendallatonce) or the caller
   supplied a prepackaged chain in 'top'. */
440 int atomic = sosendallatonce(so) || top;
442 if (uio)
443 resid = uio->uio_resid;
444 else
445 resid = top->m_pkthdr.len;
447 dontroute =
448 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
449 (so->so_proto->pr_flags & PR_ATOMIC);
450 #ifndef AMITCP
451 p->p_stats->p_ru.ru_msgsnd++;
452 #endif /* AMITCP */
453 if (control)
454 clen = control->m_len;
/* snderr: record errno, drop back from splnet, bail to release. */
455 #define snderr(errno) { error = errno; splx(s); goto release; }
457 restart:
/* NOTE(review): uio->uio_procp is dereferenced here (and below at the
   sbwait calls) even though uio may be NULL when 'top' supplies the
   data (see resid setup above) — confirm callers never pass uio==NULL
   with a blocking send. */
458 if (error = sblock(&so->so_snd, uio->uio_procp))
459 goto out;
460 do {
461 s = splnet();
462 if (so->so_state & SS_CANTSENDMORE)
463 snderr(EPIPE);
464 if (so->so_error)
465 snderr(so->so_error);
466 if ((so->so_state & SS_ISCONNECTED) == 0) {
467 if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
468 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
469 !(resid == 0 && clen != 0))
470 snderr(ENOTCONN);
471 } else if (addr == 0)
472 snderr(EDESTADDRREQ);
474 space = sbspace(&so->so_snd);
/* OOB data may slightly overcommit the send buffer. */
475 if (flags & MSG_OOB)
476 space += 1024;
/* Not enough room: hard error if the message can never fit,
   EWOULDBLOCK if non-blocking, otherwise sleep and restart. */
477 if (space < resid + clen &&
478 (atomic || space < so->so_snd.sb_lowat || space < clen)) {
479 if ((atomic && resid > so->so_snd.sb_hiwat) ||
480 clen > so->so_snd.sb_hiwat)
481 snderr(EMSGSIZE);
482 if (so->so_state & SS_NBIO)
483 snderr(EWOULDBLOCK);
484 sbunlock(&so->so_snd);
485 error = sbwait(&so->so_snd, uio->uio_procp);
486 splx(s);
487 if (error)
488 goto out;
489 goto restart;
491 splx(s);
492 mp = &top;
493 space -= clen;
/* Inner loop: copy user data into a fresh mbuf chain at 'top'. */
494 do {
495 if (uio == NULL) {
497 * Data is prepackaged in "top".
499 resid = 0;
500 #ifdef USE_M_EOR
501 if (flags & MSG_EOR)
502 top->m_flags |= M_EOR;
503 #endif
504 } else do {
505 if (top == 0) {
506 MGETHDR(m, M_WAIT, MT_DATA);
507 mlen = MHLEN;
508 m->m_pkthdr.len = 0;
509 m->m_pkthdr.rcvif = (struct ifnet *)0;
510 } else {
511 MGET(m, M_WAIT, MT_DATA);
512 mlen = MLEN;
/* Use a cluster when there is enough data and buffer space. */
514 if (resid >= mlen && space >= mbconf.mclbytes) {
515 MCLGET(m, M_WAIT);
516 if ((m->m_flags & M_EXT) == 0)
517 goto nopages;
518 mlen = mbconf.mclbytes;
519 #ifdef MAPPED_MBUFS
520 len = MIN(mbconf.mclbytes, resid);
521 #else
522 if (top == 0) {
523 len = MIN(mbconf.mclbytes - max_hdr, resid);
524 m->m_data += max_hdr;
525 } else
526 len = MIN(mbconf.mclbytes, resid);
527 #endif
528 space -= mbconf.mclbytes;
529 } else
531 nopages:
532 len = MIN(MIN(mlen, resid), space);
533 space -= len;
535 * For datagram protocols, leave room
536 * for protocol headers in first mbuf.
538 if (atomic && top == 0 && len < mlen)
539 MH_ALIGN(m, len);
541 uioread(mtod(m, caddr_t), (int)len, uio);
542 resid = uio->uio_resid;
543 m->m_len = len;
544 *mp = m;
545 top->m_pkthdr.len += len;
546 if (error)
547 goto release;
548 mp = &m->m_next;
549 if (resid <= 0) {
550 #ifdef USE_M_EOR
551 if (flags & MSG_EOR)
552 top->m_flags |= M_EOR;
553 #endif
554 break;
557 #if defined(AMITCP) && 0
558 /* all the data in 1 mbuf chain */
559 while (space > 0);
560 #else
561 while (space > 0 && atomic);
562 #endif
563 if (dontroute)
564 so->so_options |= SO_DONTROUTE;
565 s = splnet(); /* XXX */
/* Hand the chain to the protocol.  NOTE(review): the trailing
   "//PRU_SEND," on the next line is a commented-out remnant; the
   request actually chosen is the MSG_EOF ternary that follows. */
566 error = (*so->so_proto->pr_usrreq)(so,
567 (flags & MSG_OOB) ? PRU_SENDOOB : //PRU_SEND,
569 * If the user set MSG_EOF, the protocol
570 * understands this flag and nothing left to
571 * send then use PRU_SEND_EOF instead of PRU_SEND.
573 ((flags & MSG_EOF) &&
574 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
575 (resid <= 0)) ?
576 PRU_SEND_EOF : PRU_SEND,
577 top, addr, control);
578 splx(s);
579 if (dontroute)
580 so->so_options &= ~SO_DONTROUTE;
/* Ownership of top/control has passed to the protocol. */
581 clen = 0;
582 control = 0;
583 top = 0;
584 mp = &top;
585 if (error)
586 goto release;
587 } while (resid && space > 0);
588 } while (resid);
590 release:
591 sbunlock(&so->so_snd);
592 out:
/* Anything not handed to the protocol is freed here. */
593 if (top)
594 m_freem(top);
595 if (control)
596 m_freem(control);
597 return (error);
#ifdef AMITCP
/*
 * uiowrite() replaces uiomove() in soreceive(): it scatters up to n
 * bytes from the buffer at cp into the caller's iovec array,
 * advancing the uio bookkeeping (iov_base, iov_len, uio_resid).
 */
static inline void uiowrite(caddr_t cp, int n, struct uio *uio)
{
	struct iovec *iov;
	u_int chunk;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* This vector element is full; step to the next. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		chunk = iov->iov_len;
		if (chunk > n)
			chunk = n;

		bcopy(cp, iov->iov_base, chunk);

		iov->iov_base += chunk;
		iov->iov_len -= chunk;
		uio->uio_resid -= chunk;
		cp += chunk;
		n -= chunk;
	}
}
#endif /* AMITCP */
634 * Implement receive operations on a socket.
635 * We depend on the way that records are added to the sockbuf
636 * by sbappend*. In particular, each record (mbufs linked through m_next)
637 * must begin with an address if the protocol so specifies,
638 * followed by an optional mbuf or mbufs containing ancillary data,
639 * and then zero or more mbufs of data.
640 * In order to avoid blocking network interrupts for the entire time here,
641 * we splx() while doing the actual copy to user space.
642 * Although the sockbuf is locked, new data may still be appended,
643 * and thus we must maintain consistency of the sockbuf during that time.
645 * The caller may receive the data as a single mbuf chain by supplying
646 * an mbuf **mp0 for use in returning the chain. The uio is then used
647 * only for the count in uio_resid.
650 soreceive(so, paddr, uio, mp0, controlp, flagsp)
651 register struct socket *so;
652 struct mbuf **paddr;
653 struct uio *uio;
654 struct mbuf **mp0;
655 struct mbuf **controlp;
656 int *flagsp;
658 #ifndef AMITCP
659 struct proc *p = (struct proc*)cthread_data(cthread_self());
660 #endif
661 register struct mbuf *m, **mp;
662 register int flags, len, error, offset;
663 spl_t s;
664 struct protosw *pr = so->so_proto;
665 struct mbuf *nextrecord;
666 int moff, type;
/* Initialize the optional out-parameters. */
668 mp = mp0;
669 if (paddr)
670 *paddr = 0;
671 if (controlp)
672 *controlp = 0;
673 if (flagsp)
674 flags = *flagsp &~ MSG_EOR;
675 else
676 flags = 0;
/* Out-of-band receive is handled entirely by the protocol. */
677 if (flags & MSG_OOB) {
678 m = m_get(M_WAIT, MT_DATA);
679 error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
680 m, (struct mbuf *)(long)(flags & MSG_PEEK), (struct mbuf *)0);
681 if (error)
682 goto bad;
684 do {
685 uiowrite(mtod(m, caddr_t),
686 (int) MIN(uio->uio_resid, m->m_len), uio);
687 m = m_free(m);
688 } while (uio->uio_resid && error == 0 && m);
690 bad:
691 if (m)
692 m_freem(m);
693 return (error);
695 if (mp)
696 *mp = (struct mbuf *)0;
698 if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
699 (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
700 (struct mbuf *)0, (struct mbuf *)0);
703 restart:
/* NOTE(review): uio is dereferenced unconditionally from here on —
   callers supplying mp0 must still pass a valid uio for uio_resid. */
704 if (error = sblock(&so->so_rcv, uio->uio_procp))
705 return (error);
706 s = splnet();
708 m = so->so_rcv.sb_mb;
710 * If we have less data than requested, block awaiting more
711 * (subject to any timeout) if:
712 * 1. the current count is less than the low water mark, or
713 * 2. MSG_WAITALL is set, and it is possible to do the entire
714 * receive operation at once if we block (resid <= hiwat).
715 * If MSG_WAITALL is set but resid is larger than the receive buffer,
716 * we have to do the receive in sections, and thus risk returning
717 * a short count if a timeout or signal occurs after we start.
720 while (m == 0 || (so->so_rcv.sb_cc < uio->uio_resid &&
721 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
722 ((flags & MSG_WAITALL)
723 && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
724 m->m_nextpkt == 0)) {
725 #if DIAGNOSTIC
726 if (m == 0 && so->so_rcv.sb_cc)
727 panic("receive 1");
728 #endif
/* Pending error: deliver available data first, else report it. */
729 if (so->so_error) {
730 if (m)
731 break;
732 error = so->so_error;
733 if ((flags & MSG_PEEK) == 0)
734 so->so_error = 0;
735 goto release;
737 if (so->so_state & SS_CANTRCVMORE) {
738 if (m)
739 break;
740 else
741 goto release;
/* OOB/EOR data already queued means we need not block.
   NOTE(review): the extraction appears to have dropped lines from
   this for/if construct (original lines 748ff) — body incomplete
   as shown; verify against the repository before editing. */
743 for (; m; m = m->m_next)
744 if (m->m_type == MT_OOBDATA
745 #ifdef USE_M_EOR
746 || (m->m_flags & M_EOR)
747 #endif
749 m = so->so_rcv.sb_mb;
750 goto dontblock;
752 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
753 (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
754 error = ENOTCONN;
755 goto release;
758 if (uio->uio_resid == 0)
759 goto release;
761 if (so->so_state & SS_NBIO) {
762 error = EWOULDBLOCK;
763 goto release;
/* Nothing usable: sleep for data and start over. */
765 sbunlock(&so->so_rcv);
766 error = sbwait(&so->so_rcv, uio->uio_procp);
767 splx(s);
768 if (error)
769 return (error);
770 goto restart;
772 dontblock:
773 #ifndef AMITCP
774 p->p_stats->p_ru.ru_msgrcv++;
775 #endif
776 nextrecord = m->m_nextpkt;
/* Record layout: optional address mbuf first (PR_ADDR protocols)... */
777 if (pr->pr_flags & PR_ADDR) {
778 #if DIAGNOSTIC
779 if (m->m_type != MT_SONAME)
780 panic("receive 1a");
781 #endif
782 if (flags & MSG_PEEK) {
783 if (paddr)
784 *paddr = m_copy(m, 0, m->m_len);
785 m = m->m_next;
786 } else {
787 sbfree(&so->so_rcv, m);
788 if (paddr) {
789 *paddr = m;
790 so->so_rcv.sb_mb = m->m_next;
791 m->m_next = 0;
792 m = so->so_rcv.sb_mb;
793 } else {
794 MFREE(m, so->so_rcv.sb_mb);
795 m = so->so_rcv.sb_mb;
/* ...then any control (ancillary data) mbufs... */
799 while (m && m->m_type == MT_CONTROL && error == 0) {
800 if (flags & MSG_PEEK) {
801 if (controlp)
802 *controlp = m_copy(m, 0, m->m_len);
803 m = m->m_next;
804 } else {
805 sbfree(&so->so_rcv, m);
806 if (controlp) {
807 if (pr->pr_domain->dom_externalize &&
808 mtod(m, struct cmsghdr *)->cmsg_type ==
809 SCM_RIGHTS)
810 error = (*pr->pr_domain->dom_externalize)(m);
811 *controlp = m;
812 so->so_rcv.sb_mb = m->m_next;
813 m->m_next = 0;
814 m = so->so_rcv.sb_mb;
815 } else {
816 MFREE(m, so->so_rcv.sb_mb);
817 m = so->so_rcv.sb_mb;
820 if (controlp)
821 controlp = &(*controlp)->m_next;
/* ...then the data mbufs themselves. */
823 if (m) {
824 if ((flags & MSG_PEEK) == 0)
825 m->m_nextpkt = nextrecord;
826 type = m->m_type;
827 if (type == MT_OOBDATA)
828 flags |= MSG_OOB;
830 moff = 0;
831 offset = 0;
/* Main copy loop: move data to the user (or chain it to *mp),
   stopping at an OOB/normal type change or the oobmark. */
832 while (m && uio->uio_resid > 0 && error == 0) {
833 if (m->m_type == MT_OOBDATA) {
834 if (type != MT_OOBDATA)
835 break;
836 } else if (type == MT_OOBDATA)
837 break;
838 #if DIAGNOSTIC
839 else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
840 panic("receive 3");
841 #endif
842 so->so_state &= ~SS_RCVATMARK;
844 len = uio->uio_resid;
846 if (so->so_oobmark && len > so->so_oobmark - offset)
847 len = so->so_oobmark - offset;
848 if (len > m->m_len - moff)
849 len = m->m_len - moff;
851 * If mp is set, just pass back the mbufs.
852 * Otherwise copy them out via the uio, then free.
853 * Sockbuf must be consistent here (points to current mbuf,
854 * it points to next record) when we drop priority;
855 * we must note any additions to the sockbuf when we
856 * block interrupts again.
859 if (mp == 0) {
860 splx(s);
861 uiowrite(mtod(m, caddr_t) + moff, (int)len, uio);
862 s = splnet();
863 } else
864 uio->uio_resid -= len;
/* Entire mbuf consumed: advance (peek) or unlink/free it. */
866 if (len == m->m_len - moff) {
867 #ifdef USE_M_EOR
868 if (m->m_flags & M_EOR)
869 flags |= MSG_EOR;
870 #endif
871 if (flags & MSG_PEEK) {
872 m = m->m_next;
873 moff = 0;
874 } else {
875 nextrecord = m->m_nextpkt;
876 sbfree(&so->so_rcv, m);
877 if (mp) {
878 *mp = m;
879 mp = &m->m_next;
880 so->so_rcv.sb_mb = m = m->m_next;
881 *mp = (struct mbuf *)0;
882 } else {
883 MFREE(m, so->so_rcv.sb_mb);
884 m = so->so_rcv.sb_mb;
886 if (m)
887 m->m_nextpkt = nextrecord;
/* Partial mbuf: trim the consumed prefix in place. */
889 } else {
890 if (flags & MSG_PEEK)
891 moff += len;
892 else {
893 if (mp)
894 *mp = m_copym(m, 0, len, M_WAIT);
895 m->m_data += len;
896 m->m_len -= len;
897 so->so_rcv.sb_cc -= len;
900 if (so->so_oobmark) {
901 if ((flags & MSG_PEEK) == 0) {
902 so->so_oobmark -= len;
903 if (so->so_oobmark == 0) {
904 so->so_state |= SS_RCVATMARK;
905 break;
907 } else
908 offset += len;
910 if (flags & MSG_EOR)
911 break;
913 * If the MSG_WAITALL flag is set (for non-atomic socket),
914 * we must not quit until "uio->uio_resid == 0" or an error
915 * termination. If a signal/timeout occurs, return
916 * with a short count but without error.
917 * Keep sockbuf locked against other readers.
919 while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
920 !sosendallatonce(so)) {
921 if (so->so_error || so->so_state & SS_CANTRCVMORE)
922 break;
923 error = sbwait(&so->so_rcv, uio->uio_procp);
924 if (error) {
925 sbunlock(&so->so_rcv);
926 splx(s);
/* Deliberate: short count without error (see comment above). */
927 return (0);
929 if (m = so->so_rcv.sb_mb)
930 nextrecord = m->m_nextpkt;
/* Non-peek cleanup: drop any unread tail of an atomic record. */
933 if ((flags & MSG_PEEK) == 0) {
934 if (m == 0)
935 so->so_rcv.sb_mb = nextrecord;
936 else if (pr->pr_flags & PR_ATOMIC) {
937 flags |= MSG_TRUNC;
938 (void) sbdroprecord(&so->so_rcv);
940 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
941 (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
942 (struct mbuf *)(long)flags, /* (struct mbuf *)0, */
943 (struct mbuf *)0); /* BUG! One extra arg! */
945 if (flagsp)
946 *flagsp |= flags;
947 release:
948 sbunlock(&so->so_rcv);
949 splx(s);
950 return (error);
954 soshutdown(so, how)
955 register struct socket *so;
956 register int how;
958 register struct protosw *pr = so->so_proto;
960 how++; /* now how: 1 - no receives, 2 - no sends, 3 - no either */
961 if (how & 1)
962 sorflush(so);
963 if (how & 2)
964 return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN, (struct mbuf *)0,
965 (struct mbuf *)0, (struct mbuf *)0));
966 return (0);
969 void
970 sorflush(so)
971 register struct socket *so;
973 register struct sockbuf *sb = &so->so_rcv;
974 register struct protosw *pr = so->so_proto;
975 register spl_t s;
976 struct sockbuf asb;
978 sb->sb_flags |= SB_NOINTR;
979 (void) sblock(sb, NULL);
980 s = splimp();
981 socantrcvmore(so);
982 sbunlock(sb);
983 asb = *sb;
984 aligned_bzero_const((caddr_t)sb, sizeof (*sb));
985 splx(s);
986 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
987 (*pr->pr_domain->dom_dispose)(asb.sb_mb);
988 sbrelease(&asb);
992 * in some fields of socket structure previous short fields of ticks are
993 * changed to struct timeval fields. (actually so_linger and sb_timeo)
994 * sosetopt() and sogetopt() does transformations needed for those.
/*
 * Set a socket option.  Non-SOL_SOCKET levels are forwarded to the
 * protocol's ctloutput handler (which then owns m0); for SOL_SOCKET
 * the option mbuf 'm' is consumed (freed) here before returning.
 * Returns 0, EINVAL, ENOBUFS or ENOPROTOOPT.
 */
998 sosetopt(so, level, optname, m0)
999 register struct socket *so;
1000 int level, optname;
1001 struct mbuf *m0;
1003 int error = 0;
1004 register struct mbuf *m = m0;
/* T/TCP extension: handled before the level dispatch. */
1006 #ifdef ENABLE_TTCP_SHUTUP
1007 if ((optname == TCP_NOPUSH) && (level == IPPROTO_TCP)) {
1008 if (m == NULL || m->m_len < sizeof (int))
1009 error = EINVAL;
1010 else {
1011 if (*mtod(m, int *))
1012 so->so_options |= SO_TTCP_SHUTUP;
1013 else
1014 so->so_options &= ~SO_TTCP_SHUTUP;
1016 goto bad;
1018 #endif
1019 if (level != SOL_SOCKET) {
1020 if (so->so_proto && so->so_proto->pr_ctloutput)
1021 return ((*so->so_proto->pr_ctloutput)
1022 (PRCO_SETOPT, so, level, optname, &m0));
1023 error = ENOPROTOOPT;
1024 } else {
1025 switch (optname) {
/* SO_LINGER stores its l_linger value into so_linger, then falls
   through to also set/clear the SO_LINGER option bit. */
1027 case SO_LINGER:
1028 if (m == NULL || m->m_len != sizeof (struct linger)) {
1029 error = EINVAL;
1030 goto bad;
1033 int val = mtod(m, struct linger *)->l_linger;
1034 #ifdef AMITCP
1036 * l_linger is in seconds
1038 so->so_linger.tv_sec = val;
1039 so->so_linger.tv_usec = 0;
1040 #else
1041 so->so_linger.tv_sec = val / hz;
1042 so->so_linger.tv_usec = (val % hz) * tick;
1043 #endif
1045 /* fall thru... */
/* Boolean options: the option bit equals the optname value. */
1047 case SO_DEBUG:
1048 case SO_KEEPALIVE:
1049 case SO_DONTROUTE:
1050 case SO_USELOOPBACK:
1051 case SO_BROADCAST:
1052 case SO_REUSEADDR:
1053 case SO_REUSEPORT:
1054 case SO_OOBINLINE:
1055 if (m == NULL || m->m_len < sizeof (int)) {
1056 error = EINVAL;
1057 goto bad;
1059 if (*mtod(m, int *))
1060 so->so_options |= optname;
1061 else
1062 so->so_options &= ~optname;
1063 break;
/* Integer-valued buffer/watermark options (validated once, then
   dispatched again below). */
1065 case SO_SNDBUF:
1066 case SO_RCVBUF:
1067 case SO_SNDLOWAT:
1068 case SO_RCVLOWAT:
1069 case SO_EVENTMASK:
1070 if (m == NULL || m->m_len < sizeof (int)) {
1071 error = EINVAL;
1072 goto bad;
1074 switch (optname) {
1076 case SO_SNDBUF:
1077 case SO_RCVBUF:
1078 if (sbreserve(optname == SO_SNDBUF ?
1079 &so->so_snd : &so->so_rcv,
1080 (u_long) *mtod(m, int *)) == 0) {
1081 error = ENOBUFS;
1082 goto bad;
1084 break;
1086 case SO_SNDLOWAT:
1087 so->so_snd.sb_lowat = *mtod(m, int *);
1088 break;
1089 case SO_RCVLOWAT:
1090 so->so_rcv.sb_lowat = *mtod(m, int *);
1091 break;
/* AmiTCP extension: event mask also switches the socket and both
   buffers into async-notification mode. */
1092 case SO_EVENTMASK:
1093 so->so_eventmask = *mtod(m, int *);
1094 so->so_state |= SS_ASYNC;
1095 so->so_rcv.sb_flags |= SB_ASYNC;
1096 so->so_snd.sb_flags |= SB_ASYNC;
1097 DEVENTS(log(LOG_DEBUG, "Setting SO_EVENTMASK = 0x%08lx", so->so_eventmask);)
1099 break;
1101 case SO_SNDTIMEO:
1102 case SO_RCVTIMEO:
1104 struct timeval *tv;
1106 if (m == NULL || m->m_len < sizeof (*tv)) {
1107 error = EINVAL;
1108 goto bad;
1110 tv = mtod(m, struct timeval *);
1112 switch (optname) {
1114 case SO_SNDTIMEO:
1115 so->so_snd.sb_timeo = *tv;
1116 break;
1117 case SO_RCVTIMEO:
1118 so->so_rcv.sb_timeo = *tv;
1119 break;
1121 break;
1124 default:
1125 error = ENOPROTOOPT;
1126 break;
1129 bad:
1130 if (m)
1131 (void) m_free(m);
1132 return (error);
/*
 * Get a socket option.  Non-SOL_SOCKET levels are forwarded to the
 * protocol's ctloutput handler; for SOL_SOCKET a fresh mbuf holding
 * the value is allocated and returned via *mp (caller frees).
 * Returns 0 or ENOPROTOOPT.
 */
1136 sogetopt(so, level, optname, mp)
1137 register struct socket *so;
1138 int level, optname;
1139 struct mbuf **mp;
1141 register struct mbuf *m;
1143 if (level != SOL_SOCKET) {
1144 if (so->so_proto && so->so_proto->pr_ctloutput) {
1145 return ((*so->so_proto->pr_ctloutput)
1146 (PRCO_GETOPT, so, level, optname, mp));
1147 } else
1148 return (ENOPROTOOPT);
1149 } else {
/* Default to an int-sized result; cases override m_len as needed. */
1150 m = m_get(M_WAIT, MT_SOOPTS);
1151 m->m_len = sizeof (int);
1153 switch (optname) {
1155 case SO_LINGER:
1156 m->m_len = sizeof (struct linger);
1157 mtod(m, struct linger *)->l_onoff =
1158 so->so_options & SO_LINGER;
1159 mtod(m, struct linger *)->l_linger =
1160 #ifndef AMITCP
1161 so->so_linger.tv_sec * hz +
1162 so->so_linger.tv_usec / tick;
1163 #else
1165 * l_linger is in seconds
1167 so->so_linger.tv_sec;
1168 #endif
1169 break;
/* Boolean options: return the raw option bit. */
1171 case SO_USELOOPBACK:
1172 case SO_DONTROUTE:
1173 case SO_DEBUG:
1174 case SO_KEEPALIVE:
1175 case SO_REUSEADDR:
1176 case SO_REUSEPORT:
1177 case SO_BROADCAST:
1178 case SO_OOBINLINE:
1179 *mtod(m, int *) = so->so_options & optname;
1180 break;
1182 case SO_TYPE:
1183 *mtod(m, int *) = so->so_type;
1184 break;
/* Reading SO_ERROR clears the pending error (standard semantics). */
1186 case SO_ERROR:
1187 *mtod(m, int *) = so->so_error;
1188 so->so_error = 0;
1189 break;
1191 case SO_SNDBUF:
1192 *mtod(m, int *) = so->so_snd.sb_hiwat;
1193 break;
1195 case SO_RCVBUF:
1196 *mtod(m, int *) = so->so_rcv.sb_hiwat;
1197 break;
1199 case SO_SNDLOWAT:
1200 *mtod(m, int *) = so->so_snd.sb_lowat;
1201 break;
1203 case SO_RCVLOWAT:
1204 *mtod(m, int *) = so->so_rcv.sb_lowat;
1205 break;
1207 case SO_EVENTMASK:
1208 DEVENTS(log(LOG_DEBUG, "Getting SO_EVENTMASK = 0x%08lx", so->so_eventmask);)
1209 *mtod(m, int *) = so->so_eventmask;
1210 break;
1212 case SO_SNDTIMEO:
1213 case SO_RCVTIMEO:
1215 struct timeval *tv= (optname == SO_SNDTIMEO ?
1216 &so->so_snd.sb_timeo : &so->so_rcv.sb_timeo);
1218 m->m_len = sizeof(struct timeval);
1219 mtod(m, struct timeval *)->tv_sec = tv->tv_sec;
1220 mtod(m, struct timeval *)->tv_usec = tv->tv_usec;
1221 break;
/* Unknown option: free the result mbuf before erroring out. */
1224 default:
1225 (void)m_free(m);
1226 return (ENOPROTOOPT);
1228 *mp = m;
1229 return (0);
1233 void
1234 sohasoutofband(so)
1235 register struct socket *so;
1237 #ifdef AMITCP
1238 if (so->so_pgid)
1239 Signal(so->so_pgid->thisTask, so->so_pgid->sigUrgMask);
1240 soevent(so, FD_OOB);
1241 #else
1242 struct proc *p;
1244 if (so->so_pgid < 0)
1245 gsignal(-so->so_pgid, SIGURG);
1246 else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
1247 psignal(p, SIGURG);
1248 #endif
1249 if (so->so_rcv.sb_flags & SB_SEL) {
1250 so->so_rcv.sb_flags &= ~SB_SEL; /* do not notify us any more */
1251 selwakeup(&so->so_rcv.sb_sel);