tools/adflib: build only host variant which is used by Sam440 target
[AROS.git] / workbench / network / stacks / AROSTCP / bsdsocket / kern / uipc_socket2.c
blobae553401981250d304aad16bbb3060197cb19e52
1 /*
2 * Copyright (C) 1993 AmiTCP/IP Group, <amitcp-group@hut.fi>
3 * Helsinki University of Technology, Finland.
4 * All rights reserved.
5 * Copyright (C) 2005 Neil Cafferkey
6 * Copyright (C) 2005 Pavel Fedin
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
20 * MA 02111-1307, USA.
24 /*
25 * Mach Operating System
26 * Copyright (c) 1992 Carnegie Mellon University
27 * All Rights Reserved.
29 * Permission to use, copy, modify and distribute this software and its
30 * documentation is hereby granted, provided that both the copyright
31 * notice and this permission notice appear in all copies of the
32 * software, derivative works or modified versions, and any portions
33 * thereof, and that both notices appear in supporting documentation.
35 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
36 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
37 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 * Carnegie Mellon requests users of this software to return to
41 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
42 * School of Computer Science
43 * Carnegie Mellon University
44 * Pittsburgh PA 15213-3890
46 * any improvements or extensions that they make and grant Carnegie Mellon
47 * the rights to redistribute these changes.
51 * Copyright (c) 1982, 1986, 1988, 1990 Regents of the University of California.
52 * All rights reserved.
54 * Redistribution and use in source and binary forms, with or without
55 * modification, are permitted provided that the following conditions
56 * are met:
57 * 1. Redistributions of source code must retain the above copyright
58 * notice, this list of conditions and the following disclaimer.
59 * 2. Redistributions in binary form must reproduce the above copyright
60 * notice, this list of conditions and the following disclaimer in the
61 * documentation and/or other materials provided with the distribution.
62 * 3. All advertising materials mentioning features or use of this software
63 * must display the following acknowledgement:
64 * This product includes software developed by the University of
65 * California, Berkeley and its contributors.
66 * 4. Neither the name of the University nor the names of its contributors
67 * may be used to endorse or promote products derived from this software
68 * without specific prior written permission.
70 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
71 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
72 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
73 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
74 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
75 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
76 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
77 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
78 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
79 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
80 * SUCH DAMAGE.
82 * @(#)uipc_socket2.c 7.17 (Berkeley) 5/4/91
85 #include <conf.h>
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/malloc.h>
90 #include <sys/mbuf.h>
91 #include <sys/protosw.h>
92 #include <sys/socket.h>
93 #include <sys/socketvar.h>
94 #include <sys/synch.h>
96 #include <api/amiga_api.h>
97 #include <kern/amiga_includes.h>
99 #include <kern/uipc_socket2_protos.h>
100 #include <kern/amiga_select_protos.h>
103 * Primitive routines for operating on sockets and socket buffers
/* strings for sleep message: */
char netio[] = "netio";
char netcon[] = "netcon";
char netcls[] = "netcls";

/* global ceiling on socket buffer reservations; patchable at run time */
u_long sb_max = SB_MAX;		/* patchable */
114 * Procedures to manipulate state flags of socket
115 * and do appropriate wakeups. Normal sequence from the
116 * active (originating) side is that soisconnecting() is
117 * called during processing of connect() call,
118 * resulting in an eventual call to soisconnected() if/when the
119 * connection is established. When the connection is torn down
120 * soisdisconnecting() is called during processing of disconnect() call,
121 * and soisdisconnected() is called when the connection to the peer
122 * is totally severed. The semantics of these routines are such that
123 * connectionless protocols can call soisconnected() and soisdisconnected()
124 * only, bypassing the in-progress calls when setting up a ``connection''
125 * takes no time.
127 * From the passive side, a socket is created with
128 * two queues of sockets: so_q0 for connections in progress
129 * and so_q for connections already made and awaiting user acceptance.
130 * As a protocol is preparing incoming connections, it creates a socket
131 * structure queued on so_q0 by calling sonewconn(). When the connection
132 * is established, soisconnected() is called, and transfers the
133 * socket structure to so_q, making it available to accept().
135 * If a socket is closed with sockets on either
136 * so_q0 or so_q, these sockets are dropped.
138 * If higher level protocols are implemented in
139 * the kernel, the wakeups done here will sometimes
140 * cause software-interrupt process scheduling.
/*
 * Note that a connect() attempt has started on this socket: any stale
 * connected/disconnecting bits are cleared and SS_ISCONNECTING is set.
 */
void
soisconnecting(so)
	register struct socket *so;
{
	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
}
/*
 * Mark the socket as fully connected. If the socket sits on a listening
 * socket's incomplete queue (so_q0), move it to the accept queue (so_q)
 * and wake the listener; otherwise wake sleepers on this socket itself.
 */
void
soisconnected(so)
	register struct socket *so;
{
	register struct socket *head = so->so_head;

	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	if (head && soqremque(so, 0)) {
		/* passive side: hand the socket over to accept() */
		soqinsque(head, so, 1);
		sowakeup(head, &head->so_rcv);
		soevent(head, FD_CONNECT);
		wakeup((caddr_t)&head->so_timeo);
	} else {
		/* active side (or not on so_q0): wake this socket's sleepers */
		wakeup((caddr_t)&so->so_timeo);
		sowakeup(so, &so->so_rcv);
		sowakeup(so, &so->so_snd);
		soevent(so, FD_CONNECT);
	}
}
/*
 * Mark the start of connection teardown: further sends and receives are
 * refused and anyone sleeping on the socket is woken up.
 */
void
soisdisconnecting(so)
	register struct socket *so;
{
	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowakeup(so, &so->so_snd);
	sowakeup(so, &so->so_rcv);
	DEVENTS(__log(LOG_DEBUG,"soisdisconnecting(0x%08lx) called", so);)
	// soevent(so, FD_CLOSE);
}
/*
 * Mark the connection as completely severed: all connection-state bits
 * are cleared, further I/O is refused, and sleepers are woken.
 */
void
soisdisconnected(so)
	register struct socket *so;
{
	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE);
	wakeup((caddr_t)&so->so_timeo);
	sowakeup(so, &so->so_snd);
	sowakeup(so, &so->so_rcv);
	DEVENTS(__log(LOG_DEBUG,"soisdisconnected(0x%08lx) called", so);)
	// soevent(so, FD_CLOSE);
}
202 * When an attempt at a new connection is noted on a socket
203 * which accepts connections, sonewconn is called. If the
204 * connection is possible (subject to space constraints, etc.)
205  * then we allocate a new structure, properly linked into the
206 * data structure of the original socket, and return this.
207 * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED.
/*
 * Create a new socket for an incoming connection on listening socket
 * 'head'. The new socket inherits head's options, state, and buffer
 * limits, and is queued on so_q0 (or so_q when connstatus is non-zero,
 * in which case connstatus is also OR-ed into so_state). Returns the
 * new socket, or NULL when the backlog is full, allocation fails, or
 * the protocol's PRU_ATTACH request fails.
 */
struct socket *
sonewconn(head, connstatus)
	register struct socket *head;
	int connstatus;
{
	register struct socket *so;
	int soqueue = connstatus ? 1 : 0;

	/* limit the total backlog to 3/2 of the listen() queue limit */
	if (head->so_qlen + head->so_q0len > 3 * head->so_qlimit / 2)
		return ((struct socket *)0);
	MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_DONTWAIT);
	if (so == NULL)
		return ((struct socket *)0);
	aligned_bzero_const((caddr_t)so, sizeof(*so));
	so->so_type = head->so_type;
	so->so_options = head->so_options &~ SO_ACCEPTCONN;
	so->so_linger = head->so_linger;
	so->so_state = head->so_state | SS_NOFDREF;
	so->so_proto = head->so_proto;
	so->so_timeo = head->so_timeo;
	so->so_pgid = head->so_pgid;
	so->so_eventmask = head->so_eventmask;
	(void) soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat);
	soqinsque(head, so, soqueue);
	if ((*so->so_proto->pr_usrreq)(so, PRU_ATTACH,
	    (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0)) {
		/* protocol refused the attach: undo queueing and free */
		(void) soqremque(so, soqueue);
		(void) bsd_free((caddr_t)so, M_SOCKET);
		return ((struct socket *)0);
	}
	if (connstatus) {
		sowakeup(head, &head->so_rcv);
		// soevent(head, FD_ACCEPT);
		wakeup((caddr_t)&head->so_timeo);
		so->so_state |= connstatus;
	}
	soevent(head, FD_ACCEPT);
	return (so);
}
/*
 * Append socket 'so' to the tail of listening socket head's queue:
 * q == 0 selects the incomplete-connection queue (so_q0), anything else
 * the completed queue (so_q). The matching length counter is bumped.
 */
void
soqinsque(head, so, q)
	register struct socket *head, *so;
	int q;
{
	register struct socket **prev;

	so->so_head = head;
	if (q == 0) {
		head->so_q0len++;
		so->so_q0 = 0;
		/* walk to the tail of the singly-linked so_q0 list */
		for (prev = &(head->so_q0); *prev; )
			prev = &((*prev)->so_q0);
	} else {
		head->so_qlen++;
		so->so_q = 0;
		for (prev = &(head->so_q); *prev; )
			prev = &((*prev)->so_q);
	}
	*prev = so;
}
/*
 * Remove socket 'so' from its head's queue (so_q0 when q == 0, so_q
 * otherwise) and sever its back-pointers. Returns 1 on success, 0 when
 * the socket is not found on the selected queue.
 */
soqremque(so, q)
	register struct socket *so;
	int q;
{
	register struct socket *head, *prev, *next;

	head = so->so_head;
	prev = head;
	for (;;) {
		next = q ? prev->so_q : prev->so_q0;
		if (next == so)
			break;
		if (next == 0)
			return (0);	/* not on this queue */
		prev = next;
	}
	if (q == 0) {
		prev->so_q0 = next->so_q0;
		head->so_q0len--;
	} else {
		prev->so_q = next->so_q;
		head->so_qlen--;
	}
	next->so_q0 = next->so_q = 0;
	next->so_head = 0;
	return (1);
}
301 * Socantsendmore indicates that no more data will be sent on the
302 * socket; it would normally be applied to a socket when the user
303 * informs the system that no more data is to be sent, by the protocol
304 * code (in case PRU_SHUTDOWN). Socantrcvmore indicates that no more data
305 * will be received, and will normally be applied to the socket by a
306 * protocol when it detects that the peer will send no more data.
307 * Data queued for reading in the socket may yet be read.
/*
 * Note that no more data will be sent on the socket (e.g. after a
 * PRU_SHUTDOWN) and wake any writers so they observe the new state.
 */
void
socantsendmore(so)
	struct socket *so;
{
	so->so_state |= SS_CANTSENDMORE;
	sowakeup(so, &so->so_snd);
	DEVENTS(__log(LOG_DEBUG,"socantsendmore(0x%08lx) called", so);)
}
/*
 * Note that no more data will be received; data already queued may
 * still be read. Readers are woken and an FD_CLOSE event is posted.
 */
void
socantrcvmore(so)
	struct socket *so;
{
	so->so_state |= SS_CANTRCVMORE;
	sowakeup(so, &so->so_rcv);
	DEVENTS(__log(LOG_DEBUG,"socantrcvmore(0x%08lx) called", so);)
	soevent(so, FD_CLOSE);
}
332 * Socket select/wakeup routines.
336 * Queue a process for a select on a socket buffer.
/*
 * Queue a process (its SocketBase) for a select on socket buffer sb
 * and arm the buffer so sowakeup() will notify the selector.
 */
void
sbselqueue(sb, cp)
	struct sockbuf *sb;
	struct SocketBase *cp;
{
	selenter(cp, &sb->sb_sel);
	sb->sb_flags |= SB_SEL;		/* notify us if something happens */
}
348 * Wait for data to arrive at/drain from a socket buffer.
/*
 * Wait for data to arrive at/drain from a socket buffer.
 * Sleeps on sb->sb_cc; a non-zero sb_timeo bounds the sleep, otherwise
 * the sleep is unbounded. Returns the tsleep() result (0 or an error).
 */
sbwait(sb, cp)
	struct sockbuf *sb;
	struct SocketBase *cp;
{
	sb->sb_flags |= SB_WAIT;
	return (tsleep(cp, (caddr_t)&sb->sb_cc, netio,
		       (sb->sb_timeo.tv_sec || sb->sb_timeo.tv_usec) ?
		       &sb->sb_timeo : NULL));
}
363 * Lock a sockbuf already known to be locked;
364 * return any error returned from sleep (EINTR).
367 sb_lock(sb, cp)
368 struct sockbuf *sb;
369 struct SocketBase *cp;
371 int error;
373 while (sb->sb_flags & SB_LOCK) {
374 sb->sb_flags |= SB_WANT;
375 if (error = tsleep(cp, (caddr_t)&sb->sb_flags, netio, NULL))
376 return (error);
378 sb->sb_flags |= SB_LOCK;
379 return (0);
383 * Wakeup processes waiting on a socket buffer.
384 * Do asynchronous notification via SIGIO
385 * if the socket has the SS_ASYNC flag set.
/*
 * Wakeup processes waiting on a socket buffer: notify selectors first,
 * then sleepers on sb_cc, and finally do asynchronous notification
 * when SS_ASYNC is set (Signal() to the owning task on AmiTCP; SIGIO
 * to the process/group elsewhere).
 */
void
sowakeup(so, sb)
	register struct socket *so;
	register struct sockbuf *sb;
{
#ifndef AMITCP
	struct proc *p;
#endif
	if (sb->sb_flags & SB_SEL) {
		sb->sb_flags &= ~SB_SEL;	/* do not notify us any more */
		selwakeup(&sb->sb_sel);
	}
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup((caddr_t)&sb->sb_cc);
	}
	if (so->so_state & SS_ASYNC) {
#ifdef AMITCP
		if (so->so_pgid)
			Signal(so->so_pgid->thisTask, so->so_pgid->sigIOMask);
#else
		/* negative so_pgid means a process group, positive a pid */
		if (so->so_pgid < 0)
			gsignal(-so->so_pgid, SIGIO);
		else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
			psignal(p, SIGIO);
#endif
	}
}
416 /* Wakeup processes with socket events notification */
/* Wake readers of the socket and post an FD_READ event. */
void sorwakeup(struct socket *so)
{
	sowakeup(so, &so->so_rcv);
	soevent(so, FD_READ);
}
/*
 * Wake writers of the socket; an FD_WRITE event is posted only for
 * non-blocking (SS_NBIO) sockets.
 */
void sowwakeup(struct socket *so)
{
	sowakeup(so, &so->so_snd);
	if (so->so_state & SS_NBIO)
		soevent(so, FD_WRITE);
}
431 /* Queue an event for the socket */
/*
 * Queue an asynchronous event notification for the socket. Events are
 * delivered only when the socket has an owner (so_pgid), is in async
 * mode and still has a descriptor reference, the owner registered an
 * event signal mask, and the event bit is enabled in so_eventmask.
 * An soevent entry already queued for the same socket is reused and
 * its event bits merged; otherwise a new entry is allocated. On
 * allocation failure the event is dropped with a LOG_CRIT message.
 */
void soevent(struct socket *so, u_long event)
{
	struct soevent *tse;
	struct soevent *se = NULL;

	if (so->so_pgid && ((so->so_state & (SS_ASYNC | SS_NOFDREF)) == SS_ASYNC))
	{
		if (so->so_pgid->sigEventMask)
		{
			if (event & so->so_eventmask)
			{
				DEVENTS(__log(LOG_DEBUG,"Sending event 0x%08lx for socket 0x%08lx", event, so);)
				ObtainSemaphore(&so->so_pgid->EventLock);
				/* look for an entry already queued for this socket */
				for (tse = (struct soevent *)so->so_pgid->EventList.mlh_Head; tse->node.mln_Succ; tse = (struct soevent *)tse->node.mln_Succ)
				{
					if (tse->socket == so) {
						se = tse;
						break;
					}
				}
				if (se)
					event |= se->events;	/* merge with pending bits */
				else {
					se = bsd_malloc(sizeof(struct soevent), NULL, NULL);
					if (se) {
						se->socket = so;
						AddTail((struct List *)&so->so_pgid->EventList, (struct Node *)se);
					}
				}
				if (se)
				{
					se->events = event;
					Signal(so->so_pgid->thisTask, so->so_pgid->sigEventMask);
				}
				else
					__log(LOG_CRIT,"Unable to send socket event, out of memory");
				ReleaseSemaphore(&so->so_pgid->EventLock);
			}
		}
	}
}
475 * Socket buffer (struct sockbuf) utility routines.
477 * Each socket contains two socket buffers: one for sending data and
478 * one for receiving data. Each buffer contains a queue of mbufs,
479 * information about the number of mbufs and amount of data in the
480 * queue, and other fields allowing select() statements and notification
481 * on data availability to be implemented.
483 * Data stored in a socket buffer is maintained as a list of records.
484 * Each record is a list of mbufs chained together with the m_next
485 * field. Records are chained together with the m_nextpkt field. The upper
486 * level routine soreceive() expects the following conventions to be
487 * observed when placing information in the receive buffer:
489 * 1. If the protocol requires each message be preceded by the sender's
490 * name, then a record containing that name must be present before
491 * any associated data (mbuf's must be of type MT_SONAME).
492 * 2. If the protocol supports the exchange of ``access rights'' (really
493 * just additional data associated with the message), and there are
494 * ``rights'' to be received, then a record containing this data
495 * should be present (mbuf's must be of type MT_RIGHTS).
496 * 3. If a name or rights record exists, then it must be followed by
497 * a data record, perhaps of zero length.
499 * Before using a new socket structure it is first necessary to reserve
500 * buffer space to the socket, by calling sbreserve(). This should commit
501 * some of the available buffer space in the system buffer pool for the
502 * socket (currently, it does nothing but enforce limits). The space
503 * should be released by calling sbrelease() when the socket is destroyed.
/*
 * Reserve send and receive buffer space for a freshly created socket
 * and establish sane low-water marks. Returns 0 on success or ENOBUFS
 * when either reservation is refused by sbreserve() (the send-side
 * reservation is released again if only the receive side fails).
 */
soreserve(so, sndcc, rcvcc)
	register struct socket *so;
	u_long sndcc, rcvcc;
{
	if (sbreserve(&so->so_snd, sndcc) == 0)
		goto bad;
	if (sbreserve(&so->so_rcv, rcvcc) == 0)
		goto bad2;
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = mbconf.mclbytes;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	return (0);
bad2:
	sbrelease(&so->so_snd);
bad:
	return (ENOBUFS);
}
530 * Allot mbufs to a sockbuf.
531 * Attempt to scale mbmax so that mbcnt doesn't become limiting
532 * if buffering efficiency is near the normal case.
/*
 * Allot mbufs to a sockbuf: refuse reservations above the scaled
 * sb_max limit, otherwise set the high-water mark and scale sb_mbmax
 * so that mbuf accounting does not become limiting when buffering
 * efficiency is near the normal case. Returns 1 on success, 0 on
 * refusal.
 */
sbreserve(sb, cc)
	struct sockbuf *sb;
	u_long cc;
{
	if (cc > sb_max * mbconf.mclbytes / (MSIZE + mbconf.mclbytes))
		return (0);
	sb->sb_hiwat = cc;
	sb->sb_mbmax = MIN(cc * 2, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}
550 * Free mbufs held by a socket, and reserved mbuf space.
/*
 * Free all mbufs held by a socket buffer and release its reserved
 * space (high-water and mbuf limits reset to zero).
 */
void
sbrelease(sb)
	struct sockbuf *sb;
{
	sbflush(sb);
	sb->sb_hiwat = sb->sb_mbmax = 0;
}
562 * Routines to add and remove
563 * data from an mbuf queue.
565 * The routines sbappend() or sbappendrecord() are normally called to
566 * append new mbufs to a socket buffer, after checking that adequate
567 * space is available, comparing the function sbspace() with the amount
568 * of data to be added. sbappendrecord() differs from sbappend() in
569 * that data supplied is treated as the beginning of a new record.
570 * To place a sender's address, optional access rights, and data in a
571 * socket receive buffer, sbappendaddr() should be used. To place
572 * access rights and data in a socket receive buffer, sbappendrights()
573 * should be used. In either case, the new data begins a new record.
574 * Note that unlike sbappend() and sbappendrecord(), these routines check
575 * for the caller that there will be enough space to store the data.
576 * Each fails if there is not enough space, or if it cannot find mbufs
577 * to store additional information in.
579 * Reliable protocols may use the socket send buffer to hold data
580 * awaiting acknowledgement. Data is normally copied from a socket
581 * send buffer in a protocol with m_copy for output to a peer,
582 * and then removing the data from the socket buffer with sbdrop()
583 * or sbdroprecord() when the data is acknowledged by the peer.
587 * Append mbuf chain m to the last record in the
588 * socket buffer sb. The additional space associated
589 * the mbuf chain is recorded in sb. Empty mbufs are
590 * discarded and mbufs are compacted where possible.
/*
 * Append mbuf chain m to the last record in socket buffer sb; the
 * added space is accounted in sb, empty mbufs are discarded, and data
 * is compacted where possible (via sbcompress()). With USE_M_EOR, a
 * trailing mbuf marked M_EOR forces a fresh record instead.
 */
void
sbappend(sb, m)
	struct sockbuf *sb;
	struct mbuf *m;
{
	register struct mbuf *n;

	if (m == 0)
		return;
	if (n = sb->sb_mb) {
		/* find the last record ... */
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		/* ... and its last mbuf */
		do {
#ifdef USE_M_EOR
			if (n->m_flags & M_EOR) {
				sbappendrecord(sb, m); /* XXXXXX!!!! */
				return;
			}
#endif
		} while (n->m_next && (n = n->m_next));
	}
	sbcompress(sb, m, n);
}
#ifdef SOCKBUF_DEBUG
/*
 * Debug-only consistency check: recompute the byte count and mbuf
 * space of the first record's chain and panic on a mismatch with
 * sb_cc/sb_mbcnt, or on a stray record (m_nextpkt) link.
 */
void
sbcheck(sb)
	register struct sockbuf *sb;
{
	register struct mbuf *m;
	register int len = 0, mbcnt = 0;

	for (m = sb->sb_mb; m; m = m->m_next) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		if (m->m_nextpkt)
			panic("sbcheck nextpkt");
	}
	if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
		printf("cc %ld != %ld || mbcnt %ld != %ld\n", len, sb->sb_cc,
		    mbcnt, sb->sb_mbcnt);
		panic("sbcheck");
	}
}
#endif
641 * As above, except the mbuf chain
642 * begins a new record.
/*
 * As sbappend(), except the mbuf chain m0 begins a new record at the
 * end of the socket buffer's record list.
 */
void
sbappendrecord(sb, m0)
	register struct sockbuf *sb;
	register struct mbuf *m0;
{
	register struct mbuf *m;

	if (m0 == 0)
		return;
	if (m = sb->sb_mb)
		while (m->m_nextpkt)
			m = m->m_nextpkt;
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	if (m)
		m->m_nextpkt = m0;
	else
		sb->sb_mb = m0;
	m = m0->m_next;
	m0->m_next = 0;
#ifdef USE_M_EOR
	if (m && (m0->m_flags & M_EOR)) {
		/* move the end-of-record mark onto the remaining chain */
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
#endif
	sbcompress(sb, m, m0);
}
677 * As above except that OOB data
678 * is inserted at the beginning of the sockbuf,
679 * but after any other OOB data.
/*
 * As above except that OOB data is inserted at the beginning of the
 * sockbuf, but after any other OOB data already queued.
 */
void
sbinsertoob(sb, m0)
	register struct sockbuf *sb;
	register struct mbuf *m0;
{
	register struct mbuf *m;
	register struct mbuf **mp;

	if (m0 == 0)
		return;
	/* skip past existing OOB records (and control mbufs leading them) */
	for (mp = &sb->sb_mb; m = *mp; mp = &((*mp)->m_nextpkt)) {
	    again:
		switch (m->m_type) {

		case MT_OOBDATA:
			continue;		/* WANT next train */

		case MT_CONTROL:
			if (m = m->m_next)
				goto again;	/* inspect THIS train further */
		}
		break;
	}
	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	m0->m_nextpkt = *mp;
	*mp = m0;
	m = m0->m_next;
	m0->m_next = 0;
#ifdef USE_M_EOR
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
#endif
	sbcompress(sb, m, m0);
}
723 * Append address and data, and optionally, control (ancillary) data
724 * to the receive queue of a socket. If present,
725 * m0 must include a packet header with total length.
726 * Returns 0 if no space in sockbuf or insufficient mbufs.
/*
 * Append address and data, and optionally, control (ancillary) data
 * to the receive queue of a socket as one new record. If present, m0
 * must include a packet header with total length.
 * Returns 0 if no space in sockbuf or insufficient mbufs, 1 on success.
 */
sbappendaddr(sb, asa, m0, control)
	register struct sockbuf *sb;
	struct sockaddr *asa;
	struct mbuf *m0, *control;
{
	register struct mbuf *m, *n;
	int space = asa->sa_len;

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr");
	if (m0)
		space += m0->m_pkthdr.len;
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		if (n->m_next == 0)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	if (asa->sa_len > MLEN)
		return (0);	/* address must fit in one small mbuf */
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	m->m_len = asa->sa_len;
	aligned_bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	/* account every mbuf of the new name+control+data record */
	for (n = m; n; n = n->m_next)
		sballoc(sb, n);
	if (n = sb->sb_mb) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = m;
	} else
		sb->sb_mb = m;
	return (1);
}
/*
 * Append control (ancillary) data and optional data m0 to the receive
 * queue as one new record; control must be non-null.
 * Returns 0 if there is no space in the sockbuf, 1 on success.
 */
sbappendcontrol(sb, m0, control)
	struct sockbuf *sb;
	struct mbuf *control, *m0;
{
	register struct mbuf *m, *n;
	int space = 0;

	if (control == 0)
		panic("sbappendcontrol");
	for (m = control; ; m = m->m_next) {
		space += m->m_len;
		if (m->m_next == 0)
			break;
	}
	n = m;			/* save pointer to last control buffer */
	for (m = m0; m; m = m->m_next)
		space += m->m_len;
	if (space > sbspace(sb))
		return (0);
	n->m_next = m0;			/* concatenate data to control */
	for (m = control; m; m = m->m_next)
		sballoc(sb, m);
	if (n = sb->sb_mb) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		n->m_nextpkt = control;
	} else
		sb->sb_mb = control;
	return (1);
}
804 * Compress mbuf chain m into the socket
805 * buffer sb following mbuf n. If n
806 * is null, the buffer is presumed empty.
/*
 * Compress mbuf chain m into the socket buffer sb following mbuf n.
 * If n is null, the buffer is presumed empty. Empty mbufs (whose loss
 * does not hide a record boundary) are freed, and data is copied into
 * the tail mbuf when it has internal room and a matching type.
 */
void
sbcompress(sb, m, n)
	register struct sockbuf *sb;
	register struct mbuf *m, *n;
{
#ifdef USE_M_EOR
	register int eor = 0;
#endif
	register struct mbuf *o;

	while (m) {
#ifdef USE_M_EOR
		eor |= m->m_flags & M_EOR;
#endif
		if (m->m_len == 0 &&
		    (
#ifdef USE_M_EOR
		     eor == 0 ||
#endif
		     (((o = m->m_next) || (o = n)) &&
		      o->m_type == m->m_type))) {
			m = m_free(m);	/* drop the empty mbuf */
			continue;
		}
		if (n && (n->m_flags & (M_EXT
#ifdef USE_M_EOR
					| M_EOR
#endif
					)) == 0 &&
		    (n->m_data + n->m_len + m->m_len) < &n->m_dat[MLEN] &&
		    n->m_type == m->m_type) {
			/* tail mbuf has room: copy the data instead of linking */
			bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
			    (unsigned)m->m_len);
			n->m_len += m->m_len;
			sb->sb_cc += m->m_len;
			m = m_free(m);
			continue;
		}
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sballoc(sb, m);
		n = m;
#ifdef USE_M_EOR
		m->m_flags &= ~M_EOR;
#endif
		m = m->m_next;
		n->m_next = 0;
	}
#ifdef USE_M_EOR
	if (eor) {
		/* re-apply the end-of-record mark to the final mbuf */
		if (n)
			n->m_flags |= eor;
		else
			printf("semi-panic: sbcompress\n");
	}
#endif
}
869 * Free all mbufs in a sockbuf.
870 * Check that all resources are reclaimed.
/*
 * Free all mbufs in a sockbuf and check that all resources were
 * reclaimed; the buffer must not be locked.
 */
void
sbflush(sb)
	register struct sockbuf *sb;
{
	if (sb->sb_flags & SB_LOCK)
		panic("sbflush");
	while (sb->sb_mbcnt)
		sbdrop(sb, (int)sb->sb_cc);
	if (sb->sb_cc || sb->sb_mb)
		panic("sbflush 2");
}
886 * Drop data from (the front of) a sockbuf.
/*
 * Drop len bytes of data from (the front of) a sockbuf: fully consumed
 * mbufs are freed, a partially consumed one is trimmed from the front,
 * and record links are rethreaded via 'next'. Panics if len exceeds
 * the data present.
 */
void
sbdrop(sb, len)
	register struct sockbuf *sb;
	register int len;
{
	register struct mbuf *m, *mn;
	struct mbuf *next;

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	while (len > 0) {
		if (m == 0) {
			if (next == 0)
				panic("sbdrop");
			m = next;	/* advance to the next record */
			next = m->m_nextpkt;
			continue;
		}
		if (m->m_len > len) {
			/* partial mbuf: trim the consumed bytes off the front */
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	/* also discard any now-empty mbufs at the new front */
	while (m && m->m_len == 0) {
		sbfree(sb, m);
		MFREE(m, mn);
		m = mn;
	}
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
}
929 * Drop a record off the front of a sockbuf
930 * and move the next record to the front.
932 void
933 sbdroprecord(sb)
934 register struct sockbuf *sb;
936 register struct mbuf *m, *mn;
938 m = sb->sb_mb;
939 if (m) {
940 sb->sb_mb = m->m_nextpkt;
941 do {
942 sbfree(sb, m);
943 MFREE(m, mn);
944 } while (m = mn);