/*
 * sppp.c - Solaris STREAMS PPP multiplexing pseudo-driver
 *
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation is hereby granted, provided that the above copyright
 * notice appears in all copies.
 *
 * SUN MAKES NO REPRESENTATION OR WARRANTIES ABOUT THE SUITABILITY OF
 * THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
 * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT. SUN SHALL NOT BE LIABLE FOR
 * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
 * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES.
 *
 * Copyright (c) 1994 The Australian National University.
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation is hereby granted, provided that the above copyright
 * notice appears in all copies.  This software is provided without any
 * warranty, express or implied.  The Australian National University
 * makes no representations about the suitability of this software for
 * any purpose.
 *
 * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
 * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
 * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
 * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
 * OR MODIFICATIONS.
 *
 * This driver is derived from the original SVR4 STREAMS PPP driver
 * originally written by Paul Mackerras <paul.mackerras@cs.anu.edu.au>.
 *
 * Adi Masputra <adi.masputra@sun.com> rewrote and restructured the code
 * for improved performance and scalability.
 */
#define RCSID   "$Id: sppp.c,v 1.0 2000/05/08 01:10:12 masputra Exp $"

#include <sys/types.h>
#include <sys/debug.h>
#include <sys/param.h>
#include <sys/stream.h>
#include <sys/stropts.h>
#include <sys/sysmacros.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/kstat.h>
#include <sys/strsun.h>
#include <sys/ethernet.h>
#include <sys/policy.h>
#include <net/ppp_defs.h>
#include <net/pppio.h>
/*
 * This is used to tag official Solaris sources.  Please do not define
 * "INTERNAL_BUILD" when building this software outside of Sun Microsystems.
 */

#ifdef INTERNAL_BUILD
/* MODINFO is limited to 32 characters. */
const char sppp_module_description[] = "PPP 4.0 mux";
#else /* INTERNAL_BUILD */
const char sppp_module_description[] = "ANU PPP mux";

static const char buildtime[] = "Built " __DATE__ " at " __TIME__
#ifdef DEBUG
" DEBUG"
#endif
"\n";
#endif /* INTERNAL_BUILD */
static void sppp_inner_ioctl(queue_t *, mblk_t *);
static void sppp_outer_ioctl(queue_t *, mblk_t *);
static queue_t *sppp_send(queue_t *, mblk_t **, spppstr_t *);
static queue_t *sppp_recv(queue_t *, mblk_t **, spppstr_t *);
static void sppp_recv_nondata(queue_t *, mblk_t *, spppstr_t *);
static queue_t *sppp_outpkt(queue_t *, mblk_t **, int, spppstr_t *);
static spppstr_t *sppp_inpkt(queue_t *, mblk_t *, spppstr_t *);
static int sppp_kstat_update(kstat_t *, int);
static void sppp_release_pkts(sppa_t *, uint16_t);
/*
 * sps_list contains the list of active per-stream instance state structures
 * ordered on the minor device number (see sppp.h for details). All streams
 * opened to this driver are threaded together in this list.
 */
static spppstr_t *sps_list = NULL;

/*
 * ppa_list contains the list of active per-attachment instance state
 * structures ordered on the ppa id number (see sppp.h for details). All of
 * the ppa structures created once per PPPIO_NEWPPA ioctl are threaded together
 * in this list. There is exactly one ppa structure for a given PPP interface,
 * and multiple sps streams (upper streams) may share a ppa by performing
 * an attachment explicitly (PPPIO_ATTACH) or implicitly (DL_ATTACH_REQ).
 */
static sppa_t *ppa_list = NULL;

static const char *kstats_names[] = { SPPP_KSTATS_NAMES };
static const char *kstats64_names[] = { SPPP_KSTATS64_NAMES };
/*
 * map proto (which is an IANA defined ppp network protocol) to
 * a bit position indicated by NP_* in ppa_npflag
 */
sppp_ppp2np(uint16_t proto)
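/*
 * A return value of 0 means the protocol has no NP_* bit and therefore
 * cannot be blocked via ppa_npflag; callers such as PPPIO_BLOCKNP handling
 * and sppp_recv() test for npflagpos != 0 before touching ppa_npflag.
 */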
/*
 * exclusive inner, exclusive outer.
 *
 * Common open procedure for module.
 */
sppp_open(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *credp)

    ASSERT(q != NULL && devp != NULL);
    ASSERT(sflag != MODOPEN);

    if (q->q_ptr != NULL) {
        return (0);             /* already open */
    if (sflag != CLONEOPEN) {
    /*
     * The sps list is sorted using the minor number as the key. The
     * following code walks the list to find the lowest valued minor
     * number available to be used.
     */
    for (nextmn = &sps_list; (sps = *nextmn) != NULL;
        nextmn = &sps->sps_nextmn) {
        if (sps->sps_mn_id != mn) {
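            /*
             * The walk stops at the first hole in the sorted minor-number
             * sequence; e.g. with minors 0, 1 and 3 in use, mn ends up as 2
             * and the new stream is inserted in front of the stream that
             * holds minor 3, reusing the lowest free minor number.
             */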
    sps = (spppstr_t *)kmem_zalloc(sizeof (spppstr_t), KM_SLEEP);
    ASSERT(sps != NULL);                /* KM_SLEEP must never return NULL */
    sps->sps_nextmn = *nextmn;          /* insert stream in global list */
    sps->sps_mn_id = mn;                /* save minor id for this stream */
    sps->sps_rq = q;                    /* save read queue pointer */
    sps->sps_sap = -1;                  /* no sap bound to stream */
    sps->sps_dlstate = DL_UNATTACHED;   /* dlpi state is unattached */
    sps->sps_npmode = NPMODE_DROP;      /* drop all packets initially */
    sps->sps_zoneid = crgetzoneid(credp);
    q->q_ptr = WR(q)->q_ptr = (caddr_t)sps;
    /*
     * We explicitly disable the automatic queue scheduling for the
     * write-side to obtain complete control over queuing during transmit.
     * Packets will be queued at the upper write queue and the service
     * routine will not be called until it gets scheduled by having the
     * lower write service routine call the qenable(WR(uq)) for all streams
     * attached to the same ppa instance.
     */
    *devp = makedevice(getmajor(*devp), mn);
/*
 * Free storage used by a PPA.  This is not called until the last PPA
 * user closes their connection or reattaches to a different PPA.
 */
sppp_free_ppa(sppa_t *ppa)

    ASSERT(ppa->ppa_refcnt == 1);
    if (ppa->ppa_kstats != NULL) {
        kstat_delete(ppa->ppa_kstats);
        ppa->ppa_kstats = NULL;
    mutex_destroy(&ppa->ppa_sta_lock);
    mutex_destroy(&ppa->ppa_npmutex);
    rw_destroy(&ppa->ppa_sib_lock);
    while (*nextppa != NULL) {
        if (*nextppa == ppa) {
            *nextppa = ppa->ppa_nextppa;
        nextppa = &(*nextppa)->ppa_nextppa;
    kmem_free(ppa, sizeof (*ppa));
/*
 * Create a new PPA.  Caller must be exclusive on outer perimeter.
 */
sppp_create_ppa(uint32_t ppa_id, zoneid_t zoneid)

    char        unit[32];       /* Unit name */

    /*
     * NOTE: unit *must* be named for the driver
     * name plus the ppa number so that netstat
     * can find the statistics.
     */
    (void) sprintf(unit, "%s" "%d", PPP_DRV_NAME, ppa_id);
    /*
     * Make sure we can allocate a buffer to
     * contain the ppa to be sent upstream, as
     * well as the actual ppa structure and its
     * associated kstat structure.
     */
    ppa = (sppa_t *)kmem_zalloc(sizeof (sppa_t),
    ksp = kstat_create(PPP_DRV_NAME, ppa_id, unit, "net", KSTAT_TYPE_NAMED,
        sizeof (sppp_kstats_t) / sizeof (kstat_named_t), 0);
    if (ppa == NULL || ksp == NULL) {
        kmem_free(ppa, sizeof (sppa_t));

    ppa->ppa_kstats = ksp;          /* chain kstat structure */
    ppa->ppa_ppa_id = ppa_id;       /* record ppa id */
    ppa->ppa_zoneid = zoneid;       /* zone that owns this PPA */
    ppa->ppa_mtu = PPP_MAXMTU;      /* 65535-(PPP_HDRLEN+PPP_FCSLEN) */
    ppa->ppa_mru = PPP_MAXMRU;      /* 65000 */

    mutex_init(&ppa->ppa_sta_lock, NULL, MUTEX_DRIVER, NULL);
    mutex_init(&ppa->ppa_npmutex, NULL, MUTEX_DRIVER, NULL);
    rw_init(&ppa->ppa_sib_lock, NULL, RW_DRIVER, NULL);

    /*
     * Prepare and install kstat counters.  Note that for netstat
     * -i to work, there needs to be "ipackets", "opackets",
     * "ierrors", and "oerrors" kstat named variables.
     */
    knt = (kstat_named_t *)ksp->ks_data;
    for (cpp = kstats_names; cpp < kstats_names + Dim(kstats_names);
        kstat_named_init(knt, *cpp, KSTAT_DATA_UINT32);
    for (cpp = kstats64_names; cpp < kstats64_names + Dim(kstats64_names);
        kstat_named_init(knt, *cpp, KSTAT_DATA_UINT64);
    ksp->ks_update = sppp_kstat_update;
    ksp->ks_private = (void *)ppa;

    /* link to the next ppa and insert into global list */
    availppa = &ppa_list;
    while ((curppa = *availppa) != NULL) {
        if (ppa_id < curppa->ppa_ppa_id)
        availppa = &curppa->ppa_nextppa;
    ppa->ppa_nextppa = *availppa;
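    /*
     * Note: the walk above keeps ppa_list sorted by ppa_ppa_id, linking the
     * new ppa in just before the first entry whose id exceeds ppa_id; the
     * PPPIO_NEWPPA handler relies on this ordering when it scans the list
     * for the lowest free unit number.
     */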
/*
 * exclusive inner, exclusive outer.
 *
 * Common close procedure for module.
 */
sppp_close(queue_t *q, int flags __unused, cred_t *credp __unused)

    ASSERT(q != NULL && q->q_ptr != NULL);
    sps = (spppstr_t *)q->q_ptr;

        ASSERT(!IS_SPS_CONTROL(sps));
        goto close_unattached;
    if (IS_SPS_CONTROL(sps)) {
        ASSERT(ppa->ppa_ctl == sps);
        /*
         * STREAMS framework always issues I_UNLINK prior to close,
         * since we only allow I_LINK under the control stream.
         * A given ppa structure has at most one lower stream pointed
         * by the ppa_lower_wq field, because we only allow a single
         * linkage (I_LINK) to be done on the control stream.
         */
        ASSERT(ppa->ppa_lower_wq == NULL);
        /*
         * Walk through all of sibling streams attached to this ppa,
         * and remove all references to this ppa.  We have exclusive
         * access for the entire driver here, so there's no need
         * to hold ppa_sib_lock.
         */
        sib = ppa->ppa_streams;
        while (sib != NULL) {
            ASSERT(ppa == sib->sps_ppa);
            sib->sps_npmode = NPMODE_DROP;
            sib->sps_flags &= ~(SPS_PIOATTACH | SPS_CACHED);
            /*
             * There should be a preallocated hangup
             * message here.  Fetch it and send it up to
             * the stream head.  This will cause IP to
             * mark the interface as "down."
             */
            if ((mp = sib->sps_hangup) != NULL) {
                sib->sps_hangup = NULL;
                /*
                 * M_HANGUP works with IP, but snoop
                 * is lame and requires M_ERROR.  Send
                 * up a clean error code instead.
                 *
                 * XXX if snoop is fixed, fix this, too.
                 */
                *mp->b_wptr++ = ENXIO;
                putnext(sib->sps_rq, mp);
            qenable(WR(sib->sps_rq));
            sib = sib->sps_nextsib;
        ASSERT(ppa->ppa_refcnt == cnt);
        ASSERT(ppa->ppa_streams != NULL);
        ASSERT(ppa->ppa_ctl != sps);
        if (sps->sps_sap == PPP_IP) {
            ppa->ppa_ip_cache = NULL;
            mp = create_lsmsg(PPP_LINKSTAT_IPV4_UNBOUND);
        } else if (sps->sps_sap == PPP_IPV6) {
            ppa->ppa_ip6_cache = NULL;
            mp = create_lsmsg(PPP_LINKSTAT_IPV6_UNBOUND);
        /* Tell the daemon the bad news. */
        if (mp != NULL && ppa->ppa_ctl != NULL &&
            (sps->sps_npmode == NPMODE_PASS ||
            sps->sps_npmode == NPMODE_QUEUE)) {
            putnext(ppa->ppa_ctl->sps_rq, mp);
        /*
         * Walk through all of sibling streams attached to the
         * same ppa, and remove this stream from the sibling
         * streams list.  We have exclusive access for the
         * entire driver here, so there's no need to hold
         * ppa_sib_lock.
         */
        sib = ppa->ppa_streams;
            ppa->ppa_streams = sps->sps_nextsib;
        while (sib->sps_nextsib != NULL) {
            if (sib->sps_nextsib == sps) {
                sib->sps_nextsib = sps->sps_nextsib;
            sib = sib->sps_nextsib;
        sps->sps_nextsib = NULL;
        freemsg(sps->sps_hangup);
        sps->sps_hangup = NULL;
        /*
         * Check if this is a promiscuous stream. If the SPS_PROMISC bit
         * is still set, it means that the stream is closed without
         * ever having issued DL_DETACH_REQ or DL_PROMISCOFF_REQ.
         * In this case, we simply decrement the promiscuous counter,
         * and it's safe to do it without holding ppa_sib_lock since
         * we're exclusive (inner and outer) at this point.
         */
        if (IS_SPS_PROMISC(sps)) {
            ASSERT(ppa->ppa_promicnt > 0);
        /* If we're the only one left, then delete now. */
        if (ppa->ppa_refcnt <= 1)

    q->q_ptr = WR(q)->q_ptr = NULL;
    for (nextmn = &sps_list; *nextmn != NULL;
        nextmn = &(*nextmn)->sps_nextmn) {
        if (*nextmn == sps) {
            *nextmn = sps->sps_nextmn;
    kmem_free(sps, sizeof (spppstr_t));
sppp_ioctl(struct queue *q, mblk_t *mp)

    struct ppp_idle         *pip;
    struct ppp_stats64      *psp;
    struct ppp_comp_stats   *pcsp;

    sps = (spppstr_t *)q->q_ptr;

    iop = (struct iocblk *)mp->b_rptr;
    switch (iop->ioc_cmd) {
        if (!IS_SPS_CONTROL(sps)) {
            break;              /* return EINVAL */
        } else if (iop->ioc_count != 2 * sizeof (uint32_t) ||
            (mp->b_cont == NULL)) {
        ASSERT(mp->b_cont->b_rptr != NULL);
        ASSERT(sps->sps_npmode == NPMODE_PASS);
        sap = ((uint32_t *)mp->b_cont->b_rptr)[0];
        npmode = (enum NPmode)((uint32_t *)mp->b_cont->b_rptr)[1];
        /*
         * Walk the sibling streams which belong to the same
         * ppa, and try to find a stream with matching sap
         */
        rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
        for (nextsib = ppa->ppa_streams; nextsib != NULL;
            nextsib = nextsib->sps_nextsib) {
            if (nextsib->sps_sap == sap) {
                break;          /* found it */
        if (nextsib == NULL) {
            rw_exit(&ppa->ppa_sib_lock);
            break;              /* return EINVAL */
        nextsib->sps_npmode = npmode;
        if ((nextsib->sps_npmode != NPMODE_QUEUE) &&
            (WR(nextsib->sps_rq)->q_first != NULL)) {
            qenable(WR(nextsib->sps_rq));
        rw_exit(&ppa->ppa_sib_lock);
        error = 0;              /* return success */
        ASSERT(!IS_SPS_CONTROL(sps));
        } else if (!IS_PPA_TIMESTAMP(ppa)) {
            break;              /* return EINVAL */
        if ((nmp = allocb(sizeof (struct ppp_idle),
            BPRI_MED)) == NULL) {
            mutex_enter(&ppa->ppa_sta_lock);
            ppa->ppa_allocbfail++;
            mutex_exit(&ppa->ppa_sta_lock);
        if (mp->b_cont != NULL) {
        pip = (struct ppp_idle *)nmp->b_wptr;
        nmp->b_wptr += sizeof (struct ppp_idle);
        /*
         * Get current timestamp and subtract the tx and rx
         * timestamps to get the actual idle time to be
         */
        hrtime = gethrtime();
        pip->xmit_idle = (hrtime - ppa->ppa_lasttx) / 1000000000ul;
        pip->recv_idle = (hrtime - ppa->ppa_lastrx) / 1000000000ul;
        count = msgsize(nmp);
        break;                  /* return success (error is 0) */
        nmp = allocb(sizeof (uint32_t), BPRI_MED);
        if (mp->b_cont != NULL) {
        /*
         * Let the requestor know that we are the PPP
         * multiplexer (PPPTYP_MUX).
         */
        *(uint32_t *)nmp->b_wptr = PPPTYP_MUX;
        nmp->b_wptr += sizeof (uint32_t);
        count = msgsize(nmp);
        error = 0;              /* return success */
    case PPPIO_GETSTAT64:
            break;              /* return EINVAL */
        } else if ((ppa->ppa_lower_wq != NULL) &&
            !IS_PPA_LASTMOD(ppa)) {
            mutex_enter(&ppa->ppa_sta_lock);
            /*
             * We match sps_ioc_id on the M_IOC{ACK,NAK},
             * so if the response hasn't come back yet,
             * new ioctls must be queued instead.
             */
            if (IS_SPS_IOCQ(sps)) {
                mutex_exit(&ppa->ppa_sta_lock);
            ppa->ppa_ioctlsfwd++;
            /*
             * Record the ioctl CMD & ID - this will be
             * used to check the ACK or NAK responses
             */
            sps->sps_ioc_id = iop->ioc_id;
            sps->sps_flags |= SPS_IOCQ;
            mutex_exit(&ppa->ppa_sta_lock);
            putnext(ppa->ppa_lower_wq, mp);
            return;             /* don't ack or nak the request */
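            /*
             * The forwarded M_IOCTL is acknowledged later: when the
             * matching M_IOCACK/M_IOCNAK arrives from below,
             * sppp_recv_nondata() matches it by sps_ioc_id, clears
             * SPS_IOCQ and re-enables the upper write queue so that any
             * ioctls queued behind this one can proceed.
             */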
        nmp = allocb(sizeof (*psp), BPRI_MED);
            mutex_enter(&ppa->ppa_sta_lock);
            ppa->ppa_allocbfail++;
            mutex_exit(&ppa->ppa_sta_lock);
        if (mp->b_cont != NULL) {
        psp = (struct ppp_stats64 *)nmp->b_wptr;
        /*
         * Copy the contents of ppp_stats64 structure for this
         * ppa and return them to the caller.
         */
        mutex_enter(&ppa->ppa_sta_lock);
        bcopy(&ppa->ppa_stats, psp, sizeof (*psp));
        mutex_exit(&ppa->ppa_sta_lock);
        nmp->b_wptr += sizeof (*psp);
        count = sizeof (*psp);
        error = 0;              /* return success */
            break;              /* return EINVAL */
        } else if ((ppa->ppa_lower_wq != NULL) &&
            !IS_PPA_LASTMOD(ppa)) {
            mutex_enter(&ppa->ppa_sta_lock);
            /*
             * See comments in PPPIO_GETSTAT64 case
             */
            if (IS_SPS_IOCQ(sps)) {
                mutex_exit(&ppa->ppa_sta_lock);
            ppa->ppa_ioctlsfwd++;
            /*
             * Record the ioctl CMD & ID - this will be
             * used to check the ACK or NAK responses
             */
            sps->sps_ioc_id = iop->ioc_id;
            sps->sps_flags |= SPS_IOCQ;
            mutex_exit(&ppa->ppa_sta_lock);
            putnext(ppa->ppa_lower_wq, mp);
            return;             /* don't ack or nak the request */
        nmp = allocb(sizeof (struct ppp_comp_stats), BPRI_MED);
            mutex_enter(&ppa->ppa_sta_lock);
            ppa->ppa_allocbfail++;
            mutex_exit(&ppa->ppa_sta_lock);
        if (mp->b_cont != NULL) {
        pcsp = (struct ppp_comp_stats *)nmp->b_wptr;
        nmp->b_wptr += sizeof (struct ppp_comp_stats);
        bzero((caddr_t)pcsp, sizeof (struct ppp_comp_stats));
        count = msgsize(nmp);
        error = 0;              /* return success */

    /* Success; tell the user. */
    miocack(q, mp, count, 0);

    /* Failure; send error back upstream. */
    miocnak(q, mp, 0, error);
/*
 * shared inner, shared outer.
 *
 * Upper write-side put procedure. Messages from above arrive here.
 */
sppp_uwput(queue_t *q, mblk_t *mp)

    ASSERT(q != NULL && q->q_ptr != NULL);
    ASSERT(mp != NULL && mp->b_rptr != NULL);
    sps = (spppstr_t *)q->q_ptr;

        if (IS_SPS_CONTROL(sps)) {
            /*
             * Intentionally change this to a high priority
             * message so it doesn't get queued up. M_PROTO is
             * specifically used for signalling between pppd and its
             * kernel-level component(s), such as ppptun, so we
             * make sure that it doesn't get queued up behind
             */
            MTYPE(mp) = M_PCPROTO;
            if ((ppa->ppa_lower_wq != NULL) &&
                canputnext(ppa->ppa_lower_wq)) {
                mutex_enter(&ppa->ppa_sta_lock);
                mutex_exit(&ppa->ppa_sta_lock);
                putnext(ppa->ppa_lower_wq, mp);
                mutex_enter(&ppa->ppa_sta_lock);
                ppa->ppa_mctlsfwderr++;
                mutex_exit(&ppa->ppa_sta_lock);
            (void) sppp_mproto(q, mp, sps);
        if ((nextq = sppp_send(q, &mp, sps)) != NULL)
        iop = (struct iocblk *)mp->b_rptr;
        switch (iop->ioc_cmd) {
        case DL_IOC_HDR_INFO:
        case PPPIO_USETIMESTAMP:
        case PPPIO_UNBLOCKNP:
            qwriter(q, mp, sppp_inner_ioctl, PERIM_INNER);
            qwriter(q, mp, sppp_outer_ioctl, PERIM_OUTER);
        case PPPIO_GETSTAT64:
            /*
             * These require additional auto variables to
             * handle, so (for optimization reasons)
             * they're moved off to a separate function.
             */
            break;              /* 32 bit interface gone */
            if (iop->ioc_cr == NULL ||
                secpolicy_ppp_config(iop->ioc_cr) != 0) {
            } else if ((ppa == NULL) ||
                (ppa->ppa_lower_wq == NULL)) {
                break;          /* return EINVAL */
            mutex_enter(&ppa->ppa_sta_lock);
            /*
             * See comments in PPPIO_GETSTAT64 case
             */
            if (IS_SPS_IOCQ(sps)) {
                mutex_exit(&ppa->ppa_sta_lock);
            ppa->ppa_ioctlsfwd++;
            /*
             * Record the ioctl CMD & ID -
             * this will be used to check the
             * ACK or NAK responses coming from below.
             */
            sps->sps_ioc_id = iop->ioc_id;
            sps->sps_flags |= SPS_IOCQ;
            mutex_exit(&ppa->ppa_sta_lock);
            putnext(ppa->ppa_lower_wq, mp);
            return;             /* don't ack or nak the request */
        /* Failure; send error back upstream. */
        miocnak(q, mp, 0, error);
        if (*mp->b_rptr & FLUSHW) {
            flushq(q, FLUSHDATA);
        if (*mp->b_rptr & FLUSHR) {
            *mp->b_rptr &= ~FLUSHW;
/*
 * exclusive inner, shared outer.
 *
 * Upper write-side service procedure. Note that this procedure does
 * not get called when a message is placed on our write-side queue, since
 * automatic queue scheduling has been turned off by noenable() when
 * the queue was opened. We do this on purpose, as we explicitly control
 * the write-side queue. Therefore, this procedure gets called when
 * the lower write service procedure qenables the upper write stream queue.
 */
sppp_uwsrv(queue_t *q)

    ASSERT(q != NULL && q->q_ptr != NULL);
    sps = (spppstr_t *)q->q_ptr;

    while ((mp = getq(q)) != NULL) {
        if (MTYPE(mp) == M_IOCTL) {
            if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
                miocnak(q, mp, 0, EINVAL);
            iop = (struct iocblk *)mp->b_rptr;
            mutex_enter(&ppa->ppa_sta_lock);
            /*
             * See comments in PPPIO_GETSTAT64 case
             */
            if (IS_SPS_IOCQ(sps)) {
                mutex_exit(&ppa->ppa_sta_lock);
                if (putbq(q, mp) == 0)
                    miocnak(q, mp, 0, EAGAIN);
            ppa->ppa_ioctlsfwd++;
            sps->sps_ioc_id = iop->ioc_id;
            sps->sps_flags |= SPS_IOCQ;
            mutex_exit(&ppa->ppa_sta_lock);
            putnext(ppa->ppa_lower_wq, mp);
            sppp_outpkt(q, &mp, msgdsize(mp), sps)) == NULL) {
            if (putbq(q, mp) == 0)
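            /*
             * A NULL return from sppp_outpkt() means the packet cannot go
             * down right now (e.g. NPMODE_QUEUE or a flow-controlled lower
             * stream), so it is put back on the queue; the next qenable()
             * issued by sppp_lwsrv() will retry it.
             */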
sppp_remove_ppa(spppstr_t *sps)

    sppa_t *ppa = sps->sps_ppa;

    rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
    if (ppa->ppa_refcnt <= 1) {
        rw_exit(&ppa->ppa_sib_lock);
        nextsib = ppa->ppa_streams;
        if (nextsib == sps) {
            ppa->ppa_streams = sps->sps_nextsib;
            while (nextsib->sps_nextsib != NULL) {
                if (nextsib->sps_nextsib == sps) {
                    nextsib->sps_nextsib =
                nextsib = nextsib->sps_nextsib;
        /*
         * And if this stream was marked as promiscuous
         * (SPS_PROMISC), then we need to update the
         * promiscuous streams count. This should only happen
         * when DL_DETACH_REQ is issued prior to marking the
         * stream as non-promiscuous, through
         * DL_PROMISCOFF_REQ request.
         */
        if (IS_SPS_PROMISC(sps)) {
            ASSERT(ppa->ppa_promicnt > 0);
        rw_exit(&ppa->ppa_sib_lock);
    sps->sps_nextsib = NULL;
    freemsg(sps->sps_hangup);
    sps->sps_hangup = NULL;
sppp_find_ppa(uint32_t ppa_id)

    for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
        if (ppa->ppa_ppa_id == ppa_id) {
            break;              /* found the ppa */
/*
 * exclusive inner, shared outer
 *
 * Called by sppp_uwput as a result of receiving ioctls which require
 * an exclusive access at the inner perimeter.
 */
sppp_inner_ioctl(queue_t *q, mblk_t *mp)

    ASSERT(q != NULL && q->q_ptr != NULL);
    ASSERT(mp != NULL && mp->b_rptr != NULL);

    sps = (spppstr_t *)q->q_ptr;
    iop = (struct iocblk *)mp->b_rptr;
    switch (iop->ioc_cmd) {
        if (IS_SPS_CONTROL(sps)) {
            break;              /* return EINVAL */
        sps->sps_flags |= SPS_RAWDATA;
        error = 0;              /* return success */
    case DL_IOC_HDR_INFO:
        if (IS_SPS_CONTROL(sps)) {
            break;              /* return EINVAL */
        } else if ((mp->b_cont == NULL) ||
            *((t_uscalar_t *)mp->b_cont->b_rptr) != DL_UNITDATA_REQ ||
            (MBLKL(mp->b_cont) < (sizeof (dl_unitdata_req_t) +
        } else if (ppa == NULL) {
        if ((nmp = allocb(PPP_HDRLEN, BPRI_MED)) == NULL) {
            mutex_enter(&ppa->ppa_sta_lock);
            ppa->ppa_allocbfail++;
            mutex_exit(&ppa->ppa_sta_lock);
        *(uchar_t *)nmp->b_wptr++ = PPP_ALLSTATIONS;
        *(uchar_t *)nmp->b_wptr++ = PPP_UI;
        *(uchar_t *)nmp->b_wptr++ = sps->sps_sap >> 8;
        *(uchar_t *)nmp->b_wptr++ = sps->sps_sap & 0xff;
        ASSERT(MBLKL(nmp) == PPP_HDRLEN);
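        /*
         * The fast-path header built above is the standard 4-byte PPP
         * encapsulation: 0xff (PPP_ALLSTATIONS), 0x03 (PPP_UI), followed
         * by the stream's bound SAP (the PPP protocol number) in network
         * byte order.
         */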
        sps->sps_flags |= SPS_FASTPATH;
        error = 0;              /* return success */
        count = msgsize(nmp);
        if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
            (sps->sps_dlstate != DL_UNATTACHED) ||
            (iop->ioc_count != sizeof (uint32_t))) {
            break;              /* return EINVAL */
        } else if (mp->b_cont == NULL) {
        ASSERT(mp->b_cont->b_rptr != NULL);
        /* If there's something here, it's detached. */
            sppp_remove_ppa(sps);
        ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
        ppa = sppp_find_ppa(ppa_id);
        /*
         * If we can't find it, then it's either because the requestor
         * has supplied a wrong ppa_id to be attached to, or because
         * the control stream for the specified ppa_id has been closed
         * before we get here.
         */
        if (iop->ioc_cr == NULL ||
            ppa->ppa_zoneid != crgetzoneid(iop->ioc_cr)) {
        /*
         * Preallocate the hangup message so that we're always
         * able to send this upstream in the event of a
         * catastrophic failure.
         */
        if ((sps->sps_hangup = allocb(1, BPRI_MED)) == NULL) {
        /*
         * There are two ways to attach a stream to a ppa: one is
         * through DLPI (DL_ATTACH_REQ) and the other is through
         * PPPIO_ATTACH. This is why we need to distinguish whether or
         * not a stream was allocated via PPPIO_ATTACH, so that we can
         * properly detach it when we receive PPPIO_DETACH ioctl
         */
        sps->sps_flags |= SPS_PIOATTACH;
        /*
         * Add this stream to the head of the list of sibling streams
         * which belong to the same ppa as specified.
         */
        rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
        sps->sps_nextsib = ppa->ppa_streams;
        ppa->ppa_streams = sps;
        rw_exit(&ppa->ppa_sib_lock);
        error = 0;              /* return success */
    case PPPIO_UNBLOCKNP:
        if (iop->ioc_cr == NULL ||
            secpolicy_ppp_config(iop->ioc_cr) != 0) {
        error = miocpullup(mp, sizeof (uint16_t));
        ASSERT(mp->b_cont->b_rptr != NULL);
        proto = *(uint16_t *)mp->b_cont->b_rptr;
        if (iop->ioc_cmd == PPPIO_BLOCKNP) {
            uint32_t npflagpos = sppp_ppp2np(proto);
            /*
             * Mark proto as blocked in ppa_npflag until the
             * corresponding queues for proto have been plumbed.
             */
            if (npflagpos != 0) {
                mutex_enter(&ppa->ppa_npmutex);
                ppa->ppa_npflag |= (1 << npflagpos);
                mutex_exit(&ppa->ppa_npmutex);
            /*
             * reset ppa_npflag and release proto
             * packets that were being held in control queue.
             */
            sppp_release_pkts(ppa, proto);
        if (iop->ioc_cr == NULL ||
            secpolicy_ppp_config(iop->ioc_cr) != 0) {
        } else if (iop->ioc_count != sizeof (uint32_t)) {
            break;              /* return EINVAL */
        } else if (mp->b_cont == NULL) {
        ASSERT(mp->b_cont->b_rptr != NULL);
        dbgcmd = *(uint32_t *)mp->b_cont->b_rptr;
        /*
         * We accept PPPDBG_LOG + PPPDBG_DRIVER value as an indication
         * that SPS_KDEBUG needs to be enabled for this upper stream.
         */
        if (dbgcmd == PPPDBG_LOG + PPPDBG_DRIVER) {
            sps->sps_flags |= SPS_KDEBUG;
            error = 0;          /* return success */
        /*
         * Otherwise, for any other values, we send them down only if
         * there is an attachment and if the attachment has something
         * linked underneath it.
         */
        if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
        mutex_enter(&ppa->ppa_sta_lock);
        /*
         * See comments in PPPIO_GETSTAT64 case
         */
        if (IS_SPS_IOCQ(sps)) {
            mutex_exit(&ppa->ppa_sta_lock);
        ppa->ppa_ioctlsfwd++;
        /*
         * Record the ioctl CMD & ID -
         * this will be used to check the
         * ACK or NAK responses coming from below.
         */
        sps->sps_ioc_id = iop->ioc_id;
        sps->sps_flags |= SPS_IOCQ;
        mutex_exit(&ppa->ppa_sta_lock);
        putnext(ppa->ppa_lower_wq, mp);
        return;                 /* don't ack or nak the request */
        if (!IS_SPS_PIOATTACH(sps)) {
            break;              /* return EINVAL */
        /*
         * The SPS_PIOATTACH flag set on the stream tells us that
         * the ppa field is still valid. In the event that the control
         * stream be closed prior to this stream's detachment, the
         * SPS_PIOATTACH flag would have been cleared from this stream
         * during close; in that case we won't get here.
         */
        ASSERT(ppa != NULL);
        ASSERT(ppa->ppa_ctl != sps);
        ASSERT(sps->sps_dlstate == DL_UNATTACHED);
        /*
         * We don't actually detach anything until the stream is
         * closed or reattached.
         */
        sps->sps_flags &= ~SPS_PIOATTACH;
        error = 0;              /* return success */
        if (!IS_SPS_CONTROL(sps)) {
            break;              /* return EINVAL */
        ASSERT(ppa != NULL);
        ppa->ppa_flags |= PPA_LASTMOD;
        error = 0;              /* return success */
        if (!IS_SPS_CONTROL(sps) ||
            (iop->ioc_count != sizeof (uint32_t))) {
            break;              /* return EINVAL */
        } else if (mp->b_cont == NULL) {
        ASSERT(ppa != NULL);
        ASSERT(mp->b_cont->b_rptr != NULL);
        mru = *(uint32_t *)mp->b_cont->b_rptr;
        if ((mru <= 0) || (mru > PPP_MAXMRU)) {
        if (mru < PPP_MRU) {
        ppa->ppa_mru = (uint16_t)mru;
        /*
         * If there's something beneath this driver for the ppa, then
         * inform it (or them) of the MRU size. Only do this if we
         * are not the last PPP module on the stream.
         */
        if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
            (void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MRU,
        error = 0;              /* return success */
        if (!IS_SPS_CONTROL(sps) ||
            (iop->ioc_count != sizeof (uint32_t))) {
            break;              /* return EINVAL */
        } else if (mp->b_cont == NULL) {
        ASSERT(ppa != NULL);
        ASSERT(mp->b_cont->b_rptr != NULL);
        mtu = *(uint32_t *)mp->b_cont->b_rptr;
        if ((mtu <= 0) || (mtu > PPP_MAXMTU)) {
        ppa->ppa_mtu = (uint16_t)mtu;
        /*
         * If there's something beneath this driver for the ppa, then
         * inform it (or them) of the MTU size. Only do this if we
         * are not the last PPP module on the stream.
         */
        if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
            (void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MTU,
        error = 0;              /* return success */
    case PPPIO_USETIMESTAMP:
        if (!IS_SPS_CONTROL(sps)) {
            break;              /* return EINVAL */
        if (!IS_PPA_TIMESTAMP(ppa)) {
            hrtime = gethrtime();
            ppa->ppa_lasttx = ppa->ppa_lastrx = hrtime;
            ppa->ppa_flags |= PPA_TIMESTAMP;

    /* Success; tell the user */
    miocack(q, mp, count, 0);

    /* Failure; send error back upstream */
    miocnak(q, mp, 0, error);
/*
 * sppp_outer_ioctl()
 *
 * exclusive inner, exclusive outer
 *
 * Called by sppp_uwput as a result of receiving ioctls which require
 * an exclusive access at the outer perimeter.
 */
sppp_outer_ioctl(queue_t *q, mblk_t *mp)

    spppstr_t *sps = q->q_ptr;

    sps = (spppstr_t *)q->q_ptr;
    iop = (struct iocblk *)mp->b_rptr;
    switch (iop->ioc_cmd) {
        if (!IS_SPS_CONTROL(sps)) {
            break;              /* return EINVAL */
        } else if (ppa->ppa_lower_wq != NULL) {
        ASSERT(ppa->ppa_ctl != NULL);
        ASSERT(sps->sps_npmode == NPMODE_PASS);
        ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);

        lwq = ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot;
        ASSERT(lwq != NULL);

        ppa->ppa_lower_wq = lwq;
        lwq->q_ptr = RD(lwq)->q_ptr = (caddr_t)ppa;
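        /*
         * From here on, ppa_lower_wq is the linked driver's write queue,
         * and both lower queues' q_ptr point back at the ppa; this
         * back-pointer is how sppp_lrput()/sppp_lrsrv() find the ppa for
         * messages arriving from below.
         */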
        /*
         * Unblock upper network streams which now feed this lower
         * stream. We don't need to hold ppa_sib_lock here, since we
         * are writer at the outer perimeter.
         */
        if (WR(sps->sps_rq)->q_first != NULL)
            qenable(WR(sps->sps_rq));
        for (nextsib = ppa->ppa_streams; nextsib != NULL;
            nextsib = nextsib->sps_nextsib) {
            nextsib->sps_npmode = NPMODE_PASS;
            if (WR(nextsib->sps_rq)->q_first != NULL) {
                qenable(WR(nextsib->sps_rq));
        /*
         * Also unblock (run once) our lower read-side queue. This is
         * where packets received while doing the I_LINK may be
         * languishing; see sppp_lrsrv.
         */
        /*
         * Send useful information down to the modules which are now
         * linked below this driver (for this particular ppa). Only
         * do this if we are not the last PPP module on the stream.
         */
        if (!IS_PPA_LASTMOD(ppa)) {
            (void) putctl8(lwq, M_CTL, PPPCTL_UNIT,
            (void) putctl4(lwq, M_CTL, PPPCTL_MRU, ppa->ppa_mru);
            (void) putctl4(lwq, M_CTL, PPPCTL_MTU, ppa->ppa_mtu);

        if (IS_SPS_KDEBUG(sps)) {
            SPDEBUG(PPP_DRV_NAME
                "/%d: I_LINK lwq=0x%p sps=0x%p flags=0x%b ppa=0x%p "
                "flags=0x%b\n", sps->sps_mn_id,
                (void *)ppa->ppa_lower_wq, (void *)sps,
                sps->sps_flags, SPS_FLAGS_STR,
                (void *)ppa, ppa->ppa_flags,
        error = 0;              /* return success */
        ASSERT(IS_SPS_CONTROL(sps));
        ASSERT(ppa != NULL);
        lwq = ppa->ppa_lower_wq;
        ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);
        ASSERT(lwq == ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot);

        if (IS_SPS_KDEBUG(sps)) {
            SPDEBUG(PPP_DRV_NAME
                "/%d: I_UNLINK lwq=0x%p sps=0x%p flags=0x%b "
                "ppa=0x%p flags=0x%b\n", sps->sps_mn_id,
                (void *)lwq, (void *)sps, sps->sps_flags,
                SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
        /*
         * While accessing the outer perimeter exclusively, we
         * disassociate our ppa's lower_wq from the lower stream linked
         * beneath us, and we also disassociate our control stream from
         * the q_ptr of the lower stream.
         */
        lwq->q_ptr = RD(lwq)->q_ptr = NULL;
        ppa->ppa_lower_wq = NULL;
        /*
         * Unblock streams which now feed back up the control stream,
         * and acknowledge the request. We don't need to hold
         * ppa_sib_lock here, since we are writer at the outer
         */
        if (WR(sps->sps_rq)->q_first != NULL)
            qenable(WR(sps->sps_rq));
        for (nextsib = ppa->ppa_streams; nextsib != NULL;
            nextsib = nextsib->sps_nextsib) {
            if (WR(nextsib->sps_rq)->q_first != NULL) {
                qenable(WR(nextsib->sps_rq));
        error = 0;              /* return success */
        /*
         * Do sanity check to ensure that we don't accept PPPIO_NEWPPA
         * on a stream on which DLPI is used (since certain DLPI messages
         * will cause state transition reflected in sps_dlstate,
         * changing it from its default DL_UNATTACHED value). In other
         * words, we won't allow a network/snoop stream to become
         */
        if (iop->ioc_cr == NULL ||
            secpolicy_ppp_config(iop->ioc_cr) != 0) {
        } else if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
            (ppa != NULL) || (sps->sps_dlstate != DL_UNATTACHED)) {
            break;              /* return EINVAL */
        /* Get requested unit number (if any) */
        if (iop->ioc_count == sizeof (uint32_t) && mp->b_cont != NULL)
            ppa_id = *(uint32_t *)mp->b_cont->b_rptr;

        /* Get mblk to use for response message */
        nmp = allocb(sizeof (uint32_t), BPRI_MED);
        if (mp->b_cont != NULL) {
            freemsg(mp->b_cont);
        mp->b_cont = nmp;       /* chain our response mblk */
        /*
         * Walk the global ppa list and determine the lowest
         * available ppa_id number to be used.
         */
        if (ppa_id == (uint32_t)-1)
        zoneid = crgetzoneid(iop->ioc_cr);
        for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
            if (ppa_id == (uint32_t)-2) {
                if (ppa->ppa_ctl == NULL &&
                    ppa->ppa_zoneid == zoneid)
            if (ppa_id < ppa->ppa_ppa_id)
            if (ppa_id == ppa->ppa_ppa_id)
        if (ppa_id == (uint32_t)-2) {
            /* Clear timestamp and lastmod flags */
        ppa = sppp_create_ppa(ppa_id, zoneid);

        sps->sps_ppa = ppa;             /* chain the ppa structure */
        sps->sps_npmode = NPMODE_PASS;  /* network packets may travel */
        sps->sps_flags |= SPS_CONTROL;  /* this is the control stream */

        ppa->ppa_refcnt++;              /* new PPA reference */
        ppa->ppa_ctl = sps;             /* back ptr to upper stream */
        /*
         * Return the newly created ppa_id to the requestor and
         * acknowledge the request.
         */
        *(uint32_t *)nmp->b_wptr = ppa->ppa_ppa_id;
        nmp->b_wptr += sizeof (uint32_t);

        if (IS_SPS_KDEBUG(sps)) {
            SPDEBUG(PPP_DRV_NAME
                "/%d: PPPIO_NEWPPA ppa_id=%d sps=0x%p flags=0x%b "
                "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, ppa_id,
                (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
                (void *)ppa, ppa->ppa_flags,
        count = msgsize(nmp);

    /* Success; tell the user. */
    miocack(q, mp, count, 0);

    /* Failure; send error back upstream. */
    miocnak(q, mp, 0, error);
/*
 * shared inner, shared outer.
 *
 * Called by sppp_uwput to handle M_DATA message type. Returns
 * queue_t for putnext, or NULL to mean that the packet was
 * handled internally.
 */
sppp_send(queue_t *q, mblk_t **mpp, spppstr_t *sps)

    ASSERT(mpp != NULL);
    ASSERT(q != NULL && q->q_ptr != NULL);
    ASSERT(mp != NULL && mp->b_rptr != NULL);
    ASSERT(sps != NULL);
    ASSERT(q->q_ptr == sps);
    /*
     * We only let M_DATA through if the sender is either the control
     * stream (for PPP control packets) or one of the network streams
     * (for IP packets) in IP fastpath mode. If this stream is not attached
     * to any ppas, then discard data coming down through this stream.
     */
        ASSERT(!IS_SPS_CONTROL(sps));
    } else if (!IS_SPS_CONTROL(sps) && !IS_SPS_FASTPATH(sps)) {
        merror(q, mp, error);
    msize = msgdsize(mp);
    if (msize > (ppa->ppa_mtu + PPP_HDRLEN)) {
        /* Log, and send it anyway */
        mutex_enter(&ppa->ppa_sta_lock);
        ppa->ppa_otoolongs++;
        mutex_exit(&ppa->ppa_sta_lock);
    } else if (msize < PPP_HDRLEN) {
        /*
         * Log, and send it anyway. We log it because we get things
         * in M_DATA form here, which tells us that the sender is
         * either IP in fastpath transmission mode, or pppd. In both
         * cases, they are currently expected to send the 4-bytes
         * PPP header in front of any possible payloads.
         */
        mutex_enter(&ppa->ppa_sta_lock);
        mutex_exit(&ppa->ppa_sta_lock);

    if (IS_SPS_KDEBUG(sps)) {
        SPDEBUG(PPP_DRV_NAME
            "/%d: M_DATA send (%d bytes) sps=0x%p flags=0x%b "
            "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, msize,
            (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
            (void *)ppa, ppa->ppa_flags, PPA_FLAGS_STR);
    /*
     * Should there be any promiscuous stream(s), send the data up
     * for each promiscuous stream that we recognize. Make sure that
     * for fastpath, we skip the PPP header in the M_DATA mblk. We skip
     * the control stream as we obviously never allow the control stream
     * to become promiscuous and bind to PPP_ALLSAP.
     */
    rw_enter(&ppa->ppa_sib_lock, RW_READER);
    is_promisc = sps->sps_ppa->ppa_promicnt;
        ASSERT(ppa->ppa_streams != NULL);
        sppp_dlprsendup(ppa->ppa_streams, mp, sps->sps_sap, B_TRUE);
    rw_exit(&ppa->ppa_sib_lock);
    /*
     * Only time-stamp the packet with hrtime if the upper stream
     * is configured to do so. PPP control (negotiation) messages
     * are never considered link activity; only data is activity.
     */
    if (!IS_SPS_CONTROL(sps) && IS_PPA_TIMESTAMP(ppa)) {
        ppa->ppa_lasttx = gethrtime();
    /*
     * If there's already a message in the write-side service queue,
     * then queue this message there as well, otherwise, try to send
     * it down to the module immediately below us.
     */
    if (q->q_first != NULL ||
        (nextq = sppp_outpkt(q, mpp, msize, sps)) == NULL) {
        if (mp != NULL && putq(q, mp) == 0) {
            mutex_enter(&ppa->ppa_sta_lock);
            ppa->ppa_oqdropped++;
            mutex_exit(&ppa->ppa_sta_lock);
/*
 * shared inner, shared outer (if called from sppp_wput, sppp_dlunitdatareq).
 * exclusive inner, shared outer (if called from sppp_wsrv).
 *
 * Called from 1) sppp_uwput when processing a M_DATA fastpath message,
 * or 2) sppp_uwsrv when processing the upper write-side service queue.
 * For both cases, it prepares to send the data to the module below
 * this driver if there is a lower stream linked underneath. If none, then
 * the data will be sent upstream via the control channel to pppd.
 *
 * Non-NULL queue_t if message should be sent now, otherwise
 * if *mpp == NULL, then message was freed, otherwise put *mpp
 * (back) on the queue. (Does not do putq/putbq, since it's
 * called both from srv and put procedures.)
 */
sppp_outpkt(queue_t *q, mblk_t **mpp, int msize, spppstr_t *sps)

    ASSERT(mpp != NULL);
    ASSERT(q != NULL && q->q_ptr != NULL);
    ASSERT(mp != NULL && mp->b_rptr != NULL);
    ASSERT(sps != NULL);

    npmode = sps->sps_npmode;

    if (npmode == NPMODE_QUEUE) {
        ASSERT(!IS_SPS_CONTROL(sps));
        return (NULL);          /* queue it for later */
    } else if (ppa == NULL || ppa->ppa_ctl == NULL ||
        npmode == NPMODE_DROP || npmode == NPMODE_ERROR) {
        /*
         * This can not be the control stream, as it must always have
         * a valid ppa, and its npmode must always be NPMODE_PASS.
         */
        ASSERT(!IS_SPS_CONTROL(sps));
        if (npmode == NPMODE_DROP) {
            /*
             * If we no longer have the control stream, or if the
             * mode is set to NPMODE_ERROR, then we need to tell IP
             * that the interface needs to be marked as down. In
             * other words, we tell IP to be quiescent.
             */
            merror(q, mp, EPROTO);
        return (NULL);          /* don't queue it */
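    /*
     * Per-protocol mode summary for the checks above: NPMODE_QUEUE leaves
     * the packet on the upper write queue for later, NPMODE_DROP discards
     * it, NPMODE_ERROR additionally reports EPROTO upstream via merror(),
     * and NPMODE_PASS falls through to the transmit path below.
     */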
    /*
     * Do we have a driver stream linked underneath ? If not, we need to
     * notify pppd that the link needs to be brought up and configure
     * this upper stream to drop subsequent outgoing packets. This is
     * for demand-dialing, in which case pppd has done the IP plumbing
     * but hasn't linked the driver stream underneath us. Therefore, when
     * a packet is sent down the IP interface, a notification message
     * will be sent up the control stream to pppd in order for it to
     * establish the physical link. The driver stream is then expected
     * to be linked underneath after physical link establishment is done.
     */
    if (ppa->ppa_lower_wq == NULL) {
        ASSERT(ppa->ppa_ctl != NULL);
        ASSERT(ppa->ppa_ctl->sps_rq != NULL);

        mpnew = create_lsmsg(PPP_LINKSTAT_NEEDUP);
        if (mpnew == NULL) {
            mutex_enter(&ppa->ppa_sta_lock);
            ppa->ppa_allocbfail++;
            mutex_exit(&ppa->ppa_sta_lock);
            return (NULL);      /* don't queue it */
        /* Include the data in the message for logging. */
        mutex_enter(&ppa->ppa_sta_lock);
        ppa->ppa_lsneedup++;
        mutex_exit(&ppa->ppa_sta_lock);
        /*
         * We need to set the mode to NPMODE_DROP, but should only
         * do so when this stream is not the control stream.
         */
        if (!IS_SPS_CONTROL(sps)) {
            sps->sps_npmode = NPMODE_DROP;
        putnext(ppa->ppa_ctl->sps_rq, mpnew);
        return (NULL);          /* don't queue it */
    /*
     * If so, then try to send it down. The lower queue is only ever
     * detached while holding an exclusive lock on the whole driver,
     * so we can be confident that the lower queue is still there.
     */
    if (bcanputnext(ppa->ppa_lower_wq, mp->b_band)) {
        mutex_enter(&ppa->ppa_sta_lock);
        ppa->ppa_stats.p.ppp_opackets++;
        if (IS_SPS_CONTROL(sps)) {
            ppa->ppa_opkt_ctl++;
        ppa->ppa_stats.p.ppp_obytes += msize;
        mutex_exit(&ppa->ppa_sta_lock);
        return (ppa->ppa_lower_wq);     /* don't queue it */
    return (NULL);              /* queue it for later */
/*
 * exclusive inner, shared outer.
 *
 * Lower write-side service procedure. No messages are ever placed on
 * the write queue here, this just back-enables all upper write side
 * service procedures.
 */
sppp_lwsrv(queue_t *q)

    ASSERT(q != NULL && q->q_ptr != NULL);
    ppa = (sppa_t *)q->q_ptr;
    ASSERT(ppa != NULL);

    rw_enter(&ppa->ppa_sib_lock, RW_READER);
    if ((nextsib = ppa->ppa_ctl) != NULL &&
        WR(nextsib->sps_rq)->q_first != NULL)
        qenable(WR(nextsib->sps_rq));
    for (nextsib = ppa->ppa_streams; nextsib != NULL;
        nextsib = nextsib->sps_nextsib) {
        if (WR(nextsib->sps_rq)->q_first != NULL) {
            qenable(WR(nextsib->sps_rq));
    rw_exit(&ppa->ppa_sib_lock);
/*
 * shared inner, shared outer.
 *
 * Lower read-side put procedure. Messages from below get here.
 * Data messages are handled separately to limit stack usage
 *
 * Note that during I_UNLINK processing, it's possible for a downstream
 * message to enable upstream data (due to pass_wput() removing the
 * SQ_BLOCKED flag), and thus we must protect against a NULL sppa pointer.
 * In this case, the only thing above us is passthru, and we might as well
 */
sppp_lrput(queue_t *q, mblk_t *mp)

    if ((ppa = q->q_ptr) == NULL) {

    if (MTYPE(mp) != M_DATA) {
        sppp_recv_nondata(q, mp, sps);
    } else if (sps == NULL) {
    } else if ((q = sppp_recv(q, &mp, sps)) != NULL) {
/*
 * exclusive inner, shared outer.
 *
 * Lower read-side service procedure. This is run once after the I_LINK
 * occurs in order to clean up any packets that came in while we were
 * transferring in the lower stream. Otherwise, it's not used.
 */
sppp_lrsrv(queue_t *q)

    while ((mp = getq(q)) != NULL)
/*
 * sppp_recv_nondata()
 *
 * shared inner, shared outer.
 *
 * All received non-data messages come through here.
 */
sppp_recv_nondata(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)

    ppa = (sppa_t *)q->q_ptr;
    ctlsps = ppa->ppa_ctl;

    switch (MTYPE(mp)) {
        mutex_enter(&ppa->ppa_sta_lock);
        if (*mp->b_rptr == PPPCTL_IERROR) {
            ppa->ppa_stats.p.ppp_ierrors++;
            ppa->ppa_ierr_low++;
            ppa->ppa_mctlsknown++;
        } else if (*mp->b_rptr == PPPCTL_OERROR) {
            ppa->ppa_stats.p.ppp_oerrors++;
            ppa->ppa_oerr_low++;
            ppa->ppa_mctlsknown++;
            ppa->ppa_mctlsunknown++;
        mutex_exit(&ppa->ppa_sta_lock);
            miocnak(q, mp, 0, EINVAL);
        iop = (struct iocblk *)mp->b_rptr;
        ASSERT(iop != NULL);
        /*
         * Attempt to match up the response with the stream that the
         * request came from. If ioc_id doesn't match the one that we
         * recorded, then discard this message.
         */
        rw_enter(&ppa->ppa_sib_lock, RW_READER);
        if ((destsps = ctlsps) == NULL ||
            destsps->sps_ioc_id != iop->ioc_id) {
            destsps = ppa->ppa_streams;
            while (destsps != NULL) {
                if (destsps->sps_ioc_id == iop->ioc_id) {
                    break;      /* found the upper stream */
                destsps = destsps->sps_nextsib;
        rw_exit(&ppa->ppa_sib_lock);
        if (destsps == NULL) {
            mutex_enter(&ppa->ppa_sta_lock);
            ppa->ppa_ioctlsfwderr++;
            mutex_exit(&ppa->ppa_sta_lock);
        mutex_enter(&ppa->ppa_sta_lock);
        ppa->ppa_ioctlsfwdok++;
        /*
         * Clear SPS_IOCQ and enable the lower write side queue,
         * this would allow the upper stream service routine
         * to start processing the queue for pending messages.
         * sppp_lwsrv -> sppp_uwsrv.
         */
        destsps->sps_flags &= ~SPS_IOCQ;
        mutex_exit(&ppa->ppa_sta_lock);
        qenable(WR(destsps->sps_rq));

        putnext(destsps->sps_rq, mp);
        /*
         * Free the original mblk_t. We don't really want to send
         * a M_HANGUP message upstream, so we need to translate this
         * message into something else.
         */
        mp = create_lsmsg(PPP_LINKSTAT_HANGUP);
            mutex_enter(&ppa->ppa_sta_lock);
            ppa->ppa_allocbfail++;
            mutex_exit(&ppa->ppa_sta_lock);
        mutex_enter(&ppa->ppa_sta_lock);
        mutex_exit(&ppa->ppa_sta_lock);
        putnext(ctlsps->sps_rq, mp);
        if (*mp->b_rptr & FLUSHR) {
            flushq(q, FLUSHDATA);
        if (*mp->b_rptr & FLUSHW) {
            *mp->b_rptr &= ~FLUSHR;
        if (ctlsps != NULL &&
            (queclass(mp) == QPCTL) || canputnext(ctlsps->sps_rq)) {
            putnext(ctlsps->sps_rq, mp);
            mutex_enter(&ppa->ppa_sta_lock);
            ppa->ppa_iqdropped++;
            mutex_exit(&ppa->ppa_sta_lock);
/*
 * shared inner, shared outer.
 *
 * Receive function called by sppp_lrput. Finds appropriate
 * receive stream and does accounting.
 */
sppp_recv(queue_t *q, mblk_t **mpp, spppstr_t *ctlsps)

    ASSERT(mpp != NULL);
    ASSERT(q != NULL && q->q_ptr != NULL);
    ASSERT(mp != NULL && mp->b_rptr != NULL);
    ASSERT(ctlsps != NULL);
    ASSERT(IS_SPS_CONTROL(ctlsps));
    ppa = ctlsps->sps_ppa;
    ASSERT(ppa != NULL && ppa->ppa_ctl != NULL);

    mutex_enter(&ppa->ppa_sta_lock);
    ppa->ppa_stats.p.ppp_ibytes += len;
    mutex_exit(&ppa->ppa_sta_lock);
    /*
     * If the entire data size of the mblk is less than the length of the
     * PPP header, then free it. We can't do much with such a message
     * anyway, since we can't really determine what the PPP protocol type is.
     */
    if (len < PPP_HDRLEN) {
        /* Log, and free it */
        mutex_enter(&ppa->ppa_sta_lock);
        mutex_exit(&ppa->ppa_sta_lock);
    } else if (len > (ppa->ppa_mru + PPP_HDRLEN)) {
        /* Log, and accept it anyway */
        mutex_enter(&ppa->ppa_sta_lock);
        ppa->ppa_itoolongs++;
        mutex_exit(&ppa->ppa_sta_lock);
    /*
     * We need at least be able to read the PPP protocol from the header,
     * so if the first message block is too small, then we concatenate the
     * rest of the following blocks into one message.
     */
    if (MBLKL(mp) < PPP_HDRLEN) {
        zmp = msgpullup(mp, PPP_HDRLEN);
            mutex_enter(&ppa->ppa_sta_lock);
            ppa->ppa_allocbfail++;
            mutex_exit(&ppa->ppa_sta_lock);
    /*
     * Hold this packet in the control queue while the matching
     * network-layer upper stream for the PPP protocol (sap)
     * has not yet been plumbed and configured
     */
    npflagpos = sppp_ppp2np(PPP_PROTOCOL(mp->b_rptr));
    mutex_enter(&ppa->ppa_npmutex);
    if (npflagpos != 0 && (ppa->ppa_npflag & (1 << npflagpos))) {
        /*
         * proto is currently blocked; Hold up to 4 packets
         */
        if (ppa->ppa_holdpkts[npflagpos] > 3 ||
            putq(ctlsps->sps_rq, mp) == 0)
        ppa->ppa_holdpkts[npflagpos]++;
        mutex_exit(&ppa->ppa_npmutex);
    mutex_exit(&ppa->ppa_npmutex);
    /*
     * Try to find a matching network-layer upper stream for the specified
     * PPP protocol (sap), and if none is found, send this frame up the
     */
    destsps = sppp_inpkt(q, mp, ctlsps);
    if (destsps == NULL) {
        mutex_enter(&ppa->ppa_sta_lock);
        ppa->ppa_ipkt_ctl++;
        mutex_exit(&ppa->ppa_sta_lock);
        if (canputnext(ctlsps->sps_rq)) {
            if (IS_SPS_KDEBUG(ctlsps)) {
                SPDEBUG(PPP_DRV_NAME
                    "/%d: M_DATA recv (%d bytes) sps=0x%p "
                    "flags=0x%b ppa=0x%p flags=0x%b\n",
                    ctlsps->sps_mn_id, len, (void *)ctlsps,
                    ctlsps->sps_flags, SPS_FLAGS_STR,
                    (void *)ppa, ppa->ppa_flags,
            return (ctlsps->sps_rq);
        mutex_enter(&ppa->ppa_sta_lock);
        ppa->ppa_iqdropped++;
        mutex_exit(&ppa->ppa_sta_lock);

    if (canputnext(destsps->sps_rq)) {
        if (IS_SPS_KDEBUG(destsps)) {
            SPDEBUG(PPP_DRV_NAME
                "/%d: M_DATA recv (%d bytes) sps=0x%p flags=0x%b "
                "ppa=0x%p flags=0x%b\n", destsps->sps_mn_id, len,
                (void *)destsps, destsps->sps_flags,
                SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
        /*
         * If fastpath is enabled on the network-layer stream, then
         * make sure we skip over the PPP header, otherwise, we wrap
         * the message in a DLPI message.
         */
        if (IS_SPS_FASTPATH(destsps)) {
            mp->b_rptr += PPP_HDRLEN;
            return (destsps->sps_rq);
            spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
            ASSERT(uqs != NULL);
            mp->b_rptr += PPP_HDRLEN;
            mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
                return (destsps->sps_rq);
                mutex_enter(&ppa->ppa_sta_lock);
                ppa->ppa_allocbfail++;
                mutex_exit(&ppa->ppa_sta_lock);
                /* mp already freed by sppp_dladdud */
        mutex_enter(&ppa->ppa_sta_lock);
        ppa->ppa_iqdropped++;
        mutex_exit(&ppa->ppa_sta_lock);
/*
 * shared inner, shared outer.
 *
 * Find the destination upper stream for the received packet, called
 *
 * ptr to destination upper network stream, or NULL for control stream.
 */
sppp_inpkt(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)

    spppstr_t *destsps = NULL;

    ASSERT(q != NULL && q->q_ptr != NULL);
    ASSERT(mp != NULL && mp->b_rptr != NULL);
    ASSERT(IS_SPS_CONTROL(ctlsps));
    ppa = ctlsps->sps_ppa;
    ASSERT(ppa != NULL);
    /*
     * From RFC 1661 (Section 2):
     *
     * The Protocol field is one or two octets, and its value identifies
     * the datagram encapsulated in the Information field of the packet.
     * The field is transmitted and received most significant octet first.
     *
     * The structure of this field is consistent with the ISO 3309
     * extension mechanism for address fields. All Protocols MUST be odd;
     * the least significant bit of the least significant octet MUST equal
     * "1". Also, all Protocols MUST be assigned such that the least
     * significant bit of the most significant octet equals "0". Frames
     * received which don't comply with these rules MUST be treated as
     * having an unrecognized Protocol.
     *
     * Protocol field values in the "0***" to "3***" range identify the
     * network-layer protocol of specific packets, and values in the
     * "8***" to "b***" range identify packets belonging to the associated
     * Network Control Protocols (NCPs), if any.
     *
     * Protocol field values in the "4***" to "7***" range are used for
     * protocols with low volume traffic which have no associated NCP.
     * Protocol field values in the "c***" to "f***" range identify packets
     * as link-layer Control Protocols (such as LCP).
     */
    proto = PPP_PROTOCOL(mp->b_rptr);
    mutex_enter(&ppa->ppa_sta_lock);
    ppa->ppa_stats.p.ppp_ipackets++;
    mutex_exit(&ppa->ppa_sta_lock);
    /*
     * We check if this is not a network-layer protocol, and if so,
     * then send this packet up the control stream.
     */
    if (proto > 0x7fff) {
        goto inpkt_done;        /* send it up the control stream */
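    /*
     * Everything at or above 0x8000 is a control-class protocol; e.g.
     * IPCP (0x8021), LCP (0xc021), PAP (0xc023) and CHAP (0xc223) all
     * take this path and are handed to pppd via the control stream.
     */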
    /*
     * Try to grab the destination upper stream from the network-layer
     * stream cache for this ppa for PPP_IP (0x0021) or PPP_IPV6 (0x0057)
     * protocol types. Otherwise, if the type is not known to the cache,
     * or if its sap can't be matched with any of the upper streams, then
     * send this packet up the control stream so that it can be rejected.
     */
    if (proto == PPP_IP) {
        destsps = ppa->ppa_ip_cache;
    } else if (proto == PPP_IPV6) {
        destsps = ppa->ppa_ip6_cache;
    /*
     * Toss this one away up the control stream if there's no matching sap;
     * this way the protocol can be rejected (destsps is NULL).
     */
    /*
     * Only time-stamp the packet with hrtime if the upper stream
     * is configured to do so. PPP control (negotiation) messages
     * are never considered link activity; only data is activity.
     */
    if (destsps != NULL && IS_PPA_TIMESTAMP(ppa)) {
        ppa->ppa_lastrx = gethrtime();
    /*
     * Should there be any promiscuous stream(s), send the data up for
     * each promiscuous stream that we recognize. We skip the control
     * stream as we obviously never allow the control stream to become
     * promiscuous and bind to PPP_ALLSAP.
     */
    rw_enter(&ppa->ppa_sib_lock, RW_READER);
    is_promisc = ppa->ppa_promicnt;
        ASSERT(ppa->ppa_streams != NULL);
        sppp_dlprsendup(ppa->ppa_streams, mp, proto, B_TRUE);
    rw_exit(&ppa->ppa_sib_lock);
/*
 * sppp_kstat_update()
 *
 * Update per-ppa kstat interface statistics.
 */
sppp_kstat_update(kstat_t *ksp, int rw)

    register sppa_t             *ppa;
    register sppp_kstats_t      *pppkp;
    register struct pppstat64   *sp;

    if (rw == KSTAT_WRITE) {

    ppa = (sppa_t *)ksp->ks_private;
    ASSERT(ppa != NULL);

    pppkp = (sppp_kstats_t *)ksp->ks_data;
    sp = &ppa->ppa_stats.p;

    mutex_enter(&ppa->ppa_sta_lock);
    pppkp->allocbfail.value.ui32 = ppa->ppa_allocbfail;
    pppkp->mctlsfwd.value.ui32 = ppa->ppa_mctlsfwd;
    pppkp->mctlsfwderr.value.ui32 = ppa->ppa_mctlsfwderr;
    pppkp->rbytes.value.ui32 = sp->ppp_ibytes;
    pppkp->rbytes64.value.ui64 = sp->ppp_ibytes;
    pppkp->ierrors.value.ui32 = sp->ppp_ierrors;
    pppkp->ierrors_lower.value.ui32 = ppa->ppa_ierr_low;
    pppkp->ioctlsfwd.value.ui32 = ppa->ppa_ioctlsfwd;
    pppkp->ioctlsfwdok.value.ui32 = ppa->ppa_ioctlsfwdok;
    pppkp->ioctlsfwderr.value.ui32 = ppa->ppa_ioctlsfwderr;
    pppkp->ipackets.value.ui32 = sp->ppp_ipackets;
    pppkp->ipackets64.value.ui64 = sp->ppp_ipackets;
    pppkp->ipackets_ctl.value.ui32 = ppa->ppa_ipkt_ctl;
    pppkp->iqdropped.value.ui32 = ppa->ppa_iqdropped;
    pppkp->irunts.value.ui32 = ppa->ppa_irunts;
    pppkp->itoolongs.value.ui32 = ppa->ppa_itoolongs;
    pppkp->lsneedup.value.ui32 = ppa->ppa_lsneedup;
    pppkp->lsdown.value.ui32 = ppa->ppa_lsdown;
    pppkp->mctlsknown.value.ui32 = ppa->ppa_mctlsknown;
    pppkp->mctlsunknown.value.ui32 = ppa->ppa_mctlsunknown;
    pppkp->obytes.value.ui32 = sp->ppp_obytes;
    pppkp->obytes64.value.ui64 = sp->ppp_obytes;
    pppkp->oerrors.value.ui32 = sp->ppp_oerrors;
    pppkp->oerrors_lower.value.ui32 = ppa->ppa_oerr_low;
    pppkp->opackets.value.ui32 = sp->ppp_opackets;
    pppkp->opackets64.value.ui64 = sp->ppp_opackets;
    pppkp->opackets_ctl.value.ui32 = ppa->ppa_opkt_ctl;
    pppkp->oqdropped.value.ui32 = ppa->ppa_oqdropped;
    pppkp->otoolongs.value.ui32 = ppa->ppa_otoolongs;
    pppkp->orunts.value.ui32 = ppa->ppa_orunts;
    mutex_exit(&ppa->ppa_sta_lock);
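    /*
     * The *.ui32 counters above are 32-bit views of the same values that
     * the *64 counters (rbytes64, ipackets64, obytes64, opackets64) carry
     * in full, taken from the 64-bit ppp_stats64 block.
     */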
/*
 * Turn off proto in ppa_npflag to indicate that
 * the corresponding network protocol has been plumbed.
 * Release proto packets that were being held in the control
 * queue in anticipation of this event.
 */
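/*
 * Note: these are the packets that sppp_recv() parked on the control
 * stream's read queue while the protocol was marked in ppa_npflag (at
 * most four per protocol, counted in ppa_holdpkts[]); they are re-routed
 * to the now-plumbed upper stream, or dropped if it cannot take them.
 */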
sppp_release_pkts(sppa_t *ppa, uint16_t proto)

    uint32_t npflagpos = sppp_ppp2np(proto);

    ASSERT(ppa != NULL);

    if (npflagpos == 0 || (ppa->ppa_npflag & (1 << npflagpos)) == 0)

    mutex_enter(&ppa->ppa_npmutex);
    ppa->ppa_npflag &= ~(1 << npflagpos);
    count = ppa->ppa_holdpkts[npflagpos];
    ppa->ppa_holdpkts[npflagpos] = 0;
    mutex_exit(&ppa->ppa_npmutex);

    q = ppa->ppa_ctl->sps_rq;

        mp_proto = PPP_PROTOCOL(mp->b_rptr);
        if (mp_proto != proto) {
        if (mp_proto == PPP_IP) {
            destsps = ppa->ppa_ip_cache;
        } else if (mp_proto == PPP_IPV6) {
            destsps = ppa->ppa_ip6_cache;
        ASSERT(destsps != NULL);

        if (IS_SPS_FASTPATH(destsps)) {
            mp->b_rptr += PPP_HDRLEN;
            spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
            ASSERT(uqs != NULL);
            mp->b_rptr += PPP_HDRLEN;
            mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
                mutex_enter(&ppa->ppa_sta_lock);
                ppa->ppa_allocbfail++;
                mutex_exit(&ppa->ppa_sta_lock);
                /* mp already freed by sppp_dladdud */

        if (canputnext(destsps->sps_rq)) {
            putnext(destsps->sps_rq, mp);
            mutex_enter(&ppa->ppa_sta_lock);
            ppa->ppa_iqdropped++;
            mutex_exit(&ppa->ppa_sta_lock);