4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright (c) 2016 by Delphix. All rights reserved.
28 * gld - Generic LAN Driver Version 2, PSARC/1997/382
30 * This is a utility module that provides generic facilities for
31 * LAN drivers. The DLPI protocol and most STREAMS interfaces
34 * It no longer provides compatibility with drivers
35 * implemented according to the GLD v0 documentation published
36 * in 1993. (See PSARC 2003/728)
40 #include <sys/types.h>
41 #include <sys/errno.h>
42 #include <sys/stropts.h>
43 #include <sys/stream.h>
46 #include <sys/modctl.h>
47 #include <sys/kstat.h>
48 #include <sys/debug.h>
50 #include <sys/sysmacros.h>
52 #include <sys/byteorder.h>
53 #include <sys/strsun.h>
54 #include <sys/strsubr.h>
56 #include <sys/pattr.h>
57 #include <sys/ethernet.h>
58 #include <sys/ib/clients/ibd/ibd.h>
59 #include <sys/policy.h>
60 #include <sys/atomic.h>
62 #include <sys/multidata.h>
64 #include <sys/gldpriv.h>
67 #include <sys/sunddi.h>
70 * Macros to increment statistics.
74 * Increase kstats. Note this operation is not atomic. It can be used when
75 * GLDM_LOCK_HELD_WRITE(macinfo).
/*
 * Increase kstats.  Note this operation is not atomic.  It can only be
 * used when GLDM_LOCK_HELD_WRITE(macinfo).  The CONSTANTCONDITION
 * annotations silence lint about the compile-time NULL test on vstats.
 */
#define	BUMP(stats, vstats, stat, delta)	do {			\
	((stats)->stat) += (delta);					\
	_NOTE(CONSTANTCONDITION)					\
	if ((vstats) != NULL)						\
		((struct gld_stats *)(vstats))->stat += (delta);	\
	_NOTE(CONSTANTCONDITION)					\
} while (0)
/*
 * Atomically bump a single statistic counter.  The sizeof dispatch is
 * resolved at compile time (hence CONSTANTCONDITION): 32-bit counters
 * use atomic_add_32(), 64-bit counters use atomic_add_64().
 */
#define	ATOMIC_BUMP_STAT(stat, delta)	do {			\
	_NOTE(CONSTANTCONDITION)				\
	if (sizeof ((stat)) == sizeof (uint32_t)) {		\
		atomic_add_32((uint32_t *)&(stat), (delta));	\
	_NOTE(CONSTANTCONDITION)				\
	} else if (sizeof ((stat)) == sizeof (uint64_t)) {	\
		atomic_add_64((uint64_t *)&(stat), (delta));	\
	}							\
	_NOTE(CONSTANTCONDITION)				\
} while (0)
/*
 * Atomically bump a counter in the per-mac statistics, and, when a
 * per-vlan statistics block is supplied (vstats != NULL), the matching
 * counter there as well.
 */
#define	ATOMIC_BUMP(stats, vstats, stat, delta)	do {			\
	ATOMIC_BUMP_STAT((stats)->stat, (delta));			\
	_NOTE(CONSTANTCONDITION)					\
	if ((vstats) != NULL) {						\
		ATOMIC_BUMP_STAT(((struct gld_stats *)(vstats))->stat,	\
		    (delta));						\
	}								\
	_NOTE(CONSTANTCONDITION)					\
} while (0)
/*
 * Update transmit statistics for one sent packet: broadcast/multicast
 * classification, byte count, and packet count.  Wrapped in
 * do { } while (0) (the original used a bare brace block) so the macro
 * is safe inside unbraced if/else; call sites are unaffected.
 */
#define	UPDATE_STATS(stats, vstats, pktinfo, delta)	do {		\
	if ((pktinfo).isBroadcast) {					\
		ATOMIC_BUMP((stats), (vstats),				\
		    glds_brdcstxmt, (delta));				\
	} else if ((pktinfo).isMulticast) {				\
		ATOMIC_BUMP((stats), (vstats), glds_multixmt, (delta)); \
	}								\
	ATOMIC_BUMP((stats), (vstats), glds_bytexmt64,			\
	    ((pktinfo).pktLen));					\
	ATOMIC_BUMP((stats), (vstats), glds_pktxmt64, (delta));		\
} while (0)
119 int gld_debug
= GLDERRS
;
122 /* called from gld_register */
123 static int gld_initstats(gld_mac_info_t
*);
125 /* called from kstat mechanism, and from wsrv's get_statistics */
126 static int gld_update_kstat(kstat_t
*, int);
128 /* statistics for additional vlans */
129 static int gld_init_vlan_stats(gld_vlan_t
*);
130 static int gld_update_vlan_kstat(kstat_t
*, int);
132 /* called from gld_getinfo */
133 static dev_info_t
*gld_finddevinfo(dev_t
);
135 /* called from wput, wsrv, unidata, and v0_sched to send a packet */
136 /* also from the source routing stuff for sending RDE protocol packets */
137 static int gld_start(queue_t
*, mblk_t
*, int, uint32_t);
138 static int gld_start_mdt(queue_t
*, mblk_t
*, int);
140 /* called from gld_start[_mdt] to loopback packet(s) in promiscuous mode */
141 static void gld_precv(gld_mac_info_t
*, mblk_t
*, uint32_t, struct gld_stats
*);
142 static void gld_precv_mdt(gld_mac_info_t
*, gld_vlan_t
*, mblk_t
*,
143 pdesc_t
*, pktinfo_t
*);
145 /* receive group: called from gld_recv and gld_precv* with maclock held */
146 static void gld_sendup(gld_mac_info_t
*, pktinfo_t
*, mblk_t
*,
148 static int gld_accept(gld_t
*, pktinfo_t
*);
149 static int gld_mcmatch(gld_t
*, pktinfo_t
*);
150 static int gld_multicast(unsigned char *, gld_t
*);
151 static int gld_paccept(gld_t
*, pktinfo_t
*);
152 static void gld_passon(gld_t
*, mblk_t
*, pktinfo_t
*,
153 void (*)(queue_t
*, mblk_t
*));
154 static mblk_t
*gld_addudind(gld_t
*, mblk_t
*, pktinfo_t
*, boolean_t
);
156 /* wsrv group: called from wsrv, single threaded per queue */
157 static int gld_ioctl(queue_t
*, mblk_t
*);
158 static void gld_fastpath(gld_t
*, queue_t
*, mblk_t
*);
159 static int gld_cmds(queue_t
*, mblk_t
*);
160 static mblk_t
*gld_bindack(queue_t
*, mblk_t
*);
161 static int gld_notify_req(queue_t
*, mblk_t
*);
162 static int gld_udqos(queue_t
*, mblk_t
*);
163 static int gld_bind(queue_t
*, mblk_t
*);
164 static int gld_unbind(queue_t
*, mblk_t
*);
165 static int gld_inforeq(queue_t
*, mblk_t
*);
166 static int gld_unitdata(queue_t
*, mblk_t
*);
167 static int gldattach(queue_t
*, mblk_t
*);
168 static int gldunattach(queue_t
*, mblk_t
*);
169 static int gld_enable_multi(queue_t
*, mblk_t
*);
170 static int gld_disable_multi(queue_t
*, mblk_t
*);
171 static void gld_send_disable_multi(gld_mac_info_t
*, gld_mcast_t
*);
172 static int gld_promisc(queue_t
*, mblk_t
*, t_uscalar_t
, boolean_t
);
173 static int gld_physaddr(queue_t
*, mblk_t
*);
174 static int gld_setaddr(queue_t
*, mblk_t
*);
175 static int gld_get_statistics(queue_t
*, mblk_t
*);
176 static int gld_cap(queue_t
*, mblk_t
*);
177 static int gld_cap_ack(queue_t
*, mblk_t
*);
178 static int gld_cap_enable(queue_t
*, mblk_t
*);
180 /* misc utilities, some requiring various mutexes held */
181 static int gld_start_mac(gld_mac_info_t
*);
182 static void gld_stop_mac(gld_mac_info_t
*);
183 static void gld_set_ipq(gld_t
*);
184 static void gld_flushqueue(queue_t
*);
185 static glddev_t
*gld_devlookup(int);
186 static int gld_findminor(glddev_t
*);
187 static void gldinsque(void *, void *);
188 static void gldremque(void *);
189 void gld_bitrevcopy(caddr_t
, caddr_t
, size_t);
190 void gld_bitreverse(uchar_t
*, size_t);
191 char *gld_macaddr_sprintf(char *, unsigned char *, int);
192 static gld_vlan_t
*gld_add_vlan(gld_mac_info_t
*, uint32_t vid
);
193 static void gld_rem_vlan(gld_vlan_t
*);
194 gld_vlan_t
*gld_find_vlan(gld_mac_info_t
*, uint32_t);
195 gld_vlan_t
*gld_get_vlan(gld_mac_info_t
*, uint32_t);
198 static void gld_check_assertions(void);
199 extern void gld_sr_dump(gld_mac_info_t
*);
203 * Allocate and zero-out "number" structures each of type "structure" in
/*
 * Allocate and zero-out "number" structures each of type "structure".
 * Uses KM_NOSLEEP, so the result may be NULL and must be checked.
 */
#define	GLD_GETSTRUCT(structure, number)   \
	(kmem_zalloc((uint_t)(sizeof (structure) * (number)), KM_NOSLEEP))
/* Absolute value; result operand parenthesized for macro hygiene. */
#define	abs(a)	((a) < 0 ? -(a) : (a))
211 uint32_t gld_global_options
= GLD_OPT_NO_ETHRXSNAP
;
214 * The device is of DL_ETHER type and is able to support VLAN by itself.
/*
 * The device is of DL_ETHER type and is able to support VLAN by itself
 * (i.e. the driver supplies a gldm_send_tagged entry point).
 */
#define	VLAN_CAPABLE(macinfo)	\
	((macinfo)->gldm_type == DL_ETHER &&	\
	(macinfo)->gldm_send_tagged != NULL)
221 * The set of notifications generatable by GLD itself, the additional
222 * set that can be generated if the MAC driver provide the link-state
223 * tracking callback capability, and the set supported by the GLD
224 * notification code below.
226 * PLEASE keep these in sync with what the code actually does!
228 static const uint32_t gld_internal_notes
= DL_NOTE_PROMISC_ON_PHYS
|
229 DL_NOTE_PROMISC_OFF_PHYS
|
231 static const uint32_t gld_linkstate_notes
= DL_NOTE_LINK_DOWN
|
234 static const uint32_t gld_supported_notes
= DL_NOTE_PROMISC_ON_PHYS
|
235 DL_NOTE_PROMISC_OFF_PHYS
|
241 /* Media must correspond to #defines in gld.h */
/*
 * Media names reported through the "media" kstat.
 * Order must correspond to the GLDM_* #defines in gld.h.
 */
static char *gld_media[] = {
	"unknown",	/* GLDM_UNKNOWN - driver cannot determine media */
	"aui",		/* GLDM_AUI */
	"bnc",		/* GLDM_BNC */
	"twpair",	/* GLDM_TP */
	"fiber",	/* GLDM_FIBER */
	"100baseT",	/* GLDM_100BT */
	"100vgAnyLan",	/* GLDM_VGANYLAN */
	"10baseT",	/* GLDM_10BT */
	"ring4",	/* GLDM_RING4 */
	"ring16",	/* GLDM_RING16 */
	"PHY/MII",	/* GLDM_PHYMII */
	"100baseTX",	/* GLDM_100BTX */
	"100baseT4",	/* GLDM_100BT4 */
	"unknown",	/* skip */
	"ipib",		/* GLDM_IB */
};
260 /* Must correspond to #defines in gld.h */
/*
 * Duplex names reported through kstats.
 * Order must correspond to the GLD_DUPLEX_* #defines in gld.h.
 */
static char *gld_duplex[] = {
	"unknown",	/* GLD_DUPLEX_UNKNOWN - not known or not applicable */
	"half",		/* GLD_DUPLEX_HALF */
	"full"		/* GLD_DUPLEX_FULL */
};
268 * Interface types currently supported by GLD.
269 * If you add new types, you must check all "XXX" strings in the GLD source
270 * for implementation issues that may affect the support of your new type.
271 * In particular, any type with gldm_addrlen > 6, or gldm_saplen != -2, will
272 * require generalizing this GLD source to handle the new cases. In other
273 * words there are assumptions built into the code in a few places that must
274 * be fixed. Be sure to turn on DEBUG/ASSERT code when testing a new type.
276 static gld_interface_t interfaces
[] = {
282 sizeof (struct ether_header
),
292 /* Fiber Distributed data interface */
296 sizeof (struct fddi_mac_frm
),
306 /* Token Ring interface */
310 -1, /* variable header size */
324 sizeof (struct ipoib_header
),
326 gld_interpret_mdt_ib
,
336 * bit reversal lookup table.
338 static uchar_t bit_rev
[] = {
339 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0,
340 0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
341 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4,
342 0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
343 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc,
344 0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
345 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca,
346 0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
347 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6,
348 0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
349 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1,
350 0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
351 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9,
352 0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
353 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd,
354 0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
355 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3,
356 0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
357 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7,
358 0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
359 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf,
360 0x3f, 0xbf, 0x7f, 0xff,
364 * User priorities, mapped from b_band.
366 static uint32_t user_priority
[] = {
367 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
368 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
369 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
370 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
371 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
372 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
373 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
374 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
375 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
376 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
377 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
378 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
379 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
380 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
381 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
382 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
/*
 * Map a STREAMS b_band to a user priority; band 0 falls back to the
 * stream's configured default (gld_upri).  (band) is parenthesized in
 * the comparison for macro hygiene (the original left it bare).
 */
#define	UPRI(gld, band)	(((band) != 0) ? user_priority[(band)] : (gld)->gld_upri)
387 static struct glddevice gld_device_list
; /* Per-system root of GLD tables */
390 * Module linkage information for the kernel.
393 static struct modldrv modlmisc
= {
394 &mod_miscops
, /* Type of module - a utility provider */
395 "Generic LAN Driver (" GLD_VERSION_STRING
")"
401 static struct modlinkage modlinkage
= {
402 MODREV_1
, &modlmisc
, NULL
410 /* initialize gld_device_list mutex */
411 mutex_init(&gld_device_list
.gld_devlock
, NULL
, MUTEX_DRIVER
, NULL
);
413 /* initialize device driver (per-major) list */
414 gld_device_list
.gld_next
=
415 gld_device_list
.gld_prev
= &gld_device_list
;
417 if ((e
= mod_install(&modlinkage
)) != 0)
418 mutex_destroy(&gld_device_list
.gld_devlock
);
428 if ((e
= mod_remove(&modlinkage
)) != 0)
431 ASSERT(gld_device_list
.gld_next
==
432 (glddev_t
*)&gld_device_list
.gld_next
);
433 ASSERT(gld_device_list
.gld_prev
==
434 (glddev_t
*)&gld_device_list
.gld_next
);
435 mutex_destroy(&gld_device_list
.gld_devlock
);
441 _info(struct modinfo
*modinfop
)
443 return (mod_info(&modlinkage
, modinfop
));
447 * GLD service routines
/* So this gld binary maybe can be forward compatible with future v2 drivers */
#define	GLD_MAC_RESERVED	(16 * sizeof (caddr_t))
455 gld_mac_alloc(dev_info_t
*devinfo
)
457 gld_mac_info_t
*macinfo
;
459 macinfo
= kmem_zalloc(sizeof (gld_mac_info_t
) + GLD_MAC_RESERVED
,
463 * The setting of gldm_driver_version will not be documented or allowed
464 * until a future release.
466 macinfo
->gldm_driver_version
= GLD_VERSION_200
;
469 * GLD's version. This also is undocumented for now, but will be
470 * available if needed in the future.
472 macinfo
->gldm_GLD_version
= GLD_VERSION
;
478 * gld_mac_free must be called after the driver has removed interrupts
479 * and completely stopped calling gld_recv() and gld_sched(). At that
480 * point the interrupt routine is guaranteed by the system to have been
481 * exited and the maclock is no longer needed. Of course, it is
482 * expected (required) that (assuming gld_register() succeeded),
483 * gld_unregister() was called before gld_mac_free().
486 gld_mac_free(gld_mac_info_t
*macinfo
)
489 ASSERT(macinfo
->gldm_GLD_version
== GLD_VERSION
);
492 * Assert that if we made it through gld_register, then we must
495 ASSERT(!GLDM_LOCK_INITED(macinfo
) ||
496 (macinfo
->gldm_GLD_flags
& GLD_UNREGISTERED
));
498 GLDM_LOCK_DESTROY(macinfo
);
500 kmem_free(macinfo
, sizeof (gld_mac_info_t
) + GLD_MAC_RESERVED
);
504 * gld_register -- called once per device instance (PPA)
506 * During its attach routine, a real device driver will register with GLD
507 * so that later opens and dl_attach_reqs will work. The arguments are the
508 * devinfo pointer, the device name, and a macinfo structure describing the
509 * physical device instance.
512 gld_register(dev_info_t
*devinfo
, char *devname
, gld_mac_info_t
*macinfo
)
515 int major
= ddi_name_to_major(devname
), i
;
517 gld_mac_pvt_t
*mac_pvt
;
519 char pbuf
[3*GLD_MAX_ADDRLEN
];
520 gld_interface_t
*ifp
;
522 ASSERT(devinfo
!= NULL
);
523 ASSERT(macinfo
!= NULL
);
525 if (macinfo
->gldm_driver_version
!= GLD_VERSION
)
526 return (DDI_FAILURE
);
528 mediatype
= macinfo
->gldm_type
;
531 * Entry points should be ready for us.
533 * set_multicast and get_stats are optional in v0.
534 * intr is only required if you add an interrupt.
536 ASSERT(macinfo
->gldm_reset
!= NULL
);
537 ASSERT(macinfo
->gldm_start
!= NULL
);
538 ASSERT(macinfo
->gldm_stop
!= NULL
);
539 ASSERT(macinfo
->gldm_set_mac_addr
!= NULL
);
540 ASSERT(macinfo
->gldm_set_promiscuous
!= NULL
);
541 ASSERT(macinfo
->gldm_send
!= NULL
);
543 ASSERT(macinfo
->gldm_maxpkt
>= macinfo
->gldm_minpkt
);
544 ASSERT(macinfo
->gldm_GLD_version
== GLD_VERSION
);
545 ASSERT(macinfo
->gldm_broadcast_addr
!= NULL
);
546 ASSERT(macinfo
->gldm_vendor_addr
!= NULL
);
547 ASSERT(macinfo
->gldm_ident
!= NULL
);
549 if (macinfo
->gldm_addrlen
> GLD_MAX_ADDRLEN
) {
550 cmn_err(CE_WARN
, "GLD: %s driver gldm_addrlen %d > %d not sup"
551 "ported", devname
, macinfo
->gldm_addrlen
, GLD_MAX_ADDRLEN
);
552 return (DDI_FAILURE
);
556 * GLD only functions properly with saplen == -2
558 if (macinfo
->gldm_saplen
!= -2) {
559 cmn_err(CE_WARN
, "GLD: %s driver gldm_saplen %d != -2 "
560 "not supported", devname
, macinfo
->gldm_saplen
);
561 return (DDI_FAILURE
);
565 if (ddi_getprop(DDI_DEV_T_NONE
, devinfo
, 0, "fast_recv", 0))
566 macinfo
->gldm_options
|= GLDOPT_FAST_RECV
;
568 mutex_enter(&gld_device_list
.gld_devlock
);
569 glddev
= gld_devlookup(major
);
572 * Allocate per-driver (major) data structure if necessary
574 if (glddev
== NULL
) {
575 /* first occurrence of this device name (major number) */
576 glddev
= GLD_GETSTRUCT(glddev_t
, 1);
577 if (glddev
== NULL
) {
578 mutex_exit(&gld_device_list
.gld_devlock
);
579 return (DDI_FAILURE
);
581 (void) strncpy(glddev
->gld_name
, devname
,
582 sizeof (glddev
->gld_name
) - 1);
583 glddev
->gld_major
= major
;
584 glddev
->gld_nextminor
= GLD_MIN_CLONE_MINOR
;
585 glddev
->gld_mac_next
= glddev
->gld_mac_prev
=
586 (gld_mac_info_t
*)&glddev
->gld_mac_next
;
587 glddev
->gld_str_next
= glddev
->gld_str_prev
=
588 (gld_t
*)&glddev
->gld_str_next
;
589 mutex_init(&glddev
->gld_devlock
, NULL
, MUTEX_DRIVER
, NULL
);
591 /* allow increase of number of supported multicast addrs */
592 glddev
->gld_multisize
= ddi_getprop(DDI_DEV_T_NONE
,
593 devinfo
, 0, "multisize", GLD_MAX_MULTICAST
);
596 * Optionally restrict DLPI provider style
598 * -1 - don't create style 1 nodes
599 * -2 - don't create style 2 nodes
601 glddev
->gld_styles
= ddi_getprop(DDI_DEV_T_NONE
, devinfo
, 0,
602 "gld-provider-styles", 0);
604 /* Stuff that's needed before any PPA gets attached */
605 glddev
->gld_type
= macinfo
->gldm_type
;
606 glddev
->gld_minsdu
= macinfo
->gldm_minpkt
;
607 glddev
->gld_saplen
= macinfo
->gldm_saplen
;
608 glddev
->gld_addrlen
= macinfo
->gldm_addrlen
;
609 glddev
->gld_broadcast
= kmem_zalloc(macinfo
->gldm_addrlen
,
611 bcopy(macinfo
->gldm_broadcast_addr
,
612 glddev
->gld_broadcast
, macinfo
->gldm_addrlen
);
613 glddev
->gld_maxsdu
= macinfo
->gldm_maxpkt
;
614 gldinsque(glddev
, gld_device_list
.gld_prev
);
616 glddev
->gld_ndevice
++;
617 /* Now glddev can't go away until we unregister this mac (or fail) */
618 mutex_exit(&gld_device_list
.gld_devlock
);
621 * Per-instance initialization
625 * Initialize per-mac structure that is private to GLD.
626 * Set up interface pointer. These are device class specific pointers
627 * used to handle FDDI/TR/ETHER/IPoIB specific packets.
629 for (i
= 0; i
< sizeof (interfaces
)/sizeof (*interfaces
); i
++) {
630 if (mediatype
!= interfaces
[i
].mac_type
)
633 macinfo
->gldm_mac_pvt
= kmem_zalloc(sizeof (gld_mac_pvt_t
),
635 ((gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
)->interfacep
= ifp
=
641 cmn_err(CE_WARN
, "GLD: this version does not support %s driver "
642 "of type %d", devname
, mediatype
);
647 * Driver can only register MTU within legal media range.
649 if (macinfo
->gldm_maxpkt
> ifp
->mtu_size
) {
650 cmn_err(CE_WARN
, "GLD: oversize MTU is specified by driver %s",
656 * Correct margin size if it is not set.
658 if (VLAN_CAPABLE(macinfo
) && (macinfo
->gldm_margin
== 0))
659 macinfo
->gldm_margin
= VTAG_SIZE
;
662 * For now, only Infiniband drivers can use MDT. Do not add
663 * support for Ethernet, FDDI or TR.
665 if (macinfo
->gldm_mdt_pre
!= NULL
) {
666 if (mediatype
!= DL_IB
) {
667 cmn_err(CE_WARN
, "GLD: MDT not supported for %s "
668 "driver of type %d", devname
, mediatype
);
673 * Validate entry points.
675 if ((macinfo
->gldm_mdt_send
== NULL
) ||
676 (macinfo
->gldm_mdt_post
== NULL
)) {
677 cmn_err(CE_WARN
, "GLD: invalid MDT entry points for "
678 "%s driver of type %d", devname
, mediatype
);
681 macinfo
->gldm_options
|= GLDOPT_MDT
;
684 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
685 mac_pvt
->major_dev
= glddev
;
687 mac_pvt
->curr_macaddr
= kmem_zalloc(macinfo
->gldm_addrlen
, KM_SLEEP
);
689 * XXX Do bit-reversed devices store gldm_vendor in canonical
690 * format or in wire format? Also gldm_broadcast. For now
691 * we are assuming canonical, but I'm not sure that makes the
692 * most sense for ease of driver implementation.
694 bcopy(macinfo
->gldm_vendor_addr
, mac_pvt
->curr_macaddr
,
695 macinfo
->gldm_addrlen
);
696 mac_pvt
->statistics
= kmem_zalloc(sizeof (struct gld_stats
), KM_SLEEP
);
699 * The available set of notifications is those generatable by GLD
700 * itself, plus those corresponding to the capabilities of the MAC
701 * driver, intersected with those supported by gld_notify_ind() above.
703 mac_pvt
->notifications
= gld_internal_notes
;
704 if (macinfo
->gldm_capabilities
& GLD_CAP_LINKSTATE
)
705 mac_pvt
->notifications
|= gld_linkstate_notes
;
706 mac_pvt
->notifications
&= gld_supported_notes
;
708 GLDM_LOCK_INIT(macinfo
);
710 ddi_set_driver_private(devinfo
, macinfo
);
713 * Now atomically get a PPA and put ourselves on the mac list.
715 mutex_enter(&glddev
->gld_devlock
);
718 if (macinfo
->gldm_ppa
!= ddi_get_instance(devinfo
))
719 cmn_err(CE_WARN
, "%s%d instance != ppa %d",
720 ddi_driver_name(devinfo
), ddi_get_instance(devinfo
),
725 * Create style 2 node (gated by gld-provider-styles property).
727 * NOTE: When the CLONE_DEV flag is specified to
728 * ddi_create_minor_node() the minor number argument is
729 * immaterial. Opens of that node will go via the clone
730 * driver and gld_open() will always be passed a dev_t with
733 if (glddev
->gld_styles
!= -2) {
734 if (ddi_create_minor_node(devinfo
, glddev
->gld_name
, S_IFCHR
,
735 0, DDI_NT_NET
, CLONE_DEV
) == DDI_FAILURE
) {
736 mutex_exit(&glddev
->gld_devlock
);
742 * Create style 1 node (gated by gld-provider-styles property)
744 if (glddev
->gld_styles
!= -1) {
745 (void) sprintf(minordev
, "%s%d", glddev
->gld_name
,
747 if (ddi_create_minor_node(devinfo
, minordev
, S_IFCHR
,
748 GLD_STYLE1_PPA_TO_MINOR(macinfo
->gldm_ppa
), DDI_NT_NET
,
750 mutex_exit(&glddev
->gld_devlock
);
755 /* add ourselves to this major device's linked list of instances */
756 gldinsque(macinfo
, glddev
->gld_mac_prev
);
758 mutex_exit(&glddev
->gld_devlock
);
761 * Unfortunately we need the ppa before we call gld_initstats();
762 * otherwise we would like to do this just above the mutex_enter
763 * above. In which case we could have set MAC_READY inside the
764 * mutex and we wouldn't have needed to check it in open and
765 * DL_ATTACH. We wouldn't like to do the initstats/kstat_create
766 * inside the mutex because it might get taken in our kstat_update
767 * routine and cause a deadlock with kstat_chain_lock.
770 /* gld_initstats() calls (*ifp->init)() */
771 if (gld_initstats(macinfo
) != GLD_SUCCESS
) {
772 mutex_enter(&glddev
->gld_devlock
);
774 mutex_exit(&glddev
->gld_devlock
);
779 * Need to indicate we are NOW ready to process interrupts;
780 * any interrupt before this is set is for someone else.
781 * This flag is also now used to tell open, et. al. that this
782 * mac is now fully ready and available for use.
784 GLDM_LOCK(macinfo
, RW_WRITER
);
785 macinfo
->gldm_GLD_flags
|= GLD_MAC_READY
;
786 GLDM_UNLOCK(macinfo
);
788 /* log local ethernet address -- XXX not DDI compliant */
789 if (macinfo
->gldm_addrlen
== sizeof (struct ether_addr
))
790 (void) localetheraddr(
791 (struct ether_addr
*)macinfo
->gldm_vendor_addr
, NULL
);
793 /* now put announcement into the message buffer */
794 cmn_err(CE_CONT
, "!%s%d: %s: type \"%s\" mac address %s\n",
796 macinfo
->gldm_ppa
, macinfo
->gldm_ident
,
797 mac_pvt
->interfacep
->mac_string
,
798 gld_macaddr_sprintf(pbuf
, macinfo
->gldm_vendor_addr
,
799 macinfo
->gldm_addrlen
));
801 ddi_report_dev(devinfo
);
802 return (DDI_SUCCESS
);
805 ddi_remove_minor_node(devinfo
, NULL
);
806 GLDM_LOCK_DESTROY(macinfo
);
807 if (mac_pvt
->curr_macaddr
!= NULL
)
808 kmem_free(mac_pvt
->curr_macaddr
, macinfo
->gldm_addrlen
);
809 if (mac_pvt
->statistics
!= NULL
)
810 kmem_free(mac_pvt
->statistics
, sizeof (struct gld_stats
));
811 kmem_free(macinfo
->gldm_mac_pvt
, sizeof (gld_mac_pvt_t
));
812 macinfo
->gldm_mac_pvt
= NULL
;
815 mutex_enter(&gld_device_list
.gld_devlock
);
816 glddev
->gld_ndevice
--;
818 * Note that just because this goes to zero here does not necessarily
819 * mean that we were the one who added the glddev above. It's
820 * possible that the first mac unattached while were were in here
821 * failing to attach the second mac. But we're now the last.
823 if (glddev
->gld_ndevice
== 0) {
824 /* There should be no macinfos left */
825 ASSERT(glddev
->gld_mac_next
==
826 (gld_mac_info_t
*)&glddev
->gld_mac_next
);
827 ASSERT(glddev
->gld_mac_prev
==
828 (gld_mac_info_t
*)&glddev
->gld_mac_next
);
831 * There should be no DL_UNATTACHED streams: the system
832 * should not have detached the "first" devinfo which has
833 * all the open style 2 streams.
835 * XXX This is not clear. See gld_getinfo and Bug 1165519
837 ASSERT(glddev
->gld_str_next
== (gld_t
*)&glddev
->gld_str_next
);
838 ASSERT(glddev
->gld_str_prev
== (gld_t
*)&glddev
->gld_str_next
);
841 mutex_destroy(&glddev
->gld_devlock
);
842 if (glddev
->gld_broadcast
!= NULL
)
843 kmem_free(glddev
->gld_broadcast
, glddev
->gld_addrlen
);
844 kmem_free(glddev
, sizeof (glddev_t
));
846 mutex_exit(&gld_device_list
.gld_devlock
);
848 return (DDI_FAILURE
);
852 * gld_unregister (macinfo)
853 * remove the macinfo structure from local structures
854 * this is cleanup for a driver to be unloaded
857 gld_unregister(gld_mac_info_t
*macinfo
)
859 gld_mac_pvt_t
*mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
860 glddev_t
*glddev
= mac_pvt
->major_dev
;
861 gld_interface_t
*ifp
;
862 int multisize
= sizeof (gld_mcast_t
) * glddev
->gld_multisize
;
864 mutex_enter(&glddev
->gld_devlock
);
865 GLDM_LOCK(macinfo
, RW_WRITER
);
867 if (mac_pvt
->nvlan
> 0) {
868 GLDM_UNLOCK(macinfo
);
869 mutex_exit(&glddev
->gld_devlock
);
870 return (DDI_FAILURE
);
877 for (i
= 0; i
< VLAN_HASHSZ
; i
++) {
878 if ((mac_pvt
->vlan_hash
[i
] != NULL
))
881 "mac_pvt->vlan_hash[%d] != NULL",
882 __FILE__
, __LINE__
, i
);
887 /* Delete this mac */
890 /* Disallow further entries to gld_recv() and gld_sched() */
891 macinfo
->gldm_GLD_flags
|= GLD_UNREGISTERED
;
893 GLDM_UNLOCK(macinfo
);
894 mutex_exit(&glddev
->gld_devlock
);
896 ifp
= ((gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
)->interfacep
;
897 (*ifp
->uninit
)(macinfo
);
899 ASSERT(mac_pvt
->kstatp
);
900 kstat_delete(mac_pvt
->kstatp
);
902 ASSERT(GLDM_LOCK_INITED(macinfo
));
903 kmem_free(mac_pvt
->curr_macaddr
, macinfo
->gldm_addrlen
);
904 kmem_free(mac_pvt
->statistics
, sizeof (struct gld_stats
));
906 if (mac_pvt
->mcast_table
!= NULL
)
907 kmem_free(mac_pvt
->mcast_table
, multisize
);
908 kmem_free(macinfo
->gldm_mac_pvt
, sizeof (gld_mac_pvt_t
));
909 macinfo
->gldm_mac_pvt
= NULL
;
911 /* We now have one fewer instance for this major device */
912 mutex_enter(&gld_device_list
.gld_devlock
);
913 glddev
->gld_ndevice
--;
914 if (glddev
->gld_ndevice
== 0) {
915 /* There should be no macinfos left */
916 ASSERT(glddev
->gld_mac_next
==
917 (gld_mac_info_t
*)&glddev
->gld_mac_next
);
918 ASSERT(glddev
->gld_mac_prev
==
919 (gld_mac_info_t
*)&glddev
->gld_mac_next
);
922 * There should be no DL_UNATTACHED streams: the system
923 * should not have detached the "first" devinfo which has
924 * all the open style 2 streams.
926 * XXX This is not clear. See gld_getinfo and Bug 1165519
928 ASSERT(glddev
->gld_str_next
== (gld_t
*)&glddev
->gld_str_next
);
929 ASSERT(glddev
->gld_str_prev
== (gld_t
*)&glddev
->gld_str_next
);
931 ddi_remove_minor_node(macinfo
->gldm_devinfo
, NULL
);
933 mutex_destroy(&glddev
->gld_devlock
);
934 if (glddev
->gld_broadcast
!= NULL
)
935 kmem_free(glddev
->gld_broadcast
, glddev
->gld_addrlen
);
936 kmem_free(glddev
, sizeof (glddev_t
));
938 mutex_exit(&gld_device_list
.gld_devlock
);
940 return (DDI_SUCCESS
);
945 * called from gld_register
948 gld_initstats(gld_mac_info_t
*macinfo
)
950 gld_mac_pvt_t
*mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
951 struct gldkstats
*sp
;
954 gld_interface_t
*ifp
;
956 glddev
= mac_pvt
->major_dev
;
958 if ((ksp
= kstat_create(glddev
->gld_name
, macinfo
->gldm_ppa
,
959 NULL
, "net", KSTAT_TYPE_NAMED
,
960 sizeof (struct gldkstats
) / sizeof (kstat_named_t
), 0)) == NULL
) {
962 "GLD: failed to create kstat structure for %s%d",
963 glddev
->gld_name
, macinfo
->gldm_ppa
);
964 return (GLD_FAILURE
);
966 mac_pvt
->kstatp
= ksp
;
968 ksp
->ks_update
= gld_update_kstat
;
969 ksp
->ks_private
= (void *)macinfo
;
972 kstat_named_init(&sp
->glds_pktrcv
, "ipackets", KSTAT_DATA_UINT32
);
973 kstat_named_init(&sp
->glds_pktxmt
, "opackets", KSTAT_DATA_UINT32
);
974 kstat_named_init(&sp
->glds_errrcv
, "ierrors", KSTAT_DATA_ULONG
);
975 kstat_named_init(&sp
->glds_errxmt
, "oerrors", KSTAT_DATA_ULONG
);
976 kstat_named_init(&sp
->glds_bytexmt
, "obytes", KSTAT_DATA_UINT32
);
977 kstat_named_init(&sp
->glds_bytercv
, "rbytes", KSTAT_DATA_UINT32
);
978 kstat_named_init(&sp
->glds_multixmt
, "multixmt", KSTAT_DATA_ULONG
);
979 kstat_named_init(&sp
->glds_multircv
, "multircv", KSTAT_DATA_ULONG
);
980 kstat_named_init(&sp
->glds_brdcstxmt
, "brdcstxmt", KSTAT_DATA_ULONG
);
981 kstat_named_init(&sp
->glds_brdcstrcv
, "brdcstrcv", KSTAT_DATA_ULONG
);
982 kstat_named_init(&sp
->glds_blocked
, "blocked", KSTAT_DATA_ULONG
);
983 kstat_named_init(&sp
->glds_noxmtbuf
, "noxmtbuf", KSTAT_DATA_ULONG
);
984 kstat_named_init(&sp
->glds_norcvbuf
, "norcvbuf", KSTAT_DATA_ULONG
);
985 kstat_named_init(&sp
->glds_xmtretry
, "xmtretry", KSTAT_DATA_ULONG
);
986 kstat_named_init(&sp
->glds_intr
, "intr", KSTAT_DATA_ULONG
);
987 kstat_named_init(&sp
->glds_pktrcv64
, "ipackets64", KSTAT_DATA_UINT64
);
988 kstat_named_init(&sp
->glds_pktxmt64
, "opackets64", KSTAT_DATA_UINT64
);
989 kstat_named_init(&sp
->glds_bytexmt64
, "obytes64", KSTAT_DATA_UINT64
);
990 kstat_named_init(&sp
->glds_bytercv64
, "rbytes64", KSTAT_DATA_UINT64
);
991 kstat_named_init(&sp
->glds_unknowns
, "unknowns", KSTAT_DATA_ULONG
);
992 kstat_named_init(&sp
->glds_speed
, "ifspeed", KSTAT_DATA_UINT64
);
993 kstat_named_init(&sp
->glds_media
, "media", KSTAT_DATA_CHAR
);
994 kstat_named_init(&sp
->glds_prom
, "promisc", KSTAT_DATA_CHAR
);
996 kstat_named_init(&sp
->glds_overflow
, "oflo", KSTAT_DATA_ULONG
);
997 kstat_named_init(&sp
->glds_underflow
, "uflo", KSTAT_DATA_ULONG
);
998 kstat_named_init(&sp
->glds_missed
, "missed", KSTAT_DATA_ULONG
);
1000 kstat_named_init(&sp
->glds_xmtbadinterp
, "xmt_badinterp",
1002 kstat_named_init(&sp
->glds_rcvbadinterp
, "rcv_badinterp",
1005 ifp
= ((gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
)->interfacep
;
1007 (*ifp
->init
)(macinfo
);
1011 return (GLD_SUCCESS
);
1014 /* called from kstat mechanism, and from wsrv's get_statistics_req */
1016 gld_update_kstat(kstat_t
*ksp
, int rw
)
1018 gld_mac_info_t
*macinfo
;
1019 gld_mac_pvt_t
*mac_pvt
;
1020 struct gldkstats
*gsp
;
1021 struct gld_stats
*stats
;
1023 if (rw
== KSTAT_WRITE
)
1026 macinfo
= (gld_mac_info_t
*)ksp
->ks_private
;
1027 ASSERT(macinfo
!= NULL
);
1029 GLDM_LOCK(macinfo
, RW_WRITER
);
1031 if (!(macinfo
->gldm_GLD_flags
& GLD_MAC_READY
)) {
1032 GLDM_UNLOCK(macinfo
);
1033 return (EIO
); /* this one's not ready yet */
1036 if (macinfo
->gldm_GLD_flags
& GLD_UNREGISTERED
) {
1037 GLDM_UNLOCK(macinfo
);
1038 return (EIO
); /* this one's not ready any more */
1041 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
1042 gsp
= mac_pvt
->kstatp
->ks_data
;
1044 stats
= mac_pvt
->statistics
;
1046 if (macinfo
->gldm_get_stats
)
1047 (void) (*macinfo
->gldm_get_stats
)(macinfo
, stats
);
1049 gsp
->glds_pktxmt
.value
.ui32
= stats
->glds_pktxmt64
& 0xffffffff;
1050 gsp
->glds_bytexmt
.value
.ui32
= stats
->glds_bytexmt64
& 0xffffffff;
1051 gsp
->glds_multixmt
.value
.ul
= stats
->glds_multixmt
;
1052 gsp
->glds_brdcstxmt
.value
.ul
= stats
->glds_brdcstxmt
;
1053 gsp
->glds_noxmtbuf
.value
.ul
= stats
->glds_noxmtbuf
; /* 0 for now */
1054 gsp
->glds_xmtretry
.value
.ul
= stats
->glds_xmtretry
;
1056 gsp
->glds_pktxmt64
.value
.ui64
= stats
->glds_pktxmt64
;
1057 gsp
->glds_bytexmt64
.value
.ui64
= stats
->glds_bytexmt64
;
1058 gsp
->glds_xmtbadinterp
.value
.ui32
= stats
->glds_xmtbadinterp
;
1060 gsp
->glds_pktrcv
.value
.ui32
= stats
->glds_pktrcv64
& 0xffffffff;
1061 gsp
->glds_errxmt
.value
.ul
= stats
->glds_errxmt
;
1062 gsp
->glds_errrcv
.value
.ul
= stats
->glds_errrcv
;
1063 gsp
->glds_bytercv
.value
.ui32
= stats
->glds_bytercv64
& 0xffffffff;
1064 gsp
->glds_multircv
.value
.ul
= stats
->glds_multircv
;
1065 gsp
->glds_brdcstrcv
.value
.ul
= stats
->glds_brdcstrcv
;
1066 gsp
->glds_blocked
.value
.ul
= stats
->glds_blocked
;
1067 gsp
->glds_overflow
.value
.ul
= stats
->glds_overflow
;
1068 gsp
->glds_underflow
.value
.ul
= stats
->glds_underflow
;
1069 gsp
->glds_missed
.value
.ul
= stats
->glds_missed
;
1070 gsp
->glds_norcvbuf
.value
.ul
= stats
->glds_norcvbuf
+
1071 stats
->glds_gldnorcvbuf
;
1072 gsp
->glds_intr
.value
.ul
= stats
->glds_intr
;
1074 gsp
->glds_speed
.value
.ui64
= stats
->glds_speed
;
1075 gsp
->glds_unknowns
.value
.ul
= stats
->glds_unknowns
;
1076 gsp
->glds_pktrcv64
.value
.ui64
= stats
->glds_pktrcv64
;
1077 gsp
->glds_bytercv64
.value
.ui64
= stats
->glds_bytercv64
;
1078 gsp
->glds_rcvbadinterp
.value
.ui32
= stats
->glds_rcvbadinterp
;
1081 (void) strcpy(gsp
->glds_prom
.value
.c
, "phys");
1082 else if (mac_pvt
->nprom_multi
)
1083 (void) strcpy(gsp
->glds_prom
.value
.c
, "multi");
1085 (void) strcpy(gsp
->glds_prom
.value
.c
, "off");
1087 (void) strcpy(gsp
->glds_media
.value
.c
, gld_media
[
1088 stats
->glds_media
< sizeof (gld_media
) / sizeof (gld_media
[0])
1089 ? stats
->glds_media
: 0]);
1091 switch (macinfo
->gldm_type
) {
1093 gsp
->glds_frame
.value
.ul
= stats
->glds_frame
;
1094 gsp
->glds_crc
.value
.ul
= stats
->glds_crc
;
1095 gsp
->glds_collisions
.value
.ul
= stats
->glds_collisions
;
1096 gsp
->glds_excoll
.value
.ul
= stats
->glds_excoll
;
1097 gsp
->glds_defer
.value
.ul
= stats
->glds_defer
;
1098 gsp
->glds_short
.value
.ul
= stats
->glds_short
;
1099 gsp
->glds_xmtlatecoll
.value
.ul
= stats
->glds_xmtlatecoll
;
1100 gsp
->glds_nocarrier
.value
.ul
= stats
->glds_nocarrier
;
1101 gsp
->glds_dot3_first_coll
.value
.ui32
=
1102 stats
->glds_dot3_first_coll
;
1103 gsp
->glds_dot3_multi_coll
.value
.ui32
=
1104 stats
->glds_dot3_multi_coll
;
1105 gsp
->glds_dot3_sqe_error
.value
.ui32
=
1106 stats
->glds_dot3_sqe_error
;
1107 gsp
->glds_dot3_mac_xmt_error
.value
.ui32
=
1108 stats
->glds_dot3_mac_xmt_error
;
1109 gsp
->glds_dot3_mac_rcv_error
.value
.ui32
=
1110 stats
->glds_dot3_mac_rcv_error
;
1111 gsp
->glds_dot3_frame_too_long
.value
.ui32
=
1112 stats
->glds_dot3_frame_too_long
;
1113 (void) strcpy(gsp
->glds_duplex
.value
.c
, gld_duplex
[
1114 stats
->glds_duplex
<
1115 sizeof (gld_duplex
) / sizeof (gld_duplex
[0]) ?
1116 stats
->glds_duplex
: 0]);
1119 gsp
->glds_dot5_line_error
.value
.ui32
=
1120 stats
->glds_dot5_line_error
;
1121 gsp
->glds_dot5_burst_error
.value
.ui32
=
1122 stats
->glds_dot5_burst_error
;
1123 gsp
->glds_dot5_signal_loss
.value
.ui32
=
1124 stats
->glds_dot5_signal_loss
;
1125 gsp
->glds_dot5_ace_error
.value
.ui32
=
1126 stats
->glds_dot5_ace_error
;
1127 gsp
->glds_dot5_internal_error
.value
.ui32
=
1128 stats
->glds_dot5_internal_error
;
1129 gsp
->glds_dot5_lost_frame_error
.value
.ui32
=
1130 stats
->glds_dot5_lost_frame_error
;
1131 gsp
->glds_dot5_frame_copied_error
.value
.ui32
=
1132 stats
->glds_dot5_frame_copied_error
;
1133 gsp
->glds_dot5_token_error
.value
.ui32
=
1134 stats
->glds_dot5_token_error
;
1135 gsp
->glds_dot5_freq_error
.value
.ui32
=
1136 stats
->glds_dot5_freq_error
;
1139 gsp
->glds_fddi_mac_error
.value
.ui32
=
1140 stats
->glds_fddi_mac_error
;
1141 gsp
->glds_fddi_mac_lost
.value
.ui32
=
1142 stats
->glds_fddi_mac_lost
;
1143 gsp
->glds_fddi_mac_token
.value
.ui32
=
1144 stats
->glds_fddi_mac_token
;
1145 gsp
->glds_fddi_mac_tvx_expired
.value
.ui32
=
1146 stats
->glds_fddi_mac_tvx_expired
;
1147 gsp
->glds_fddi_mac_late
.value
.ui32
=
1148 stats
->glds_fddi_mac_late
;
1149 gsp
->glds_fddi_mac_ring_op
.value
.ui32
=
1150 stats
->glds_fddi_mac_ring_op
;
1158 GLDM_UNLOCK(macinfo
);
1161 gld_check_assertions();
1162 if (gld_debug
& GLDRDE
)
1163 gld_sr_dump(macinfo
);
1170 gld_init_vlan_stats(gld_vlan_t
*vlan
)
1172 gld_mac_info_t
*mac
= vlan
->gldv_mac
;
1173 gld_mac_pvt_t
*mac_pvt
= (gld_mac_pvt_t
*)mac
->gldm_mac_pvt
;
1174 struct gldkstats
*sp
;
1180 glddev
= mac_pvt
->major_dev
;
1181 name
= glddev
->gld_name
;
1182 instance
= (vlan
->gldv_id
* GLD_VLAN_SCALE
) + mac
->gldm_ppa
;
1184 if ((ksp
= kstat_create(name
, instance
,
1185 NULL
, "net", KSTAT_TYPE_NAMED
,
1186 sizeof (struct gldkstats
) / sizeof (kstat_named_t
), 0)) == NULL
) {
1188 "GLD: failed to create kstat structure for %s%d",
1190 return (GLD_FAILURE
);
1193 vlan
->gldv_kstatp
= ksp
;
1195 ksp
->ks_update
= gld_update_vlan_kstat
;
1196 ksp
->ks_private
= (void *)vlan
;
1199 kstat_named_init(&sp
->glds_pktrcv
, "ipackets", KSTAT_DATA_UINT32
);
1200 kstat_named_init(&sp
->glds_pktxmt
, "opackets", KSTAT_DATA_UINT32
);
1201 kstat_named_init(&sp
->glds_errrcv
, "ierrors", KSTAT_DATA_ULONG
);
1202 kstat_named_init(&sp
->glds_errxmt
, "oerrors", KSTAT_DATA_ULONG
);
1203 kstat_named_init(&sp
->glds_bytexmt
, "obytes", KSTAT_DATA_UINT32
);
1204 kstat_named_init(&sp
->glds_bytercv
, "rbytes", KSTAT_DATA_UINT32
);
1205 kstat_named_init(&sp
->glds_multixmt
, "multixmt", KSTAT_DATA_ULONG
);
1206 kstat_named_init(&sp
->glds_multircv
, "multircv", KSTAT_DATA_ULONG
);
1207 kstat_named_init(&sp
->glds_brdcstxmt
, "brdcstxmt", KSTAT_DATA_ULONG
);
1208 kstat_named_init(&sp
->glds_brdcstrcv
, "brdcstrcv", KSTAT_DATA_ULONG
);
1209 kstat_named_init(&sp
->glds_blocked
, "blocked", KSTAT_DATA_ULONG
);
1210 kstat_named_init(&sp
->glds_noxmtbuf
, "noxmtbuf", KSTAT_DATA_ULONG
);
1211 kstat_named_init(&sp
->glds_norcvbuf
, "norcvbuf", KSTAT_DATA_ULONG
);
1212 kstat_named_init(&sp
->glds_xmtretry
, "xmtretry", KSTAT_DATA_ULONG
);
1213 kstat_named_init(&sp
->glds_intr
, "intr", KSTAT_DATA_ULONG
);
1214 kstat_named_init(&sp
->glds_pktrcv64
, "ipackets64", KSTAT_DATA_UINT64
);
1215 kstat_named_init(&sp
->glds_pktxmt64
, "opackets64", KSTAT_DATA_UINT64
);
1216 kstat_named_init(&sp
->glds_bytexmt64
, "obytes64", KSTAT_DATA_UINT64
);
1217 kstat_named_init(&sp
->glds_bytercv64
, "rbytes64", KSTAT_DATA_UINT64
);
1218 kstat_named_init(&sp
->glds_unknowns
, "unknowns", KSTAT_DATA_ULONG
);
1219 kstat_named_init(&sp
->glds_speed
, "ifspeed", KSTAT_DATA_UINT64
);
1220 kstat_named_init(&sp
->glds_media
, "media", KSTAT_DATA_CHAR
);
1221 kstat_named_init(&sp
->glds_prom
, "promisc", KSTAT_DATA_CHAR
);
1223 kstat_named_init(&sp
->glds_overflow
, "oflo", KSTAT_DATA_ULONG
);
1224 kstat_named_init(&sp
->glds_underflow
, "uflo", KSTAT_DATA_ULONG
);
1225 kstat_named_init(&sp
->glds_missed
, "missed", KSTAT_DATA_ULONG
);
1227 kstat_named_init(&sp
->glds_xmtbadinterp
, "xmt_badinterp",
1229 kstat_named_init(&sp
->glds_rcvbadinterp
, "rcv_badinterp",
1233 return (GLD_SUCCESS
);
1237 gld_update_vlan_kstat(kstat_t
*ksp
, int rw
)
1240 gld_mac_info_t
*macinfo
;
1241 struct gldkstats
*gsp
;
1242 struct gld_stats
*stats
;
1243 gld_mac_pvt_t
*mac_pvt
;
1246 if (rw
== KSTAT_WRITE
)
1249 vlan
= (gld_vlan_t
*)ksp
->ks_private
;
1250 ASSERT(vlan
!= NULL
);
1252 macinfo
= vlan
->gldv_mac
;
1253 GLDM_LOCK(macinfo
, RW_WRITER
);
1255 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
1257 gsp
= vlan
->gldv_kstatp
->ks_data
;
1259 stats
= vlan
->gldv_stats
;
1261 gsp
->glds_pktxmt
.value
.ui32
= stats
->glds_pktxmt64
& 0xffffffff;
1262 gsp
->glds_bytexmt
.value
.ui32
= stats
->glds_bytexmt64
& 0xffffffff;
1263 gsp
->glds_errxmt
.value
.ul
= stats
->glds_errxmt
;
1264 gsp
->glds_multixmt
.value
.ul
= stats
->glds_multixmt
;
1265 gsp
->glds_brdcstxmt
.value
.ul
= stats
->glds_brdcstxmt
;
1266 gsp
->glds_noxmtbuf
.value
.ul
= stats
->glds_noxmtbuf
;
1267 gsp
->glds_xmtretry
.value
.ul
= stats
->glds_xmtretry
;
1268 gsp
->glds_pktxmt64
.value
.ui64
= stats
->glds_pktxmt64
;
1269 gsp
->glds_bytexmt64
.value
.ui64
= stats
->glds_bytexmt64
;
1271 gsp
->glds_pktrcv
.value
.ui32
= stats
->glds_pktrcv64
& 0xffffffff;
1272 gsp
->glds_bytercv
.value
.ui32
= stats
->glds_bytercv64
& 0xffffffff;
1273 gsp
->glds_errrcv
.value
.ul
= stats
->glds_errrcv
;
1274 gsp
->glds_multircv
.value
.ul
= stats
->glds_multircv
;
1275 gsp
->glds_brdcstrcv
.value
.ul
= stats
->glds_brdcstrcv
;
1276 gsp
->glds_blocked
.value
.ul
= stats
->glds_blocked
;
1277 gsp
->glds_pktrcv64
.value
.ui64
= stats
->glds_pktrcv64
;
1278 gsp
->glds_bytercv64
.value
.ui64
= stats
->glds_bytercv64
;
1279 gsp
->glds_unknowns
.value
.ul
= stats
->glds_unknowns
;
1280 gsp
->glds_xmtbadinterp
.value
.ui32
= stats
->glds_xmtbadinterp
;
1281 gsp
->glds_rcvbadinterp
.value
.ui32
= stats
->glds_rcvbadinterp
;
1283 gsp
->glds_speed
.value
.ui64
= mac_pvt
->statistics
->glds_speed
;
1284 media
= mac_pvt
->statistics
->glds_media
;
1285 (void) strcpy(gsp
->glds_media
.value
.c
,
1286 gld_media
[media
< sizeof (gld_media
) / sizeof (gld_media
[0]) ?
1289 GLDM_UNLOCK(macinfo
);
1294 * The device dependent driver specifies gld_getinfo as its getinfo routine.
1298 gld_getinfo(dev_info_t
*dip
, ddi_info_cmd_t cmd
, void *arg
, void **resultp
)
1300 dev_info_t
*devinfo
;
1301 minor_t minor
= getminor((dev_t
)arg
);
1302 int rc
= DDI_FAILURE
;
1305 case DDI_INFO_DEVT2DEVINFO
:
1306 if ((devinfo
= gld_finddevinfo((dev_t
)arg
)) != NULL
) {
1307 *(dev_info_t
**)resultp
= devinfo
;
1311 case DDI_INFO_DEVT2INSTANCE
:
1312 /* Need static mapping for deferred attach */
1313 if (minor
== GLD_USE_STYLE2
) {
1315 * Style 2: this minor number does not correspond to
1316 * any particular instance number.
1319 } else if (minor
<= GLD_MAX_STYLE1_MINOR
) {
1320 /* Style 1: calculate the PPA from the minor */
1321 *resultp
= (void *)(uintptr_t)
1322 GLD_STYLE1_MINOR_TO_PPA(minor
);
1325 /* Clone: look for it. Not a static mapping */
1326 if ((devinfo
= gld_finddevinfo((dev_t
)arg
)) != NULL
) {
1327 *resultp
= (void *)(uintptr_t)
1328 ddi_get_instance(devinfo
);
1338 /* called from gld_getinfo */
1340 gld_finddevinfo(dev_t dev
)
1342 minor_t minor
= getminor(dev
);
1344 gld_mac_info_t
*mac
;
1347 dev_info_t
*devinfo
= NULL
;
1350 if (minor
== GLD_USE_STYLE2
) {
1352 * Style 2: this minor number does not correspond to
1353 * any particular instance number.
1355 * XXX We don't know what to say. See Bug 1165519.
1360 mutex_enter(&gld_device_list
.gld_devlock
); /* hold the device */
1362 device
= gld_devlookup(getmajor(dev
));
1363 if (device
== NULL
) {
1364 /* There are no attached instances of this device */
1365 mutex_exit(&gld_device_list
.gld_devlock
);
1370 * Search all attached macs and streams.
1372 * XXX We don't bother checking the DL_UNATTACHED streams since
1373 * we don't know what devinfo we should report back even if we
1374 * found the minor. Maybe we should associate streams that are
1375 * not currently attached to a PPA with the "first" devinfo node
1376 * of the major device to attach -- the one that created the
1377 * minor node for the generic device.
1379 mutex_enter(&device
->gld_devlock
);
1381 for (mac
= device
->gld_mac_next
;
1382 mac
!= (gld_mac_info_t
*)&device
->gld_mac_next
;
1383 mac
= mac
->gldm_next
) {
1384 gld_mac_pvt_t
*pvt
= (gld_mac_pvt_t
*)mac
->gldm_mac_pvt
;
1386 if (!(mac
->gldm_GLD_flags
& GLD_MAC_READY
))
1387 continue; /* this one's not ready yet */
1388 if (minor
<= GLD_MAX_STYLE1_MINOR
) {
1389 /* Style 1 -- look for the corresponding PPA */
1390 if (minor
== GLD_STYLE1_PPA_TO_MINOR(mac
->gldm_ppa
)) {
1391 devinfo
= mac
->gldm_devinfo
;
1392 goto out
; /* found it! */
1394 continue; /* not this PPA */
1397 /* We are looking for a clone */
1398 for (i
= 0; i
< VLAN_HASHSZ
; i
++) {
1399 for (vlan
= pvt
->vlan_hash
[i
];
1400 vlan
!= NULL
; vlan
= vlan
->gldv_next
) {
1401 for (str
= vlan
->gldv_str_next
;
1402 str
!= (gld_t
*)&vlan
->gldv_str_next
;
1403 str
= str
->gld_next
) {
1404 ASSERT(str
->gld_mac_info
== mac
);
1405 if (minor
== str
->gld_minor
) {
1406 devinfo
= mac
->gldm_devinfo
;
1414 mutex_exit(&device
->gld_devlock
);
1415 mutex_exit(&gld_device_list
.gld_devlock
);
1420 * STREAMS open routine. The device dependent driver specifies this as its
1425 gld_open(queue_t
*q
, dev_t
*dev
, int flag
, int sflag
, cred_t
*cred
)
1427 gld_mac_pvt_t
*mac_pvt
;
1430 gld_mac_info_t
*macinfo
;
1431 minor_t minor
= getminor(*dev
);
1437 if (minor
> GLD_MAX_STYLE1_MINOR
)
1440 ASSERT(q
->q_ptr
== NULL
); /* Clone device gives us a fresh Q */
1442 /* Find our per-major glddev_t structure */
1443 mutex_enter(&gld_device_list
.gld_devlock
);
1444 glddev
= gld_devlookup(getmajor(*dev
));
1447 * This glddev will hang around since detach (and therefore
1448 * gld_unregister) can't run while we're here in the open routine.
1450 mutex_exit(&gld_device_list
.gld_devlock
);
1456 if (gld_debug
& GLDPROT
) {
1457 if (minor
== GLD_USE_STYLE2
)
1458 cmn_err(CE_NOTE
, "gld_open(%p, Style 2)", (void *)q
);
1460 cmn_err(CE_NOTE
, "gld_open(%p, Style 1, minor = %d)",
1466 * get a per-stream structure and link things together so we
1467 * can easily find them later.
1469 gld
= kmem_zalloc(sizeof (gld_t
), KM_SLEEP
);
1472 * fill in the structure and state info
1475 gld
->gld_device
= glddev
;
1476 gld
->gld_state
= DL_UNATTACHED
;
1479 * we must atomically find a free minor number and add the stream
1480 * to a list, because gld_findminor has to traverse the lists to
1481 * determine which minor numbers are free.
1483 mutex_enter(&glddev
->gld_devlock
);
1485 /* find a free minor device number for the clone */
1486 gld
->gld_minor
= gld_findminor(glddev
);
1487 if (gld
->gld_minor
== 0) {
1488 mutex_exit(&glddev
->gld_devlock
);
1489 kmem_free(gld
, sizeof (gld_t
));
1493 #ifdef GLD_VERBOSE_DEBUG
1494 if (gld_debug
& GLDPROT
)
1495 cmn_err(CE_NOTE
, "gld_open() gld ptr: %p minor: %d",
1496 (void *)gld
, gld
->gld_minor
);
1499 if (minor
== GLD_USE_STYLE2
) {
1500 gld
->gld_style
= DL_STYLE2
;
1501 *dev
= makedevice(getmajor(*dev
), gld
->gld_minor
);
1502 WR(q
)->q_ptr
= q
->q_ptr
= (caddr_t
)gld
;
1503 gldinsque(gld
, glddev
->gld_str_prev
);
1504 #ifdef GLD_VERBOSE_DEBUG
1505 if (gld_debug
& GLDPROT
)
1506 cmn_err(CE_NOTE
, "GLDstruct added to device list");
1508 (void) qassociate(q
, -1);
1512 gld
->gld_style
= DL_STYLE1
;
1514 /* the PPA is actually 1 less than the minordev */
1515 ppa
= GLD_STYLE1_MINOR_TO_PPA(minor
);
1517 for (macinfo
= glddev
->gld_mac_next
;
1518 macinfo
!= (gld_mac_info_t
*)(&glddev
->gld_mac_next
);
1519 macinfo
= macinfo
->gldm_next
) {
1520 ASSERT(macinfo
!= NULL
);
1521 if (macinfo
->gldm_ppa
!= ppa
)
1524 if (!(macinfo
->gldm_GLD_flags
& GLD_MAC_READY
))
1525 continue; /* this one's not ready yet */
1528 * we found the correct PPA
1530 GLDM_LOCK(macinfo
, RW_WRITER
);
1532 gld
->gld_mac_info
= macinfo
;
1534 if (macinfo
->gldm_send_tagged
!= NULL
)
1535 gld
->gld_send
= macinfo
->gldm_send_tagged
;
1537 gld
->gld_send
= macinfo
->gldm_send
;
1539 /* now ready for action */
1540 gld
->gld_state
= DL_UNBOUND
;
1542 if ((vlan
= gld_get_vlan(macinfo
, VLAN_VID_NONE
)) == NULL
) {
1543 GLDM_UNLOCK(macinfo
);
1544 mutex_exit(&glddev
->gld_devlock
);
1545 kmem_free(gld
, sizeof (gld_t
));
1549 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
1550 if (!mac_pvt
->started
) {
1551 if (gld_start_mac(macinfo
) != GLD_SUCCESS
) {
1553 GLDM_UNLOCK(macinfo
);
1554 mutex_exit(&glddev
->gld_devlock
);
1555 kmem_free(gld
, sizeof (gld_t
));
1560 gld
->gld_vlan
= vlan
;
1561 vlan
->gldv_nstreams
++;
1562 gldinsque(gld
, vlan
->gldv_str_prev
);
1563 *dev
= makedevice(getmajor(*dev
), gld
->gld_minor
);
1564 WR(q
)->q_ptr
= q
->q_ptr
= (caddr_t
)gld
;
1566 GLDM_UNLOCK(macinfo
);
1567 #ifdef GLD_VERBOSE_DEBUG
1568 if (gld_debug
& GLDPROT
)
1570 "GLDstruct added to instance list");
1575 if (gld
->gld_state
== DL_UNATTACHED
) {
1576 mutex_exit(&glddev
->gld_devlock
);
1577 kmem_free(gld
, sizeof (gld_t
));
1582 mutex_exit(&glddev
->gld_devlock
);
1583 noenable(WR(q
)); /* We'll do the qenables manually */
1584 qprocson(q
); /* start the queues running */
1590 * normal stream close call checks current status and cleans up
1591 * data structures that were dynamically allocated
1595 gld_close(queue_t
*q
, int flag
, cred_t
*cred
)
1597 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
1598 glddev_t
*glddev
= gld
->gld_device
;
1604 if (gld_debug
& GLDPROT
) {
1605 cmn_err(CE_NOTE
, "gld_close(%p, Style %d)",
1606 (void *)q
, (gld
->gld_style
& 0x1) + 1);
1610 /* Hold all device streams lists still while we check for a macinfo */
1611 mutex_enter(&glddev
->gld_devlock
);
1613 if (gld
->gld_mac_info
!= NULL
) {
1614 /* If there's a macinfo, block recv while we change state */
1615 GLDM_LOCK(gld
->gld_mac_info
, RW_WRITER
);
1616 gld
->gld_flags
|= GLD_STR_CLOSING
; /* no more rcv putnexts */
1617 GLDM_UNLOCK(gld
->gld_mac_info
);
1619 /* no mac DL_ATTACHED right now */
1620 gld
->gld_flags
|= GLD_STR_CLOSING
;
1623 mutex_exit(&glddev
->gld_devlock
);
1626 * qprocsoff before we call gld_unbind/gldunattach, so that
1627 * we know wsrv isn't in there trying to undo what we're doing.
1631 ASSERT(gld
->gld_wput_count
== 0);
1632 gld
->gld_wput_count
= 0; /* just in case */
1634 if (gld
->gld_state
== DL_IDLE
) {
1635 /* Need to unbind */
1636 ASSERT(gld
->gld_mac_info
!= NULL
);
1637 (void) gld_unbind(WR(q
), NULL
);
1640 if (gld
->gld_state
== DL_UNBOUND
) {
1643 * For style 2 stream, gldunattach also
1644 * associate queue with NULL dip
1646 ASSERT(gld
->gld_mac_info
!= NULL
);
1647 (void) gldunattach(WR(q
), NULL
);
1650 /* disassociate the stream from the device */
1651 q
->q_ptr
= WR(q
)->q_ptr
= NULL
;
1654 * Since we unattached above (if necessary), we know that we're
1655 * on the per-major list of unattached streams, rather than a
1656 * per-PPA list. So we know we should hold the devlock.
1658 mutex_enter(&glddev
->gld_devlock
);
1659 gldremque(gld
); /* remove from Style 2 list */
1660 mutex_exit(&glddev
->gld_devlock
);
1662 kmem_free(gld
, sizeof (gld_t
));
1669 * simple read service procedure
1670 * purpose is to avoid the time it takes for packets
1671 * to move through IP so we can get them off the board
1672 * as fast as possible due to limited PC resources.
1674 * This is not normally used in the current implementation. It
1675 * can be selected with the undocumented property "fast_recv".
1676 * If that property is set, gld_recv will send the packet
1677 * upstream with a putq() rather than a putnext(), thus causing
1678 * this routine to be scheduled.
1681 gld_rsrv(queue_t
*q
)
1685 while ((mp
= getq(q
)) != NULL
) {
1686 if (canputnext(q
)) {
1697 * general gld stream write put routine. Receives fastpath data from upper
1698 * modules and processes it immediately. ioctl and M_PROTO/M_PCPROTO are
1699 * queued for later processing by the service procedure.
1703 gld_wput(queue_t
*q
, mblk_t
*mp
)
1705 gld_t
*gld
= (gld_t
*)(q
->q_ptr
);
1707 boolean_t multidata
= B_TRUE
;
1711 if (gld_debug
& GLDTRACE
)
1712 cmn_err(CE_NOTE
, "gld_wput(%p %p): type %x",
1713 (void *)q
, (void *)mp
, DB_TYPE(mp
));
1715 switch (DB_TYPE(mp
)) {
1718 /* fast data / raw support */
1719 /* we must be DL_ATTACHED and DL_BOUND to do this */
1720 /* Tricky to access memory without taking the mutex */
1721 if ((gld
->gld_flags
& (GLD_RAW
| GLD_FAST
)) == 0 ||
1722 gld
->gld_state
!= DL_IDLE
) {
1723 merror(q
, mp
, EPROTO
);
1727 * Cleanup MBLK_VTAG in case it is set by other
1728 * modules. MBLK_VTAG is used to save the vtag information.
1730 GLD_CLEAR_MBLK_VTAG(mp
);
1731 multidata
= B_FALSE
;
1734 /* Only call gld_start() directly if nothing queued ahead */
1735 /* No guarantees about ordering with different threads */
1740 * This can happen if wsrv has taken off the last mblk but
1741 * is still processing it.
1744 if (gld
->gld_in_wsrv
)
1748 * Keep a count of current wput calls to start.
1749 * Nonzero count delays any attempted DL_UNBIND.
1750 * See comments above gld_start().
1752 atomic_inc_32((uint32_t *)&gld
->gld_wput_count
);
1755 /* Recheck state now wput_count is set to prevent DL_UNBIND */
1756 /* If this Q is in process of DL_UNBIND, don't call start */
1757 if (gld
->gld_state
!= DL_IDLE
|| gld
->gld_in_unbind
) {
1758 /* Extremely unlikely */
1759 atomic_dec_32((uint32_t *)&gld
->gld_wput_count
);
1764 * Get the priority value. Note that in raw mode, the
1765 * per-packet priority value kept in b_band is ignored.
1767 upri
= (gld
->gld_flags
& GLD_RAW
) ? gld
->gld_upri
:
1768 UPRI(gld
, mp
->b_band
);
1770 rc
= (multidata
) ? gld_start_mdt(q
, mp
, GLD_WPUT
) :
1771 gld_start(q
, mp
, GLD_WPUT
, upri
);
1773 /* Allow DL_UNBIND again */
1775 atomic_dec_32((uint32_t *)&gld
->gld_wput_count
);
1777 if (rc
== GLD_NORESOURCES
)
1779 break; /* Done with this packet */
1782 /* Q not empty, in DL_DETACH, or start gave NORESOURCES */
1788 /* ioctl relies on wsrv single threading per queue */
1798 case M_FLUSH
: /* canonical flush handling */
1799 /* XXX Should these be FLUSHALL? */
1800 if (*mp
->b_rptr
& FLUSHW
)
1802 if (*mp
->b_rptr
& FLUSHR
) {
1804 *mp
->b_rptr
&= ~FLUSHW
;
1812 /* these rely on wsrv single threading per queue */
1819 if (gld_debug
& GLDETRACE
)
1821 "gld: Unexpected packet type from queue: 0x%x",
1830 * gld_wsrv - Incoming messages are processed according to the DLPI protocol
1833 * wsrv is single-threaded per Q. We make use of this to avoid taking the
1834 * lock for reading data items that are only ever written by us.
1838 gld_wsrv(queue_t
*q
)
1841 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
1842 gld_mac_info_t
*macinfo
;
1843 union DL_primitives
*prim
;
1845 boolean_t multidata
;
1849 if (gld_debug
& GLDTRACE
)
1850 cmn_err(CE_NOTE
, "gld_wsrv(%p)", (void *)q
);
1853 ASSERT(!gld
->gld_in_wsrv
);
1855 gld
->gld_xwait
= B_FALSE
; /* We are now going to process this Q */
1857 if (q
->q_first
== NULL
)
1860 macinfo
= gld
->gld_mac_info
;
1863 * Help wput avoid a call to gld_start if there might be a message
1864 * previously queued by that thread being processed here.
1866 gld
->gld_in_wsrv
= B_TRUE
;
1869 while ((mp
= getq(q
)) != NULL
) {
1870 switch (DB_TYPE(mp
)) {
1873 multidata
= (DB_TYPE(mp
) == M_MULTIDATA
);
1876 * retry of a previously processed UNITDATA_REQ
1877 * or is a RAW or FAST message from above.
1879 if (macinfo
== NULL
) {
1880 /* No longer attached to a PPA, drop packet */
1885 gld
->gld_sched_ran
= B_FALSE
;
1889 * Get the priority value. Note that in raw mode, the
1890 * per-packet priority value kept in b_band is ignored.
1892 upri
= (gld
->gld_flags
& GLD_RAW
) ? gld
->gld_upri
:
1893 UPRI(gld
, mp
->b_band
);
1895 err
= (multidata
) ? gld_start_mdt(q
, mp
, GLD_WSRV
) :
1896 gld_start(q
, mp
, GLD_WSRV
, upri
);
1897 if (err
== GLD_NORESOURCES
) {
1898 /* gld_sched will qenable us later */
1899 gld
->gld_xwait
= B_TRUE
; /* want qenable */
1902 * v2: we're not holding the lock; it's
1903 * possible that the driver could have already
1904 * called gld_sched (following up on its
1905 * return of GLD_NORESOURCES), before we got a
1906 * chance to do the putbq() and set gld_xwait.
1907 * So if we saw a call to gld_sched that
1908 * examined this queue, since our call to
1909 * gld_start() above, then it's possible we've
1910 * already seen the only call to gld_sched()
1911 * we're ever going to see. So we better retry
1912 * transmitting this packet right now.
1914 if (gld
->gld_sched_ran
) {
1916 if (gld_debug
& GLDTRACE
)
1917 cmn_err(CE_NOTE
, "gld_wsrv: "
1918 "sched was called");
1920 break; /* try again right now */
1922 gld
->gld_in_wsrv
= B_FALSE
;
1928 (void) gld_ioctl(q
, mp
);
1932 if (macinfo
== NULL
) {
1937 if (macinfo
->gldm_mctl
!= NULL
) {
1938 GLDM_LOCK(macinfo
, RW_WRITER
);
1939 (void) (*macinfo
->gldm_mctl
) (macinfo
, q
, mp
);
1940 GLDM_UNLOCK(macinfo
);
1942 /* This driver doesn't recognize, just drop */
1947 case M_PROTO
: /* Will be an DLPI message of some type */
1949 if ((err
= gld_cmds(q
, mp
)) != GLDE_OK
) {
1950 if (err
== GLDE_RETRY
) {
1951 gld
->gld_in_wsrv
= B_FALSE
;
1952 return (0); /* quit while we're ahead */
1954 prim
= (union DL_primitives
*)mp
->b_rptr
;
1955 dlerrorack(q
, mp
, prim
->dl_primitive
, err
, 0);
1960 /* This should never happen */
1962 if (gld_debug
& GLDERRS
)
1964 "gld_wsrv: db_type(%x) not supported",
1965 mp
->b_datap
->db_type
);
1967 freemsg(mp
); /* unknown types are discarded */
1973 gld
->gld_in_wsrv
= B_FALSE
;
1978 * gld_start() can get called from gld_wput(), gld_wsrv(), or gld_unitdata().
1980 * We only come directly from wput() in the GLD_FAST (fastpath) or RAW case.
1982 * In particular, we must avoid calling gld_precv*() if we came from wput().
1983 * gld_precv*() is where we, on the transmit side, loop back our outgoing
1984 * packets to the receive side if we are in physical promiscuous mode.
1985 * Since the receive side holds a lock across its call to the upstream
1986 * putnext, and that upstream module could well have looped back to our
1987 * wput() routine on the same thread, we cannot call gld_precv* from here
1988 * for fear of causing a recursive lock entry in our receive code.
1990 * There is a problem here when coming from gld_wput(). While wput
1991 * only comes here if the queue is attached to a PPA and bound to a SAP
1992 * and there are no messages on the queue ahead of the M_DATA that could
1993 * change that, it is theoretically possible that another thread could
1994 * now wput a DL_UNBIND and a DL_DETACH message, and the wsrv() routine
1995 * could wake up and process them, before we finish processing this
1996 * send of the M_DATA. This can only possibly happen on a Style 2 RAW or
1997 * FAST (fastpath) stream: non RAW/FAST streams always go through wsrv(),
1998 * and Style 1 streams only DL_DETACH in the close routine, where
1999 * qprocsoff() protects us. If this happens we could end up calling
2000 * gldm_send() after we have detached the stream and possibly called
2001 * gldm_stop(). Worse, once the number of attached streams goes to zero,
2002 * detach/unregister could be called, and the macinfo could go away entirely.
2004 * No one has ever seen this happen.
2006 * It is some trouble to fix this, and we would rather not add any mutex
2007 * logic into the wput() routine, which is supposed to be a "fast"
2010 * What I've done is use an atomic counter to keep a count of the number
2011 * of threads currently calling gld_start() from wput() on this stream.
2012 * If DL_DETACH sees this as nonzero, it putbqs the request back onto
2013 * the queue and qenables, hoping to have better luck next time. Since
2014 * people shouldn't be trying to send after they've asked to DL_DETACH,
2015 * hopefully very soon all the wput=>start threads should have returned
2016 * and the DL_DETACH will succeed. It's hard to test this since the odds
2017 * of the failure even trying to happen are so small. I probably could
2018 * have ignored the whole issue and never been the worse for it.
2020 * Because some GLDv2 Ethernet drivers do not allow the size of transmitted
2021 * packet to be greater than ETHERMAX, we must first strip the VLAN tag
2022 * from a tagged packet before passing it to the driver's gld_send() entry
2023 * point function, and pass the VLAN tag as a separate argument. The
2024 * gld_send() function may fail. In that case, the packet will need to be
2025 * queued in order to be processed again in GLD's service routine. As the
2026 * VTAG has already been stripped at that time, we save the VTAG information
2027 * in (the unused fields of) dblk using GLD_SAVE_MBLK_VTAG(), so that the
2028 * VTAG can also be queued and be able to be got when gld_start() is called
2029 * next time from gld_wsrv().
2031 * Some rules to use GLD_{CLEAR|SAVE}_MBLK_VTAG macros:
2033 * - GLD_SAVE_MBLK_VTAG() must be called to save the VTAG information each time
2034 * the message is queued by putbq().
2036 * - GLD_CLEAR_MBLK_VTAG() must be called to clear the bogus VTAG information
2037 * (if any) in dblk before the message is passed to the gld_start() function.
2040 gld_start(queue_t
*q
, mblk_t
*mp
, int caller
, uint32_t upri
)
2043 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
2044 gld_mac_info_t
*macinfo
;
2045 gld_mac_pvt_t
*mac_pvt
;
2047 gld_interface_t
*ifp
;
2050 uint32_t raw_vtag
= 0;
2052 struct gld_stats
*stats0
, *stats
= NULL
;
2054 ASSERT(DB_TYPE(mp
) == M_DATA
);
2055 macinfo
= gld
->gld_mac_info
;
2056 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
2057 ifp
= mac_pvt
->interfacep
;
2058 vlan
= (gld_vlan_t
*)gld
->gld_vlan
;
2059 vid
= vlan
->gldv_id
;
2062 * If this interface is a VLAN, the kstats of corresponding
2063 * "VLAN 0" should also be updated. Note that the gld_vlan_t
2064 * structure for VLAN 0 might not exist if there are no DLPI
2065 * consumers attaching on VLAN 0. Fortunately we can directly
2066 * access VLAN 0's kstats from macinfo.
2068 * Therefore, stats0 (VLAN 0's kstats) must always be
2069 * updated, and stats must to be updated if it is not NULL.
2071 stats0
= mac_pvt
->statistics
;
2072 if (vid
!= VLAN_VID_NONE
)
2073 stats
= vlan
->gldv_stats
;
2075 if ((*ifp
->interpreter
)(macinfo
, mp
, &pktinfo
, GLD_TX
) != 0) {
2077 if (gld_debug
& GLDERRS
)
2079 "gld_start: failed to interpret outbound packet");
2084 vtag
= VLAN_VID_NONE
;
2085 raw_vtag
= GLD_GET_MBLK_VTAG(mp
);
2086 if (GLD_VTAG_TCI(raw_vtag
) != 0) {
2087 uint16_t raw_pri
, raw_vid
, evid
;
2092 raw_pri
= GLD_VTAG_PRI(raw_vtag
);
2093 raw_vid
= GLD_VTAG_VID(raw_vtag
);
2094 GLD_CLEAR_MBLK_VTAG(mp
);
2096 if (gld
->gld_flags
& GLD_RAW
) {
2098 * In raw mode, we only expect untagged packets or
2099 * special priority-tagged packets on a VLAN stream.
2100 * Drop the packet if its VID is not zero.
2102 if (vid
!= VLAN_VID_NONE
&& raw_vid
!= VLAN_VID_NONE
)
2106 * If it is raw mode, use the per-stream priority if
2107 * the priority is not specified in the packet.
2108 * Otherwise, ignore the priority bits in the packet.
2110 upri
= (raw_pri
!= 0) ? raw_pri
: upri
;
2113 if (vid
== VLAN_VID_NONE
&& vid
!= raw_vid
) {
2114 gld_vlan_t
*tmp_vlan
;
2117 * This link is a physical link but the packet is
2118 * a VLAN tagged packet, the kstats of corresponding
2119 * VLAN (if any) should also be updated.
2121 tmp_vlan
= gld_find_vlan(macinfo
, raw_vid
);
2122 if (tmp_vlan
!= NULL
)
2123 stats
= tmp_vlan
->gldv_stats
;
2126 evid
= (vid
== VLAN_VID_NONE
) ? raw_vid
: vid
;
2127 if (evid
!= VLAN_VID_NONE
|| upri
!= 0)
2128 vtag
= GLD_MAKE_VTAG(upri
, VLAN_CFI_ETHER
, evid
);
2132 * Get vtag from the attached PPA of this stream.
2134 if ((vid
!= VLAN_VID_NONE
) ||
2135 ((macinfo
->gldm_type
== DL_ETHER
) && (upri
!= 0))) {
2136 vtag
= GLD_MAKE_VTAG(upri
, VLAN_CFI_ETHER
, vid
);
2141 * We're not holding the lock for this check. If the promiscuous
2142 * state is in flux it doesn't matter much if we get this wrong.
2144 if (mac_pvt
->nprom
> 0) {
2146 * We want to loopback to the receive side, but to avoid
2147 * recursive lock entry: if we came from wput(), which
2148 * could have looped back via IP from our own receive
2149 * interrupt thread, we decline this request. wput()
2150 * will then queue the packet for wsrv(). This means
2151 * that when snoop is running we don't get the advantage
2152 * of the wput() multithreaded direct entry to the
2153 * driver's send routine.
2155 if (caller
== GLD_WPUT
) {
2156 GLD_SAVE_MBLK_VTAG(mp
, raw_vtag
);
2157 (void) putbq(q
, mp
);
2158 return (GLD_NORESOURCES
);
2160 if (macinfo
->gldm_capabilities
& GLD_CAP_ZEROCOPY
)
2161 nmp
= dupmsg_noloan(mp
);
2165 nmp
= NULL
; /* we need no loopback */
2167 if (ifp
->hdr_size
> 0 &&
2168 pktinfo
.pktLen
> ifp
->hdr_size
+ (vtag
== 0 ? 0 : VTAG_SIZE
) +
2169 macinfo
->gldm_maxpkt
) {
2171 freemsg(nmp
); /* free the duped message */
2173 if (gld_debug
& GLDERRS
)
2175 "gld_start: oversize outbound packet, size %d,"
2176 "max %d", pktinfo
.pktLen
,
2177 ifp
->hdr_size
+ (vtag
== 0 ? 0 : VTAG_SIZE
) +
2178 macinfo
->gldm_maxpkt
);
2183 rc
= (*gld
->gld_send
)(macinfo
, mp
, vtag
);
2185 if (rc
!= GLD_SUCCESS
) {
2186 if (rc
== GLD_NORESOURCES
) {
2187 ATOMIC_BUMP(stats0
, stats
, glds_xmtretry
, 1);
2188 GLD_SAVE_MBLK_VTAG(mp
, raw_vtag
);
2189 (void) putbq(q
, mp
);
2191 /* transmit error; drop the packet */
2193 /* We're supposed to count failed attempts as well */
2194 UPDATE_STATS(stats0
, stats
, pktinfo
, 1);
2196 if (gld_debug
& GLDERRS
)
2198 "gld_start: gldm_send failed %d", rc
);
2202 freemsg(nmp
); /* free the dupped message */
2206 UPDATE_STATS(stats0
, stats
, pktinfo
, 1);
2209 * Loopback case. The message needs to be returned back on
2210 * the read side. This would silently fail if the dupmsg fails
2211 * above. This is probably OK, if there is no memory to dup the
2212 * block, then there isn't much we could do anyway.
2215 GLDM_LOCK(macinfo
, RW_WRITER
);
2216 gld_precv(macinfo
, nmp
, vtag
, stats
);
2217 GLDM_UNLOCK(macinfo
);
2220 return (GLD_SUCCESS
);
2224 ATOMIC_BUMP(stats0
, stats
, glds_xmtbadinterp
, 1);
2225 return (GLD_BADARG
);
2229 * With MDT V.2 a single message mp can have one header area and multiple
2230 * payload areas. A packet is described by dl_pkt_info, and each packet can
2231 * span multiple payload areas (currently with TCP, each packet will have one
2232 * header and at the most two payload areas). MACs might have a limit on the
2233 * number of payload segments (i.e. per packet scatter-gather limit), and
2234 * MDT V.2 has a way of specifying that with mdt_span_limit; the MAC driver
2235 * might also have a limit on the total number of payloads in a message, and
2236 * that is specified by mdt_max_pld.
2239 gld_start_mdt(queue_t
*q
, mblk_t
*mp
, int caller
)
2242 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
2243 gld_mac_info_t
*macinfo
= gld
->gld_mac_info
;
2244 gld_mac_pvt_t
*mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
2245 int numpacks
, mdtpacks
;
2246 gld_interface_t
*ifp
= mac_pvt
->interfacep
;
2248 gld_vlan_t
*vlan
= (gld_vlan_t
*)gld
->gld_vlan
;
2249 boolean_t doloop
= B_FALSE
;
2256 ASSERT(DB_TYPE(mp
) == M_MULTIDATA
);
2259 * We're not holding the lock for this check. If the promiscuous
2260 * state is in flux it doesn't matter much if we get this wrong.
2262 if (mac_pvt
->nprom
> 0) {
2264 * We want to loopback to the receive side, but to avoid
2265 * recursive lock entry: if we came from wput(), which
2266 * could have looped back via IP from our own receive
2267 * interrupt thread, we decline this request. wput()
2268 * will then queue the packet for wsrv(). This means
2269 * that when snoop is running we don't get the advantage
2270 * of the wput() multithreaded direct entry to the
2271 * driver's send routine.
2273 if (caller
== GLD_WPUT
) {
2274 (void) putbq(q
, mp
);
2275 return (GLD_NORESOURCES
);
2280 * unlike the M_DATA case, we don't have to call
2281 * dupmsg_noloan here because mmd_transform
2282 * (called by gld_precv_mdt) will make a copy of
2287 while (mp
!= NULL
) {
2289 * The lower layer driver only gets a single multidata
2290 * message; this also makes it easier to handle noresources.
2292 nextmp
= mp
->b_cont
;
2296 * Get number of packets in this message; if nothing
2297 * to transmit, go to next message.
2299 dlmdp
= mmd_getmultidata(mp
);
2300 if ((mdtpacks
= (int)mmd_getcnt(dlmdp
, NULL
, NULL
)) == 0) {
2307 * Run interpreter to populate media specific pktinfo fields.
2308 * This collects per MDT message information like sap,
2309 * broad/multicast etc.
2311 (void) (*ifp
->interpreter_mdt
)(macinfo
, mp
, NULL
, &pktinfo
,
2314 numpacks
= (*macinfo
->gldm_mdt_pre
)(macinfo
, mp
, &cookie
);
2318 * Driver indicates it can transmit at least 1, and
2319 * possibly all, packets in MDT message.
2321 int count
= numpacks
;
2323 for (dl_pkt
= mmd_getfirstpdesc(dlmdp
, &pinfo
);
2325 dl_pkt
= mmd_getnextpdesc(dl_pkt
, &pinfo
)) {
2327 * Format this packet by adding link header and
2328 * adjusting pdescinfo to include it; get
2331 (void) (*ifp
->interpreter_mdt
)(macinfo
, NULL
,
2332 &pinfo
, &pktinfo
, GLD_MDT_TXPKT
);
2334 totLen
+= pktinfo
.pktLen
;
2337 * Loop back packet before handing to the
2341 mmd_adjpdesc(dl_pkt
, &pinfo
) != NULL
) {
2342 GLDM_LOCK(macinfo
, RW_WRITER
);
2343 gld_precv_mdt(macinfo
, vlan
, mp
,
2345 GLDM_UNLOCK(macinfo
);
2349 * And send off to driver.
2351 (*macinfo
->gldm_mdt_send
)(macinfo
, cookie
,
2355 * Be careful not to invoke getnextpdesc if we
2356 * already sent the last packet, since driver
2357 * might have posted it to hardware causing a
2358 * completion and freemsg() so the MDT data
2359 * structures might not be valid anymore.
2364 (*macinfo
->gldm_mdt_post
)(macinfo
, mp
, cookie
);
2365 pktinfo
.pktLen
= totLen
;
2366 UPDATE_STATS(vlan
->gldv_stats
, NULL
, pktinfo
, numpacks
);
2369 * In the noresources case (when driver indicates it
2370 * can not transmit all packets in the MDT message),
2371 * adjust to skip the first few packets on retrial.
2373 if (numpacks
!= mdtpacks
) {
2375 * Release already processed packet descriptors.
2377 for (count
= 0; count
< numpacks
; count
++) {
2378 dl_pkt
= mmd_getfirstpdesc(dlmdp
,
2380 mmd_rempdesc(dl_pkt
);
2382 ATOMIC_BUMP(vlan
->gldv_stats
, NULL
,
2384 mp
->b_cont
= nextmp
;
2385 (void) putbq(q
, mp
);
2386 return (GLD_NORESOURCES
);
2388 } else if (numpacks
== 0) {
2390 * Driver indicates it can not transmit any packets
2391 * currently and will request retrial later.
2393 ATOMIC_BUMP(vlan
->gldv_stats
, NULL
, glds_xmtretry
, 1);
2394 mp
->b_cont
= nextmp
;
2395 (void) putbq(q
, mp
);
2396 return (GLD_NORESOURCES
);
2398 ASSERT(numpacks
== -1);
2400 * We're supposed to count failed attempts as well.
2402 dl_pkt
= mmd_getfirstpdesc(dlmdp
, &pinfo
);
2403 while (dl_pkt
!= NULL
) {
2405 * Call interpreter to determine total packet
2406 * bytes that are being dropped.
2408 (void) (*ifp
->interpreter_mdt
)(macinfo
, NULL
,
2409 &pinfo
, &pktinfo
, GLD_MDT_TXPKT
);
2411 totLen
+= pktinfo
.pktLen
;
2413 dl_pkt
= mmd_getnextpdesc(dl_pkt
, &pinfo
);
2415 pktinfo
.pktLen
= totLen
;
2416 UPDATE_STATS(vlan
->gldv_stats
, NULL
, pktinfo
, mdtpacks
);
2419 * Transmit error; drop the message, move on
2426 * Process the next multidata block, if there is one.
2431 return (GLD_SUCCESS
);
2435 * gld_intr (macinfo)
2438 gld_intr(gld_mac_info_t
*macinfo
)
2440 ASSERT(macinfo
!= NULL
);
2442 if (!(macinfo
->gldm_GLD_flags
& GLD_MAC_READY
))
2443 return (DDI_INTR_UNCLAIMED
);
2445 return ((*macinfo
->gldm_intr
)(macinfo
));
2449 * gld_sched (macinfo)
2451 * This routine scans the streams that refer to a specific macinfo
2452 * structure and causes the STREAMS scheduler to try to run them if
2453 * they are marked as waiting for the transmit buffer.
2456 gld_sched(gld_mac_info_t
*macinfo
)
2458 gld_mac_pvt_t
*mac_pvt
;
2463 ASSERT(macinfo
!= NULL
);
2465 GLDM_LOCK(macinfo
, RW_WRITER
);
2467 if (macinfo
->gldm_GLD_flags
& GLD_UNREGISTERED
) {
2468 /* We're probably being called from a leftover interrupt */
2469 GLDM_UNLOCK(macinfo
);
2473 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
2475 for (i
= 0; i
< VLAN_HASHSZ
; i
++) {
2476 for (vlan
= mac_pvt
->vlan_hash
[i
];
2477 vlan
!= NULL
; vlan
= vlan
->gldv_next
) {
2478 for (gld
= vlan
->gldv_str_next
;
2479 gld
!= (gld_t
*)&vlan
->gldv_str_next
;
2480 gld
= gld
->gld_next
) {
2481 ASSERT(gld
->gld_mac_info
== macinfo
);
2482 gld
->gld_sched_ran
= B_TRUE
;
2484 if (gld
->gld_xwait
) {
2485 gld
->gld_xwait
= B_FALSE
;
2486 qenable(WR(gld
->gld_qptr
));
2492 GLDM_UNLOCK(macinfo
);
2496 * gld_precv (macinfo, mp, vtag, stats)
2497 * called from gld_start to loopback a packet when in promiscuous mode
2499 * VLAN 0's statistics need to be updated. If stats is not NULL,
2500 * it needs to be updated as well.
2503 gld_precv(gld_mac_info_t
*macinfo
, mblk_t
*mp
, uint32_t vtag
,
2504 struct gld_stats
*stats
)
2506 gld_mac_pvt_t
*mac_pvt
;
2507 gld_interface_t
*ifp
;
2510 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo
));
2512 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
2513 ifp
= mac_pvt
->interfacep
;
2516 * call the media specific packet interpreter routine
2518 if ((*ifp
->interpreter
)(macinfo
, mp
, &pktinfo
, GLD_RXLOOP
) != 0) {
2520 BUMP(mac_pvt
->statistics
, stats
, glds_rcvbadinterp
, 1);
2522 if (gld_debug
& GLDERRS
)
2524 "gld_precv: interpreter failed");
2530 * Update the vtag information.
2532 pktinfo
.isTagged
= (vtag
!= VLAN_VID_NONE
);
2533 pktinfo
.vid
= GLD_VTAG_VID(vtag
);
2534 pktinfo
.cfi
= GLD_VTAG_CFI(vtag
);
2535 pktinfo
.user_pri
= GLD_VTAG_PRI(vtag
);
2537 gld_sendup(macinfo
, &pktinfo
, mp
, gld_paccept
);
2541 * Called from gld_start_mdt to loopback packet(s) when in promiscuous mode.
2542 * Note that 'vlan' is always a physical link, because MDT can only be
2543 * enabled on non-VLAN streams.
2547 gld_precv_mdt(gld_mac_info_t
*macinfo
, gld_vlan_t
*vlan
, mblk_t
*mp
,
2548 pdesc_t
*dl_pkt
, pktinfo_t
*pktinfo
)
2551 gld_mac_pvt_t
*mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
2552 gld_interface_t
*ifp
= mac_pvt
->interfacep
;
2554 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo
));
2557 * Get source/destination.
2559 (void) (*ifp
->interpreter_mdt
)(macinfo
, mp
, NULL
, pktinfo
,
2561 if ((adjmp
= mmd_transform(dl_pkt
)) != NULL
)
2562 gld_sendup(macinfo
, pktinfo
, adjmp
, gld_paccept
);
2566 * gld_recv (macinfo, mp)
2567 * called with an mac-level packet in a mblock; take the maclock,
2568 * try the ip4q and ip6q hack, and otherwise call gld_sendup.
2570 * V0 drivers already are holding the mutex when they call us.
2573 gld_recv(gld_mac_info_t
*macinfo
, mblk_t
*mp
)
2575 gld_recv_tagged(macinfo
, mp
, VLAN_VTAG_NONE
);
2579 gld_recv_tagged(gld_mac_info_t
*macinfo
, mblk_t
*mp
, uint32_t vtag
)
2581 gld_mac_pvt_t
*mac_pvt
;
2582 char pbuf
[3*GLD_MAX_ADDRLEN
];
2584 gld_interface_t
*ifp
;
2585 queue_t
*ipq
= NULL
;
2586 gld_vlan_t
*vlan
= NULL
, *vlan0
= NULL
, *vlann
= NULL
;
2587 struct gld_stats
*stats0
, *stats
= NULL
;
2591 ASSERT(macinfo
!= NULL
);
2592 ASSERT(mp
->b_datap
->db_ref
);
2594 GLDM_LOCK(macinfo
, RW_READER
);
2596 if (macinfo
->gldm_GLD_flags
& GLD_UNREGISTERED
) {
2597 /* We're probably being called from a leftover interrupt */
2603 * If this packet is a VLAN tagged packet, the kstats of corresponding
2604 * "VLAN 0" should also be updated. We can directly access VLAN 0's
2605 * kstats from macinfo.
2607 * Further, the packets needs to be passed to VLAN 0 if there is
2608 * any DLPI consumer on VLAN 0 who is interested in tagged packets
2609 * (DL_PROMISC_SAP is on or is bounded to ETHERTYPE_VLAN SAP).
2611 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
2612 stats0
= mac_pvt
->statistics
;
2614 vid
= GLD_VTAG_VID(vtag
);
2615 vlan0
= gld_find_vlan(macinfo
, VLAN_VID_NONE
);
2616 if (vid
!= VLAN_VID_NONE
) {
2618 * If there are no physical DLPI consumers interested in the
2619 * VLAN packet, clear vlan0.
2621 if ((vlan0
!= NULL
) && (vlan0
->gldv_nvlan_sap
== 0))
2624 * vlann is the VLAN with the same VID as the VLAN packet.
2626 vlann
= gld_find_vlan(macinfo
, vid
);
2628 stats
= vlann
->gldv_stats
;
2631 vlan
= (vid
== VLAN_VID_NONE
) ? vlan0
: vlann
;
2633 ifp
= mac_pvt
->interfacep
;
2634 err
= (*ifp
->interpreter
)(macinfo
, mp
, &pktinfo
, GLD_RXQUICK
);
2636 BUMP(stats0
, stats
, glds_bytercv64
, pktinfo
.pktLen
);
2637 BUMP(stats0
, stats
, glds_pktrcv64
, 1);
2639 if ((vlann
== NULL
) && (vlan0
== NULL
)) {
2645 * Check whether underlying media code supports the IPQ hack:
2647 * - the interpreter could quickly parse the packet
2648 * - the device type supports IPQ (ethernet and IPoIB)
2649 * - there is one, and only one, IP stream bound (to this VLAN)
2650 * - that stream is a "fastpath" stream
2651 * - the packet is of type ETHERTYPE_IP or ETHERTYPE_IPV6
2652 * - there are no streams in promiscuous mode (on this VLAN)
2653 * - if this packet is tagged, there is no need to send this
2654 * packet to physical streams
2656 if ((err
!= 0) && ((vlan
!= NULL
) && (vlan
->gldv_nprom
== 0)) &&
2657 (vlan
== vlan0
|| vlan0
== NULL
)) {
2658 switch (pktinfo
.ethertype
) {
2660 ipq
= vlan
->gldv_ipq
;
2662 case ETHERTYPE_IPV6
:
2663 ipq
= vlan
->gldv_ipv6q
;
2669 * Special case for IP; we can simply do the putnext here, if:
2670 * o The IPQ hack is possible (ipq != NULL).
2671 * o the packet is specifically for me, and therefore:
2672 * - the packet is not multicast or broadcast (fastpath only
2673 * wants unicast packets).
2675 * o the stream is not asserting flow control.
2681 * Skip the mac header. We know there is no LLC1/SNAP header
2684 mp
->b_rptr
+= pktinfo
.macLen
;
2690 * call the media specific packet interpreter routine
2692 if ((*ifp
->interpreter
)(macinfo
, mp
, &pktinfo
, GLD_RX
) != 0) {
2693 BUMP(stats0
, stats
, glds_rcvbadinterp
, 1);
2695 if (gld_debug
& GLDERRS
)
2697 "gld_recv_tagged: interpreter failed");
2704 * This is safe even if vtag is VLAN_VTAG_NONE
2707 pktinfo
.cfi
= GLD_VTAG_CFI(vtag
);
2709 if (pktinfo
.cfi
!= VLAN_CFI_ETHER
)
2710 cmn_err(CE_WARN
, "gld_recv_tagged: non-ETHER CFI");
2712 pktinfo
.user_pri
= GLD_VTAG_PRI(vtag
);
2713 pktinfo
.isTagged
= (vtag
!= VLAN_VID_NONE
);
2716 if ((gld_debug
& GLDRECV
) &&
2717 (!(gld_debug
& GLDNOBR
) ||
2718 (!pktinfo
.isBroadcast
&& !pktinfo
.isMulticast
))) {
2719 char pbuf2
[3*GLD_MAX_ADDRLEN
];
2721 cmn_err(CE_CONT
, "gld_recv_tagged: machdr=<%s -> %s>\n",
2722 gld_macaddr_sprintf(pbuf
, pktinfo
.shost
,
2723 macinfo
->gldm_addrlen
), gld_macaddr_sprintf(pbuf2
,
2724 pktinfo
.dhost
, macinfo
->gldm_addrlen
));
2725 cmn_err(CE_CONT
, "gld_recv_tagged: VlanId %d UserPri %d\n",
2728 cmn_err(CE_CONT
, "gld_recv_tagged: ethertype: %4x Len: %4d "
2729 "Hdr: %d,%d isMulticast: %s\n",
2734 pktinfo
.isMulticast
? "Y" : "N");
2738 gld_sendup(macinfo
, &pktinfo
, mp
, gld_accept
);
2741 GLDM_UNLOCK(macinfo
);
2744 /* =================================================================== */
2745 /* receive group: called from gld_recv and gld_precv* with maclock held */
2746 /* =================================================================== */
2749 * Search all the streams attached to the specified VLAN looking for
2750 * those eligible to receive the packet.
2751 * Note that in order to avoid an extra dupmsg(), if this is the first
2752 * eligible stream, remember it (in fgldp) so that we can send up the
2753 * message after this function.
2755 * Return errno if fails. Currently the only error is ENOMEM.
2758 gld_sendup_vlan(gld_vlan_t
*vlan
, pktinfo_t
*pktinfo
, mblk_t
*mp
,
2759 int (*acceptfunc
)(), void (*send
)(), int (*cansend
)(), gld_t
**fgldp
)
2765 ASSERT(vlan
!= NULL
);
2766 for (gld
= vlan
->gldv_str_next
; gld
!= (gld_t
*)&vlan
->gldv_str_next
;
2767 gld
= gld
->gld_next
) {
2768 #ifdef GLD_VERBOSE_DEBUG
2769 cmn_err(CE_NOTE
, "gld_sendup_vlan: SAP: %4x QPTR: %p "
2770 "QSTATE: %s", gld
->gld_sap
, (void *)gld
->gld_qptr
,
2771 gld
->gld_state
== DL_IDLE
? "IDLE" : "NOT IDLE");
2773 ASSERT(gld
->gld_qptr
!= NULL
);
2774 ASSERT(gld
->gld_state
== DL_IDLE
||
2775 gld
->gld_state
== DL_UNBOUND
);
2776 ASSERT(gld
->gld_vlan
== vlan
);
2778 if (gld
->gld_state
!= DL_IDLE
)
2779 continue; /* not eligible to receive */
2780 if (gld
->gld_flags
& GLD_STR_CLOSING
)
2781 continue; /* not eligible to receive */
2784 if ((gld_debug
& GLDRECV
) &&
2785 (!(gld_debug
& GLDNOBR
) ||
2786 (!pktinfo
->isBroadcast
&& !pktinfo
->isMulticast
)))
2788 "gld_sendup: queue sap: %4x promis: %s %s %s",
2790 gld
->gld_flags
& GLD_PROM_PHYS
? "phys " : " ",
2791 gld
->gld_flags
& GLD_PROM_SAP
? "sap " : " ",
2792 gld
->gld_flags
& GLD_PROM_MULT
? "multi" : " ");
2796 * The accept function differs depending on whether this is
2797 * a packet that we received from the wire or a loopback.
2799 if ((*acceptfunc
)(gld
, pktinfo
)) {
2801 pktinfo
->wasAccepted
= 1; /* known protocol */
2803 if (!(*cansend
)(gld
->gld_qptr
)) {
2805 * Upper stream is not accepting messages, i.e.
2806 * it is flow controlled, therefore we will
2807 * forgo sending the message up this stream.
2810 if (gld_debug
& GLDETRACE
)
2812 "gld_sendup: canput failed");
2814 BUMP(vlan
->gldv_stats
, NULL
, glds_blocked
, 1);
2815 qenable(gld
->gld_qptr
);
2820 * In order to avoid an extra dupmsg(), remember this
2821 * gld if this is the first eligible stream.
2823 if (*fgldp
== NULL
) {
2828 /* duplicate the packet for this stream */
2831 BUMP(vlan
->gldv_stats
, NULL
,
2832 glds_gldnorcvbuf
, 1);
2834 if (gld_debug
& GLDERRS
)
2836 "gld_sendup: dupmsg failed");
2838 /* couldn't get resources; drop it */
2842 /* pass the message up the stream */
2843 gld_passon(gld
, nmp
, pktinfo
, send
);
2850 * gld_sendup (macinfo, pktinfo, mp, acceptfunc)
2851 * called with an ethernet packet in an mblk; must decide whether
2852 * packet is for us and which streams to queue it to.
2855 gld_sendup(gld_mac_info_t
*macinfo
, pktinfo_t
*pktinfo
,
2856 mblk_t
*mp
, int (*acceptfunc
)())
2859 void (*send
)(queue_t
*qp
, mblk_t
*mp
);
2860 int (*cansend
)(queue_t
*qp
);
2861 gld_vlan_t
*vlan0
, *vlann
= NULL
;
2862 struct gld_stats
*stats0
, *stats
= NULL
;
2866 if (gld_debug
& GLDTRACE
)
2867 cmn_err(CE_NOTE
, "gld_sendup(%p, %p)", (void *)mp
,
2872 ASSERT(macinfo
!= NULL
);
2873 ASSERT(pktinfo
!= NULL
);
2874 ASSERT(GLDM_LOCK_HELD(macinfo
));
2877 * The tagged packets should also be looped back (transmit-side)
2878 * or sent up (receive-side) to VLAN 0 if VLAN 0 is set to
2879 * DL_PROMISC_SAP or there is any DLPI consumer bind to the
2880 * ETHERTYPE_VLAN SAP. The kstats of VLAN 0 needs to be updated
2883 stats0
= ((gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
)->statistics
;
2884 vlan0
= gld_find_vlan(macinfo
, VLAN_VID_NONE
);
2885 if (pktinfo
->vid
!= VLAN_VID_NONE
) {
2886 if ((vlan0
!= NULL
) && (vlan0
->gldv_nvlan_sap
== 0))
2888 vlann
= gld_find_vlan(macinfo
, pktinfo
->vid
);
2890 stats
= vlann
->gldv_stats
;
2893 ASSERT((vlan0
!= NULL
) || (vlann
!= NULL
));
2896 * The "fast" in "GLDOPT_FAST_RECV" refers to the speed at which
2897 * gld_recv returns to the caller's interrupt routine. The total
2898 * network throughput would normally be lower when selecting this
2899 * option, because we putq the messages and process them later,
2900 * instead of sending them with putnext now. Some time critical
2901 * device might need this, so it's here but undocumented.
2903 if (macinfo
->gldm_options
& GLDOPT_FAST_RECV
) {
2904 send
= (void (*)(queue_t
*, mblk_t
*))putq
;
2907 send
= (void (*)(queue_t
*, mblk_t
*))putnext
;
2908 cansend
= canputnext
;
2912 * Send the packets for all eligible streams.
2914 if (vlan0
!= NULL
) {
2915 err
= gld_sendup_vlan(vlan0
, pktinfo
, mp
, acceptfunc
, send
,
2918 if ((err
== 0) && (vlann
!= NULL
)) {
2919 err
= gld_sendup_vlan(vlann
, pktinfo
, mp
, acceptfunc
, send
,
2924 /* send the original dup of the packet up the first stream found */
2926 gld_passon(fgld
, mp
, pktinfo
, send
);
2928 freemsg(mp
); /* no streams matched */
2930 /* We do not count looped back packets */
2931 if (acceptfunc
== gld_paccept
)
2932 return; /* transmit loopback case */
2934 if (pktinfo
->isBroadcast
)
2935 BUMP(stats0
, stats
, glds_brdcstrcv
, 1);
2936 else if (pktinfo
->isMulticast
)
2937 BUMP(stats0
, stats
, glds_multircv
, 1);
2939 /* No stream accepted this packet */
2940 if (!pktinfo
->wasAccepted
)
2941 BUMP(stats0
, stats
, glds_unknowns
, 1);
/* True when the stream is attached to the physical (untagged) link. */
#define	GLD_IS_PHYS(gld) \
	(((gld_vlan_t *)gld->gld_vlan)->gldv_id == VLAN_VID_NONE)

/*
 * A packet matches a stream if:
 *	The stream's VLAN id is the same as the one in the packet.
 *	and the stream accepts EtherType encoded packets and the type matches
 *	or the stream accepts LLC packets and the packet is an LLC packet
 */
#define	MATCH(stream, pktinfo) \
	((((gld_vlan_t *)stream->gld_vlan)->gldv_id == pktinfo->vid) && \
	((stream->gld_ethertype && stream->gld_sap == pktinfo->ethertype) || \
	(!stream->gld_ethertype && pktinfo->isLLC)))
2959 * This function validates a packet for sending up a particular
2960 * stream. The message header has been parsed and its characteristic
2961 * are recorded in the pktinfo data structure. The streams stack info
2962 * are presented in gld data structures.
2965 gld_accept(gld_t
*gld
, pktinfo_t
*pktinfo
)
2968 * if there is no match do not bother checking further.
2969 * Note that it is okay to examine gld_vlan because
2970 * macinfo->gldm_lock is held.
2972 * Because all tagged packets have SAP value ETHERTYPE_VLAN,
2973 * these packets will pass the SAP filter check if the stream
2974 * is a ETHERTYPE_VLAN listener.
2976 if ((!MATCH(gld
, pktinfo
) && !(gld
->gld_flags
& GLD_PROM_SAP
) &&
2977 !(GLD_IS_PHYS(gld
) && gld
->gld_sap
== ETHERTYPE_VLAN
&&
2978 pktinfo
->isTagged
)))
2982 * We don't accept any packet from the hardware if we originated it.
2983 * (Contrast gld_paccept, the send-loopback accept function.)
2985 if (pktinfo
->isLooped
)
2989 * If the packet is broadcast or sent to us directly we will accept it.
2990 * Also we will accept multicast packets requested by the stream.
2992 if (pktinfo
->isForMe
|| pktinfo
->isBroadcast
||
2993 gld_mcmatch(gld
, pktinfo
))
2997 * Finally, accept anything else if we're in promiscuous mode
2999 if (gld
->gld_flags
& GLD_PROM_PHYS
)
3006 * Return TRUE if the given multicast address is one
3007 * of those that this particular Stream is interested in.
3010 gld_mcmatch(gld_t
*gld
, pktinfo_t
*pktinfo
)
3013 * Return FALSE if not a multicast address.
3015 if (!pktinfo
->isMulticast
)
3019 * Check if all multicasts have been enabled for this Stream
3021 if (gld
->gld_flags
& GLD_PROM_MULT
)
3025 * Return FALSE if no multicast addresses enabled for this Stream.
3027 if (!gld
->gld_mcast
)
3031 * Otherwise, look for it in the table.
3033 return (gld_multicast(pktinfo
->dhost
, gld
));
3037 * gld_multicast determines if the address is a multicast address for
3041 gld_multicast(unsigned char *macaddr
, gld_t
*gld
)
3045 ASSERT(GLDM_LOCK_HELD(gld
->gld_mac_info
));
3047 if (!gld
->gld_mcast
)
3050 for (i
= 0; i
< gld
->gld_multicnt
; i
++) {
3051 if (gld
->gld_mcast
[i
]) {
3052 ASSERT(gld
->gld_mcast
[i
]->gldm_refcnt
);
3053 if (mac_eq(gld
->gld_mcast
[i
]->gldm_addr
, macaddr
,
3054 gld
->gld_mac_info
->gldm_addrlen
))
3063 * accept function for looped back packets
3066 gld_paccept(gld_t
*gld
, pktinfo_t
*pktinfo
)
3069 * Note that it is okay to examine gld_vlan because macinfo->gldm_lock
3072 * If a stream is a ETHERTYPE_VLAN listener, it must
3073 * accept all tagged packets as those packets have SAP value
3076 return (gld
->gld_flags
& GLD_PROM_PHYS
&&
3077 (MATCH(gld
, pktinfo
) || gld
->gld_flags
& GLD_PROM_SAP
||
3078 (GLD_IS_PHYS(gld
) && gld
->gld_sap
== ETHERTYPE_VLAN
&&
3079 pktinfo
->isTagged
)));
3084 gld_passon(gld_t
*gld
, mblk_t
*mp
, pktinfo_t
*pktinfo
,
3085 void (*send
)(queue_t
*qp
, mblk_t
*mp
))
3087 boolean_t is_phys
= GLD_IS_PHYS(gld
);
3089 boolean_t addtag
= B_FALSE
;
3093 if (gld_debug
& GLDTRACE
)
3094 cmn_err(CE_NOTE
, "gld_passon(%p, %p, %p)", (void *)gld
,
3095 (void *)mp
, (void *)pktinfo
);
3097 if ((gld_debug
& GLDRECV
) && (!(gld_debug
& GLDNOBR
) ||
3098 (!pktinfo
->isBroadcast
&& !pktinfo
->isMulticast
)))
3099 cmn_err(CE_NOTE
, "gld_passon: q: %p mblk: %p minor: %d sap: %x",
3100 (void *)gld
->gld_qptr
->q_next
, (void *)mp
, gld
->gld_minor
,
3104 * Figure out how much of the packet header to throw away.
3106 * Normal DLPI (non RAW/FAST) streams also want the
3107 * DL_UNITDATA_IND M_PROTO message block prepended to the M_DATA.
3109 if (gld
->gld_flags
& GLD_RAW
) {
3111 * The packet will be tagged in the following cases:
3112 * - if priority is not 0
3113 * - a tagged packet sent on a physical link
3115 if ((pktinfo
->isTagged
&& is_phys
) || (pktinfo
->user_pri
!= 0))
3120 * The packet will be tagged if it meets all below conditions:
3121 * - this is a physical stream
3122 * - this packet is tagged packet
3123 * - the stream is either a DL_PROMISC_SAP listener or a
3124 * ETHERTYPE_VLAN listener
3126 if (is_phys
&& pktinfo
->isTagged
&&
3127 ((gld
->gld_sap
== ETHERTYPE_VLAN
) ||
3128 (gld
->gld_flags
& GLD_PROM_SAP
))) {
3132 skiplen
= pktinfo
->macLen
; /* skip mac header */
3133 if (gld
->gld_ethertype
)
3134 skiplen
+= pktinfo
->hdrLen
; /* skip any extra */
3136 if (skiplen
>= pktinfo
->pktLen
) {
3138 * If the interpreter did its job right, then it cannot be
3139 * asking us to skip more bytes than are in the packet!
3140 * However, there could be zero data bytes left after the
3141 * amount to skip. DLPI specifies that passed M_DATA blocks
3142 * should contain at least one byte of data, so if we have
3143 * none we just drop it.
3145 ASSERT(!(skiplen
> pktinfo
->pktLen
));
3151 mblk_t
*savemp
= mp
;
3153 vtag
= GLD_MAKE_VTAG(pktinfo
->user_pri
, pktinfo
->cfi
,
3154 is_phys
? pktinfo
->vid
: VLAN_VID_NONE
);
3155 if ((mp
= gld_insert_vtag_ether(mp
, vtag
)) == NULL
) {
3162 * Skip over the header(s), taking care to possibly handle message
3163 * fragments shorter than the amount we need to skip. Hopefully
3164 * the driver will put the entire packet, or at least the entire
3165 * header, into a single message block. But we handle it if not.
3167 while (skiplen
>= MBLKL(mp
)) {
3168 mblk_t
*savemp
= mp
;
3169 skiplen
-= MBLKL(mp
);
3171 ASSERT(mp
!= NULL
); /* because skiplen < pktinfo->pktLen */
3174 mp
->b_rptr
+= skiplen
;
3176 /* Add M_PROTO if necessary, and pass upstream */
3177 if (((gld
->gld_flags
& GLD_FAST
) && !pktinfo
->isMulticast
&&
3178 !pktinfo
->isBroadcast
) || (gld
->gld_flags
& GLD_RAW
)) {
3179 /* RAW/FAST: just send up the M_DATA */
3180 (*send
)(gld
->gld_qptr
, mp
);
3182 /* everybody else wants to see a unitdata_ind structure */
3183 mp
= gld_addudind(gld
, mp
, pktinfo
, addtag
);
3185 (*send
)(gld
->gld_qptr
, mp
);
3186 /* if it failed, gld_addudind already bumped statistic */
3191 * gld_addudind(gld, mp, pktinfo)
3192 * format a DL_UNITDATA_IND message to be sent upstream to the user
3195 gld_addudind(gld_t
*gld
, mblk_t
*mp
, pktinfo_t
*pktinfo
, boolean_t tagged
)
3197 gld_mac_info_t
*macinfo
= gld
->gld_mac_info
;
3198 gld_vlan_t
*vlan
= (gld_vlan_t
*)gld
->gld_vlan
;
3199 dl_unitdata_ind_t
*dludindp
;
3205 if (gld_debug
& GLDTRACE
)
3206 cmn_err(CE_NOTE
, "gld_addudind(%p, %p, %p)", (void *)gld
,
3207 (void *)mp
, (void *)pktinfo
);
3209 ASSERT(macinfo
!= NULL
);
3212 * Allocate the DL_UNITDATA_IND M_PROTO header, if allocation fails
3213 * might as well discard since we can't go further
3215 size
= sizeof (dl_unitdata_ind_t
) +
3216 2 * (macinfo
->gldm_addrlen
+ abs(macinfo
->gldm_saplen
));
3217 if ((nmp
= allocb(size
, BPRI_MED
)) == NULL
) {
3219 BUMP(vlan
->gldv_stats
, NULL
, glds_gldnorcvbuf
, 1);
3221 if (gld_debug
& GLDERRS
)
3223 "gld_addudind: allocb failed");
3227 DB_TYPE(nmp
) = M_PROTO
;
3228 nmp
->b_rptr
= nmp
->b_datap
->db_lim
- size
;
3231 type
= ETHERTYPE_VLAN
;
3233 type
= (gld
->gld_ethertype
) ? pktinfo
->ethertype
: 0;
3237 * now setup the DL_UNITDATA_IND header
3239 * XXX This looks broken if the saps aren't two bytes.
3241 dludindp
= (dl_unitdata_ind_t
*)nmp
->b_rptr
;
3242 dludindp
->dl_primitive
= DL_UNITDATA_IND
;
3243 dludindp
->dl_src_addr_length
=
3244 dludindp
->dl_dest_addr_length
= macinfo
->gldm_addrlen
+
3245 abs(macinfo
->gldm_saplen
);
3246 dludindp
->dl_dest_addr_offset
= sizeof (dl_unitdata_ind_t
);
3247 dludindp
->dl_src_addr_offset
= dludindp
->dl_dest_addr_offset
+
3248 dludindp
->dl_dest_addr_length
;
3250 dludindp
->dl_group_address
= (pktinfo
->isMulticast
||
3251 pktinfo
->isBroadcast
);
3253 nmp
->b_wptr
= nmp
->b_rptr
+ dludindp
->dl_dest_addr_offset
;
3255 mac_copy(pktinfo
->dhost
, nmp
->b_wptr
, macinfo
->gldm_addrlen
);
3256 nmp
->b_wptr
+= macinfo
->gldm_addrlen
;
3258 ASSERT(macinfo
->gldm_saplen
== -2); /* XXX following code assumes */
3259 *(ushort_t
*)(nmp
->b_wptr
) = type
;
3260 nmp
->b_wptr
+= abs(macinfo
->gldm_saplen
);
3262 ASSERT(nmp
->b_wptr
== nmp
->b_rptr
+ dludindp
->dl_src_addr_offset
);
3264 mac_copy(pktinfo
->shost
, nmp
->b_wptr
, macinfo
->gldm_addrlen
);
3265 nmp
->b_wptr
+= macinfo
->gldm_addrlen
;
3267 *(ushort_t
*)(nmp
->b_wptr
) = type
;
3268 nmp
->b_wptr
+= abs(macinfo
->gldm_saplen
);
3270 if (pktinfo
->nosource
)
3271 dludindp
->dl_src_addr_offset
= dludindp
->dl_src_addr_length
= 0;
3276 /* ======================================================= */
3277 /* wsrv group: called from wsrv, single threaded per queue */
3278 /* ======================================================= */
3281 * We go to some trouble to avoid taking the same lock during normal
3282 * transmit processing as we do during normal receive processing.
3284 * Elements of the per-instance macinfo and per-stream gld_t structures
3285 * are for the most part protected by the GLDM_LOCK rwlock/mutex.
3286 * (Elements of the gld_mac_pvt_t structure are considered part of the
3287 * macinfo structure for purposes of this discussion).
3289 * However, it is more complicated than that:
3291 * Elements of the macinfo structure that are set before the macinfo
3292 * structure is added to its device list by gld_register(), and never
3293 * thereafter modified, are accessed without requiring taking the lock.
3294 * A similar rule applies to those elements of the gld_t structure that
3295 * are written by gld_open() before the stream is added to any list.
3297 * Most other elements of the macinfo structure may only be read or
3298 * written while holding the maclock.
3300 * Most writable elements of the gld_t structure are written only
3301 * within the single-threaded domain of wsrv() and subsidiaries.
3302 * (This domain includes open/close while qprocs are not on.)
3303 * The maclock need not be taken while within that domain
3304 * simply to read those elements. Writing to them, even within
3305 * that domain, or reading from it outside that domain, requires
3306 * holding the maclock. Exception: if the stream is not
3307 * presently attached to a PPA, there is no associated macinfo,
3308 * and no maclock need be taken.
3310 * The curr_macaddr element of the mac private structure is also
3311 * protected by the GLDM_LOCK rwlock/mutex, like most other members
3312 * of that structure. However, there are a few instances in the
3313 * transmit path where we choose to forgo lock protection when
3314 * reading this variable. This is to avoid lock contention between
3315 * threads executing the DL_UNITDATA_REQ case and receive threads.
3317 * In doing so we will take a small risk of a few corrupted packets
3318 * during the short and rare times when someone is changing the interface's
3318 * physical address. We consider the small cost in this rare case to be
3319 * worth the benefit of reduced lock contention under normal operating
3320 * conditions. The risk/cost is small because:
3321 * 1. there is no guarantee at this layer of uncorrupted delivery.
3322 * 2. the physaddr doesn't change very often - no performance hit.
3323 * 3. if the physaddr changes, other stuff is going to be screwed
3324 * up for a while anyway, while other sites refigure ARP, etc.,
3325 * so losing a couple of packets is the least of our worries.
3327 * The list of streams associated with a macinfo is protected by
3328 * two locks: the per-macinfo maclock, and the per-major-device
3329 * gld_devlock. Both must be held to modify the list, but either
3330 * may be held to protect the list during reading/traversing. This
3331 * allows independent locking for multiple instances in the receive
3332 * path (using macinfo), while facilitating routines that must search
3333 * the entire set of streams associated with a major device, such as
3334 * gld_findminor(), gld_finddevinfo(), close(). The "nstreams"
3335 * macinfo element, and the gld_mac_info gld_t element, are similarly
3336 * protected, since they change at exactly the same time macinfo
3337 * streams list does.
3339 * The list of macinfo structures associated with a major device
3340 * structure is protected by the gld_devlock, as is the per-major
3341 * list of Style 2 streams in the DL_UNATTACHED state.
3343 * The list of major devices is kept on a module-global list
3344 * gld_device_list, which has its own lock to protect the list.
3346 * When it is necessary to hold more than one lock at a time, they
3347 * are acquired in this "outside in" order:
3348 * gld_device_list.gld_devlock
3349 * glddev->gld_devlock
3350 * GLDM_LOCK(macinfo)
3352 * Finally, there are some "volatile" elements of the gld_t structure
3353 * used for synchronization between various routines that don't share
3354 * the same mutexes. See the routines for details. These are:
3355 * gld_xwait between gld_wsrv() and gld_sched()
3356 * gld_sched_ran between gld_wsrv() and gld_sched()
3357 * gld_in_unbind between gld_wput() and wsrv's gld_unbind()
3358 * gld_wput_count between gld_wput() and wsrv's gld_unbind()
3359 * gld_in_wsrv between gld_wput() and gld_wsrv()
3360 * (used in conjunction with q->q_first)
3365 * handles all ioctl requests passed downstream. This routine is
3366 * passed a pointer to the message block with the ioctl request in it, and a
3367 * pointer to the queue so it can respond to the ioctl request with an ack.
3370 gld_ioctl(queue_t
*q
, mblk_t
*mp
)
3372 struct iocblk
*iocp
;
3374 gld_mac_info_t
*macinfo
;
3377 if (gld_debug
& GLDTRACE
)
3378 cmn_err(CE_NOTE
, "gld_ioctl(%p %p)", (void *)q
, (void *)mp
);
3380 gld
= (gld_t
*)q
->q_ptr
;
3381 iocp
= (struct iocblk
*)mp
->b_rptr
;
3382 switch (iocp
->ioc_cmd
) {
3383 case DLIOCRAW
: /* raw M_DATA mode */
3384 gld
->gld_flags
|= GLD_RAW
;
3385 DB_TYPE(mp
) = M_IOCACK
;
3389 case DL_IOC_HDR_INFO
: /* fastpath */
3391 * DL_IOC_HDR_INFO should only come from IP. The one
3392 * initiated from user-land should not be allowed.
3394 if ((gld_global_options
& GLD_OPT_NO_FASTPATH
) ||
3395 (iocp
->ioc_cr
!= kcred
)) {
3396 miocnak(q
, mp
, 0, EINVAL
);
3399 gld_fastpath(gld
, q
, mp
);
3402 case DLIOCMARGININFO
: { /* margin size */
3405 if ((macinfo
= gld
->gld_mac_info
) == NULL
) {
3406 miocnak(q
, mp
, 0, EINVAL
);
3410 if ((err
= miocpullup(mp
, sizeof (uint32_t))) != 0) {
3411 miocnak(q
, mp
, 0, err
);
3415 *((uint32_t *)mp
->b_cont
->b_rptr
) = macinfo
->gldm_margin
;
3416 miocack(q
, mp
, sizeof (uint32_t), 0);
3420 macinfo
= gld
->gld_mac_info
;
3421 if (macinfo
== NULL
|| macinfo
->gldm_ioctl
== NULL
) {
3422 miocnak(q
, mp
, 0, EINVAL
);
3426 GLDM_LOCK(macinfo
, RW_WRITER
);
3427 (void) (*macinfo
->gldm_ioctl
) (macinfo
, q
, mp
);
3428 GLDM_UNLOCK(macinfo
);
3435 * Since the rules for "fastpath" mode don't seem to be documented
3436 * anywhere, I will describe GLD's rules for fastpath users here:
3438 * Once in this mode you remain there until close.
3439 * If you unbind/rebind you should get a new header using DL_IOC_HDR_INFO.
3440 * You must be bound (DL_IDLE) to transmit.
3441 * There are other rules not listed above.
3444 gld_fastpath(gld_t
*gld
, queue_t
*q
, mblk_t
*mp
)
3446 gld_interface_t
*ifp
;
3447 gld_mac_info_t
*macinfo
;
3448 dl_unitdata_req_t
*dludp
;
3450 t_scalar_t off
, len
;
3454 if (gld
->gld_state
!= DL_IDLE
) {
3455 miocnak(q
, mp
, 0, EINVAL
);
3459 macinfo
= gld
->gld_mac_info
;
3460 ASSERT(macinfo
!= NULL
);
3461 maclen
= macinfo
->gldm_addrlen
+ abs(macinfo
->gldm_saplen
);
3463 error
= miocpullup(mp
, sizeof (dl_unitdata_req_t
) + maclen
);
3465 miocnak(q
, mp
, 0, error
);
3469 dludp
= (dl_unitdata_req_t
*)mp
->b_cont
->b_rptr
;
3470 off
= dludp
->dl_dest_addr_offset
;
3471 len
= dludp
->dl_dest_addr_length
;
3472 if (dludp
->dl_primitive
!= DL_UNITDATA_REQ
||
3473 !MBLKIN(mp
->b_cont
, off
, len
) || len
!= maclen
) {
3474 miocnak(q
, mp
, 0, EINVAL
);
3479 * We take the fastpath request as a declaration that they will accept
3480 * M_DATA messages from us, whether or not we are willing to accept
3481 * M_DATA from them. This allows us to have fastpath in one direction
3482 * (flow upstream) even on media with Source Routing, where we are
3483 * unable to provide a fixed MAC header to be prepended to downstream
3484 * flowing packets. So we set GLD_FAST whether or not we decide to
3485 * allow them to send M_DATA down to us.
3487 GLDM_LOCK(macinfo
, RW_WRITER
);
3488 gld
->gld_flags
|= GLD_FAST
;
3489 GLDM_UNLOCK(macinfo
);
3491 ifp
= ((gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
)->interfacep
;
3493 /* This will fail for Source Routing media */
3494 /* Also on Ethernet on 802.2 SAPs */
3495 if ((nmp
= (*ifp
->mkfastpath
)(gld
, mp
)) == NULL
) {
3496 miocnak(q
, mp
, 0, ENOMEM
);
3501 * Link new mblk in after the "request" mblks.
3504 miocack(q
, mp
, msgdsize(mp
->b_cont
), 0);
3509 * process the DL commands as defined in dlpi.h
3510 * note that the primitives return status which is passed back
3511 * to the service procedure. If the value is GLDE_RETRY, then
3512 * it is assumed that processing must stop and the primitive has
3513 * been put back onto the queue. If the value is any other error,
3514 * then an error ack is generated by the service procedure.
3517 gld_cmds(queue_t
*q
, mblk_t
*mp
)
3519 union DL_primitives
*dlp
= (union DL_primitives
*)mp
->b_rptr
;
3520 gld_t
*gld
= (gld_t
*)(q
->q_ptr
);
3521 int result
= DL_BADPRIM
;
3522 int mblkl
= MBLKL(mp
);
3525 /* Make sure we have at least dlp->dl_primitive */
3526 if (mblkl
< sizeof (dlp
->dl_primitive
))
3527 return (DL_BADPRIM
);
3529 dlreq
= dlp
->dl_primitive
;
3531 if (gld_debug
& GLDTRACE
)
3533 "gld_cmds(%p, %p):dlp=%p, dlp->dl_primitive=%d",
3534 (void *)q
, (void *)mp
, (void *)dlp
, dlreq
);
3539 if (mblkl
< DL_UDQOS_REQ_SIZE
)
3541 result
= gld_udqos(q
, mp
);
3545 if (mblkl
< DL_BIND_REQ_SIZE
)
3547 result
= gld_bind(q
, mp
);
3551 if (mblkl
< DL_UNBIND_REQ_SIZE
)
3553 result
= gld_unbind(q
, mp
);
3556 case DL_UNITDATA_REQ
:
3557 if (mblkl
< DL_UNITDATA_REQ_SIZE
)
3559 result
= gld_unitdata(q
, mp
);
3563 if (mblkl
< DL_INFO_REQ_SIZE
)
3565 result
= gld_inforeq(q
, mp
);
3569 if (mblkl
< DL_ATTACH_REQ_SIZE
)
3571 if (gld
->gld_style
== DL_STYLE2
)
3572 result
= gldattach(q
, mp
);
3574 result
= DL_NOTSUPPORTED
;
3578 if (mblkl
< DL_DETACH_REQ_SIZE
)
3580 if (gld
->gld_style
== DL_STYLE2
)
3581 result
= gldunattach(q
, mp
);
3583 result
= DL_NOTSUPPORTED
;
3586 case DL_ENABMULTI_REQ
:
3587 if (mblkl
< DL_ENABMULTI_REQ_SIZE
)
3589 result
= gld_enable_multi(q
, mp
);
3592 case DL_DISABMULTI_REQ
:
3593 if (mblkl
< DL_DISABMULTI_REQ_SIZE
)
3595 result
= gld_disable_multi(q
, mp
);
3598 case DL_PHYS_ADDR_REQ
:
3599 if (mblkl
< DL_PHYS_ADDR_REQ_SIZE
)
3601 result
= gld_physaddr(q
, mp
);
3604 case DL_SET_PHYS_ADDR_REQ
:
3605 if (mblkl
< DL_SET_PHYS_ADDR_REQ_SIZE
)
3607 result
= gld_setaddr(q
, mp
);
3610 case DL_PROMISCON_REQ
:
3611 if (mblkl
< DL_PROMISCON_REQ_SIZE
)
3613 result
= gld_promisc(q
, mp
, dlreq
, B_TRUE
);
3616 case DL_PROMISCOFF_REQ
:
3617 if (mblkl
< DL_PROMISCOFF_REQ_SIZE
)
3619 result
= gld_promisc(q
, mp
, dlreq
, B_FALSE
);
3622 case DL_GET_STATISTICS_REQ
:
3623 if (mblkl
< DL_GET_STATISTICS_REQ_SIZE
)
3625 result
= gld_get_statistics(q
, mp
);
3628 case DL_CAPABILITY_REQ
:
3629 if (mblkl
< DL_CAPABILITY_REQ_SIZE
)
3631 result
= gld_cap(q
, mp
);
3635 if (mblkl
< DL_NOTIFY_REQ_SIZE
)
3637 result
= gld_notify_req(q
, mp
);
3644 case DL_CONTROL_REQ
:
3645 case DL_PASSIVE_REQ
:
3646 result
= DL_NOTSUPPORTED
;
3651 if (gld_debug
& GLDERRS
)
3653 "gld_cmds: unknown M_PROTO message: %d",
3656 result
= DL_BADPRIM
;
3663 gld_cap(queue_t
*q
, mblk_t
*mp
)
3665 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
3666 dl_capability_req_t
*dlp
= (dl_capability_req_t
*)mp
->b_rptr
;
3668 if (gld
->gld_state
== DL_UNATTACHED
)
3669 return (DL_OUTSTATE
);
3671 if (dlp
->dl_sub_length
== 0)
3672 return (gld_cap_ack(q
, mp
));
3674 return (gld_cap_enable(q
, mp
));
3678 gld_cap_ack(queue_t
*q
, mblk_t
*mp
)
3680 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
3681 gld_mac_info_t
*macinfo
= gld
->gld_mac_info
;
3682 gld_interface_t
*ifp
;
3683 dl_capability_ack_t
*dlap
;
3684 dl_capability_sub_t
*dlsp
;
3685 size_t size
= sizeof (dl_capability_ack_t
);
3688 ifp
= ((gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
)->interfacep
;
3690 if (macinfo
->gldm_capabilities
& GLD_CAP_CKSUM_ANY
)
3691 subsize
+= sizeof (dl_capability_sub_t
) +
3692 sizeof (dl_capab_hcksum_t
);
3693 if (macinfo
->gldm_capabilities
& GLD_CAP_ZEROCOPY
)
3694 subsize
+= sizeof (dl_capability_sub_t
) +
3695 sizeof (dl_capab_zerocopy_t
);
3696 if (macinfo
->gldm_options
& GLDOPT_MDT
)
3697 subsize
+= (sizeof (dl_capability_sub_t
) +
3698 sizeof (dl_capab_mdt_t
));
3700 if ((mp
= mexchange(q
, mp
, size
+ subsize
, M_PROTO
,
3701 DL_CAPABILITY_ACK
)) == NULL
)
3704 dlap
= (dl_capability_ack_t
*)mp
->b_rptr
;
3705 dlap
->dl_sub_offset
= 0;
3706 if ((dlap
->dl_sub_length
= subsize
) != 0)
3707 dlap
->dl_sub_offset
= sizeof (dl_capability_ack_t
);
3708 dlsp
= (dl_capability_sub_t
*)&dlap
[1];
3710 if (macinfo
->gldm_capabilities
& GLD_CAP_CKSUM_ANY
) {
3711 dl_capab_hcksum_t
*dlhp
= (dl_capab_hcksum_t
*)&dlsp
[1];
3713 dlsp
->dl_cap
= DL_CAPAB_HCKSUM
;
3714 dlsp
->dl_length
= sizeof (dl_capab_hcksum_t
);
3716 dlhp
->hcksum_version
= HCKSUM_VERSION_1
;
3718 dlhp
->hcksum_txflags
= 0;
3719 if (macinfo
->gldm_capabilities
& GLD_CAP_CKSUM_PARTIAL
)
3720 dlhp
->hcksum_txflags
|= HCKSUM_INET_PARTIAL
;
3721 if (macinfo
->gldm_capabilities
& GLD_CAP_CKSUM_FULL_V4
)
3722 dlhp
->hcksum_txflags
|= HCKSUM_INET_FULL_V4
;
3723 if (macinfo
->gldm_capabilities
& GLD_CAP_CKSUM_FULL_V6
)
3724 dlhp
->hcksum_txflags
|= HCKSUM_INET_FULL_V6
;
3725 if (macinfo
->gldm_capabilities
& GLD_CAP_CKSUM_IPHDR
)
3726 dlhp
->hcksum_txflags
|= HCKSUM_IPHDRCKSUM
;
3728 dlcapabsetqid(&(dlhp
->hcksum_mid
), RD(q
));
3729 dlsp
= (dl_capability_sub_t
*)&dlhp
[1];
3732 if (macinfo
->gldm_capabilities
& GLD_CAP_ZEROCOPY
) {
3733 dl_capab_zerocopy_t
*dlzp
= (dl_capab_zerocopy_t
*)&dlsp
[1];
3735 dlsp
->dl_cap
= DL_CAPAB_ZEROCOPY
;
3736 dlsp
->dl_length
= sizeof (dl_capab_zerocopy_t
);
3737 dlzp
->zerocopy_version
= ZEROCOPY_VERSION_1
;
3738 dlzp
->zerocopy_flags
= DL_CAPAB_VMSAFE_MEM
;
3740 dlcapabsetqid(&(dlzp
->zerocopy_mid
), RD(q
));
3741 dlsp
= (dl_capability_sub_t
*)&dlzp
[1];
3744 if (macinfo
->gldm_options
& GLDOPT_MDT
) {
3745 dl_capab_mdt_t
*dlmp
= (dl_capab_mdt_t
*)&dlsp
[1];
3747 dlsp
->dl_cap
= DL_CAPAB_MDT
;
3748 dlsp
->dl_length
= sizeof (dl_capab_mdt_t
);
3750 dlmp
->mdt_version
= MDT_VERSION_2
;
3751 dlmp
->mdt_max_pld
= macinfo
->gldm_mdt_segs
;
3752 dlmp
->mdt_span_limit
= macinfo
->gldm_mdt_sgl
;
3753 dlcapabsetqid(&dlmp
->mdt_mid
, OTHERQ(q
));
3754 dlmp
->mdt_flags
= DL_CAPAB_MDT_ENABLE
;
3755 dlmp
->mdt_hdr_head
= ifp
->hdr_size
;
3756 dlmp
->mdt_hdr_tail
= 0;
3764 gld_cap_enable(queue_t
*q
, mblk_t
*mp
)
3766 dl_capability_req_t
*dlp
;
3767 dl_capability_sub_t
*dlsp
;
3768 dl_capab_hcksum_t
*dlhp
;
3774 dlp
= (dl_capability_req_t
*)mp
->b_rptr
;
3775 dlp
->dl_primitive
= DL_CAPABILITY_ACK
;
3777 off
= dlp
->dl_sub_offset
;
3778 len
= dlp
->dl_sub_length
;
3780 if (!MBLKIN(mp
, off
, len
))
3781 return (DL_BADPRIM
);
3785 dlsp
= (dl_capability_sub_t
*)(mp
->b_rptr
+ off
);
3786 size
= sizeof (dl_capability_sub_t
) + dlsp
->dl_length
;
3787 if (off
+ size
> end
)
3788 return (DL_BADPRIM
);
3790 switch (dlsp
->dl_cap
) {
3791 case DL_CAPAB_HCKSUM
:
3792 dlhp
= (dl_capab_hcksum_t
*)&dlsp
[1];
3793 /* nothing useful we can do with the contents */
3794 dlcapabsetqid(&(dlhp
->hcksum_mid
), RD(q
));
3808 * Send a copy of the DL_NOTIFY_IND message <mp> to each stream that has
3809 * requested the specific <notification> that the message carries AND is
3810 * eligible and ready to receive the notification immediately.
3812 * This routine ignores flow control. Notifications will be sent regardless.
3814 * In all cases, the original message passed in is freed at the end of
3818 gld_notify_qs(gld_mac_info_t
*macinfo
, mblk_t
*mp
, uint32_t notification
)
3820 gld_mac_pvt_t
*mac_pvt
;
3826 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo
));
3828 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
3831 * Search all the streams attached to this macinfo looking
3832 * for those eligible to receive the present notification.
3834 for (i
= 0; i
< VLAN_HASHSZ
; i
++) {
3835 for (vlan
= mac_pvt
->vlan_hash
[i
];
3836 vlan
!= NULL
; vlan
= vlan
->gldv_next
) {
3837 for (gld
= vlan
->gldv_str_next
;
3838 gld
!= (gld_t
*)&vlan
->gldv_str_next
;
3839 gld
= gld
->gld_next
) {
3840 ASSERT(gld
->gld_qptr
!= NULL
);
3841 ASSERT(gld
->gld_state
== DL_IDLE
||
3842 gld
->gld_state
== DL_UNBOUND
);
3843 ASSERT(gld
->gld_mac_info
== macinfo
);
3845 if (gld
->gld_flags
& GLD_STR_CLOSING
)
3846 continue; /* not eligible - skip */
3847 if (!(notification
& gld
->gld_notifications
))
3848 continue; /* not wanted - skip */
3849 if ((nmp
= dupmsg(mp
)) == NULL
)
3850 continue; /* can't copy - skip */
3853 * All OK; send dup'd notification up this
3856 qreply(WR(gld
->gld_qptr
), nmp
);
3862 * Drop the original message block now
3868 * For each (understood) bit in the &lt;notifications&gt; argument, construct
3869 * a DL_NOTIFY_IND message and send it to the specified <q>, or to all
3870 * eligible queues if <q> is NULL.
3873 gld_notify_ind(gld_mac_info_t
*macinfo
, uint32_t notifications
, queue_t
*q
)
3875 gld_mac_pvt_t
*mac_pvt
;
3876 dl_notify_ind_t
*dlnip
;
3877 struct gld_stats
*stats
;
3882 GLDM_LOCK(macinfo
, RW_WRITER
);
3885 * The following cases shouldn't happen, but just in case the
3886 * MAC driver calls gld_linkstate() at an inappropriate time, we
3889 if (!(macinfo
->gldm_GLD_flags
& GLD_MAC_READY
)) {
3890 GLDM_UNLOCK(macinfo
);
3891 return; /* not ready yet */
3894 if (macinfo
->gldm_GLD_flags
& GLD_UNREGISTERED
) {
3895 GLDM_UNLOCK(macinfo
);
3896 return; /* not ready anymore */
3900 * Make sure the kstats are up to date, 'cos we use some of
3901 * the kstat values below, specifically the link speed ...
3903 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
3904 stats
= mac_pvt
->statistics
;
3905 if (macinfo
->gldm_get_stats
)
3906 (void) (*macinfo
->gldm_get_stats
)(macinfo
, stats
);
3908 for (bit
= 1; notifications
!= 0; bit
<<= 1) {
3909 if ((notifications
& bit
) == 0)
3911 notifications
&= ~bit
;
3913 size
= DL_NOTIFY_IND_SIZE
;
3914 if (bit
== DL_NOTE_PHYS_ADDR
)
3915 size
+= macinfo
->gldm_addrlen
;
3916 if ((mp
= allocb(size
, BPRI_MED
)) == NULL
)
3919 mp
->b_datap
->db_type
= M_PROTO
;
3920 mp
->b_wptr
= mp
->b_rptr
+ size
;
3921 dlnip
= (dl_notify_ind_t
*)mp
->b_rptr
;
3922 dlnip
->dl_primitive
= DL_NOTIFY_IND
;
3923 dlnip
->dl_notification
= 0;
3925 dlnip
->dl_addr_length
= 0;
3926 dlnip
->dl_addr_offset
= 0;
3929 case DL_NOTE_PROMISC_ON_PHYS
:
3930 case DL_NOTE_PROMISC_OFF_PHYS
:
3931 if (mac_pvt
->nprom
!= 0)
3932 dlnip
->dl_notification
= bit
;
3935 case DL_NOTE_LINK_DOWN
:
3936 if (macinfo
->gldm_linkstate
== GLD_LINKSTATE_DOWN
)
3937 dlnip
->dl_notification
= bit
;
3940 case DL_NOTE_LINK_UP
:
3941 if (macinfo
->gldm_linkstate
== GLD_LINKSTATE_UP
)
3942 dlnip
->dl_notification
= bit
;
3947 * Conversion required here:
3948 * GLD keeps the speed in bit/s in a uint64
3949 * DLPI wants it in kb/s in a uint32
3950 * Fortunately this is still big enough for 10Gb/s!
3952 dlnip
->dl_notification
= bit
;
3953 dlnip
->dl_data
= stats
->glds_speed
/1000ULL;
3956 case DL_NOTE_PHYS_ADDR
:
3957 dlnip
->dl_notification
= bit
;
3958 dlnip
->dl_data
= DL_CURR_PHYS_ADDR
;
3959 dlnip
->dl_addr_offset
= sizeof (dl_notify_ind_t
);
3960 dlnip
->dl_addr_length
= macinfo
->gldm_addrlen
+
3961 abs(macinfo
->gldm_saplen
);
3962 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
3963 mac_copy(mac_pvt
->curr_macaddr
,
3964 mp
->b_rptr
+ sizeof (dl_notify_ind_t
),
3965 macinfo
->gldm_addrlen
);
3972 if (dlnip
->dl_notification
== 0)
3977 gld_notify_qs(macinfo
, mp
, bit
);
3980 GLDM_UNLOCK(macinfo
);
3984 * gld_notify_req - handle a DL_NOTIFY_REQ message
3987 gld_notify_req(queue_t
*q
, mblk_t
*mp
)
3989 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
3990 gld_mac_info_t
*macinfo
;
3992 dl_notify_req_t
*dlnrp
;
3993 dl_notify_ack_t
*dlnap
;
3995 ASSERT(gld
!= NULL
);
3996 ASSERT(gld
->gld_qptr
== RD(q
));
3998 dlnrp
= (dl_notify_req_t
*)mp
->b_rptr
;
4001 if (gld_debug
& GLDTRACE
)
4002 cmn_err(CE_NOTE
, "gld_notify_req(%p %p)",
4003 (void *)q
, (void *)mp
);
4006 if (gld
->gld_state
== DL_UNATTACHED
) {
4008 if (gld_debug
& GLDERRS
)
4009 cmn_err(CE_NOTE
, "gld_notify_req: wrong state (%d)",
4012 return (DL_OUTSTATE
);
4016 * Remember what notifications are required by this stream
4018 macinfo
= gld
->gld_mac_info
;
4019 pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
4021 gld
->gld_notifications
= dlnrp
->dl_notifications
& pvt
->notifications
;
4024 * The return DL_NOTIFY_ACK carries the bitset of notifications
4025 * that this driver can provide, independently of which ones have
4026 * previously been or are now being requested.
4028 if ((mp
= mexchange(q
, mp
, sizeof (dl_notify_ack_t
), M_PCPROTO
,
4029 DL_NOTIFY_ACK
)) == NULL
)
4032 dlnap
= (dl_notify_ack_t
*)mp
->b_rptr
;
4033 dlnap
->dl_notifications
= pvt
->notifications
;
4037 * A side effect of a DL_NOTIFY_REQ is that after the DL_NOTIFY_ACK
4038 * reply, the the requestor gets zero or more DL_NOTIFY_IND messages
4039 * that provide the current status.
4041 gld_notify_ind(macinfo
, gld
->gld_notifications
, q
);
4048 * Called by driver to tell GLD the state of the physical link.
4049 * As a side effect, sends a DL_NOTE_LINK_UP or DL_NOTE_LINK_DOWN
4050 * notification to each client that has previously requested such
4054 gld_linkstate(gld_mac_info_t
*macinfo
, int32_t newstate
)
4056 uint32_t notification
;
4062 case GLD_LINKSTATE_DOWN
:
4063 notification
= DL_NOTE_LINK_DOWN
;
4066 case GLD_LINKSTATE_UP
:
4067 notification
= DL_NOTE_LINK_UP
| DL_NOTE_SPEED
;
4070 case GLD_LINKSTATE_UNKNOWN
:
4075 GLDM_LOCK(macinfo
, RW_WRITER
);
4076 if (macinfo
->gldm_linkstate
== newstate
)
4079 macinfo
->gldm_linkstate
= newstate
;
4080 GLDM_UNLOCK(macinfo
);
4083 gld_notify_ind(macinfo
, notification
, NULL
);
4087 * gld_udqos - set the current QoS parameters (priority only at the moment).
4090 gld_udqos(queue_t
*q
, mblk_t
*mp
)
4092 dl_udqos_req_t
*dlp
;
4093 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
4096 dl_qos_cl_sel1_t
*selp
;
4099 ASSERT(gld
->gld_qptr
== RD(q
));
4102 if (gld_debug
& GLDTRACE
)
4103 cmn_err(CE_NOTE
, "gld_udqos(%p %p)", (void *)q
, (void *)mp
);
4106 if (gld
->gld_state
!= DL_IDLE
) {
4108 if (gld_debug
& GLDERRS
)
4109 cmn_err(CE_NOTE
, "gld_udqos: wrong state (%d)",
4112 return (DL_OUTSTATE
);
4115 dlp
= (dl_udqos_req_t
*)mp
->b_rptr
;
4116 off
= dlp
->dl_qos_offset
;
4117 len
= dlp
->dl_qos_length
;
4119 if (len
!= sizeof (dl_qos_cl_sel1_t
) || !MBLKIN(mp
, off
, len
))
4120 return (DL_BADQOSTYPE
);
4122 selp
= (dl_qos_cl_sel1_t
*)(mp
->b_rptr
+ off
);
4123 if (selp
->dl_qos_type
!= DL_QOS_CL_SEL1
)
4124 return (DL_BADQOSTYPE
);
4126 if (selp
->dl_trans_delay
!= 0 &&
4127 selp
->dl_trans_delay
!= DL_QOS_DONT_CARE
)
4128 return (DL_BADQOSPARAM
);
4129 if (selp
->dl_protection
!= 0 &&
4130 selp
->dl_protection
!= DL_QOS_DONT_CARE
)
4131 return (DL_BADQOSPARAM
);
4132 if (selp
->dl_residual_error
!= 0 &&
4133 selp
->dl_residual_error
!= DL_QOS_DONT_CARE
)
4134 return (DL_BADQOSPARAM
);
4135 if (selp
->dl_priority
< 0 || selp
->dl_priority
> 7)
4136 return (DL_BADQOSPARAM
);
4138 gld
->gld_upri
= selp
->dl_priority
;
4140 dlokack(q
, mp
, DL_UDQOS_REQ
);
4145 gld_bindack(queue_t
*q
, mblk_t
*mp
)
4147 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
4148 gld_mac_info_t
*macinfo
= gld
->gld_mac_info
;
4149 gld_mac_pvt_t
*mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
4152 t_uscalar_t addrlen
;
4155 addrlen
= macinfo
->gldm_addrlen
+ abs(macinfo
->gldm_saplen
);
4156 size
= sizeof (dl_bind_ack_t
) + addrlen
;
4157 if ((mp
= mexchange(q
, mp
, size
, M_PCPROTO
, DL_BIND_ACK
)) == NULL
)
4160 dlp
= (dl_bind_ack_t
*)mp
->b_rptr
;
4161 dlp
->dl_sap
= gld
->gld_sap
;
4162 dlp
->dl_addr_length
= addrlen
;
4163 dlp
->dl_addr_offset
= sizeof (dl_bind_ack_t
);
4164 dlp
->dl_max_conind
= 0;
4165 dlp
->dl_xidtest_flg
= 0;
4167 mac_copy(mac_pvt
->curr_macaddr
, (uchar_t
*)&dlp
[1],
4168 macinfo
->gldm_addrlen
);
4169 sapp
= mp
->b_rptr
+ dlp
->dl_addr_offset
+ macinfo
->gldm_addrlen
;
4170 *(ushort_t
*)sapp
= gld
->gld_sap
;
4176 * gld_bind - determine if a SAP is already allocated and whether it is legal
4177 * to do the bind at this time
4180 gld_bind(queue_t
*q
, mblk_t
*mp
)
4184 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
4185 gld_mac_info_t
*macinfo
= gld
->gld_mac_info
;
4188 ASSERT(gld
->gld_qptr
== RD(q
));
4191 if (gld_debug
& GLDTRACE
)
4192 cmn_err(CE_NOTE
, "gld_bind(%p %p)", (void *)q
, (void *)mp
);
4195 dlp
= (dl_bind_req_t
*)mp
->b_rptr
;
4199 if (gld_debug
& GLDPROT
)
4200 cmn_err(CE_NOTE
, "gld_bind: lsap=%lx", sap
);
4203 if (gld
->gld_state
!= DL_UNBOUND
) {
4205 if (gld_debug
& GLDERRS
)
4206 cmn_err(CE_NOTE
, "gld_bind: bound or not attached (%d)",
4209 return (DL_OUTSTATE
);
4213 if (dlp
->dl_service_mode
!= DL_CLDLS
) {
4214 return (DL_UNSUPPORTED
);
4216 if (dlp
->dl_xidtest_flg
& (DL_AUTO_XID
| DL_AUTO_TEST
)) {
4221 * Check sap validity and decide whether this stream accepts
4222 * IEEE 802.2 (LLC) packets.
4224 if (sap
> ETHERTYPE_MAX
)
4228 * Decide whether the SAP value selects EtherType encoding/decoding.
4229 * For compatibility with monolithic ethernet drivers, the range of
4230 * SAP values is different for DL_ETHER media.
4232 switch (macinfo
->gldm_type
) {
4234 gld
->gld_ethertype
= (sap
> ETHERMTU
);
4237 gld
->gld_ethertype
= (sap
> GLD_MAX_802_SAP
);
4241 /* if we get to here, then the SAP is legal enough */
4242 GLDM_LOCK(macinfo
, RW_WRITER
);
4243 gld
->gld_state
= DL_IDLE
; /* bound and ready */
4245 if ((macinfo
->gldm_type
== DL_ETHER
) && (sap
== ETHERTYPE_VLAN
))
4246 ((gld_vlan_t
*)gld
->gld_vlan
)->gldv_nvlan_sap
++;
4250 if (gld_debug
& GLDPROT
)
4251 cmn_err(CE_NOTE
, "gld_bind: ok - sap = %d", gld
->gld_sap
);
4255 mp
= gld_bindack(q
, mp
);
4256 GLDM_UNLOCK(macinfo
);
4267 * gld_unbind - perform an unbind of an LSAP or ether type on the stream.
4268 * The stream is still open and can be re-bound.
4271 gld_unbind(queue_t
*q
, mblk_t
*mp
)
4273 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
4274 gld_mac_info_t
*macinfo
= gld
->gld_mac_info
;
4279 if (gld_debug
& GLDTRACE
)
4280 cmn_err(CE_NOTE
, "gld_unbind(%p %p)", (void *)q
, (void *)mp
);
4283 if (gld
->gld_state
!= DL_IDLE
) {
4285 if (gld_debug
& GLDERRS
)
4286 cmn_err(CE_NOTE
, "gld_unbind: wrong state (%d)",
4289 return (DL_OUTSTATE
);
4294 * Avoid unbinding (DL_UNBIND_REQ) while FAST/RAW is inside wput.
4295 * See comments above gld_start().
4297 gld
->gld_in_unbind
= B_TRUE
; /* disallow wput=>start */
4299 if (gld
->gld_wput_count
!= 0) {
4300 gld
->gld_in_unbind
= B_FALSE
;
4301 ASSERT(mp
); /* we didn't come from close */
4303 if (gld_debug
& GLDETRACE
)
4304 cmn_err(CE_NOTE
, "gld_unbind: defer for wput");
4306 (void) putbq(q
, mp
);
4307 qenable(q
); /* try again soon */
4308 return (GLDE_RETRY
);
4311 GLDM_LOCK(macinfo
, RW_WRITER
);
4312 if ((macinfo
->gldm_type
== DL_ETHER
) &&
4313 (gld
->gld_sap
== ETHERTYPE_VLAN
)) {
4314 ((gld_vlan_t
*)gld
->gld_vlan
)->gldv_nvlan_sap
--;
4316 gld
->gld_state
= DL_UNBOUND
;
4319 GLDM_UNLOCK(macinfo
);
4322 gld
->gld_in_unbind
= B_FALSE
;
4324 /* mp is NULL if we came from close */
4326 gld_flushqueue(q
); /* flush the queues */
4327 dlokack(q
, mp
, DL_UNBIND_REQ
);
4333 * gld_inforeq - generate the response to an info request
4336 gld_inforeq(queue_t
*q
, mblk_t
*mp
)
4342 gld_mac_info_t
*macinfo
;
4343 gld_mac_pvt_t
*mac_pvt
;
4345 int range_offset
= 0;
4354 if (gld_debug
& GLDTRACE
)
4355 cmn_err(CE_NOTE
, "gld_inforeq(%p %p)", (void *)q
, (void *)mp
);
4357 gld
= (gld_t
*)q
->q_ptr
;
4359 glddev
= gld
->gld_device
;
4362 if (gld
->gld_state
== DL_IDLE
|| gld
->gld_state
== DL_UNBOUND
) {
4363 macinfo
= gld
->gld_mac_info
;
4364 ASSERT(macinfo
!= NULL
);
4366 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
4368 addr_length
= macinfo
->gldm_addrlen
;
4369 sap_length
= macinfo
->gldm_saplen
;
4370 brdcst_length
= macinfo
->gldm_addrlen
;
4372 addr_length
= glddev
->gld_addrlen
;
4373 sap_length
= glddev
->gld_saplen
;
4374 brdcst_length
= glddev
->gld_addrlen
;
4377 bufsize
= sizeof (dl_info_ack_t
);
4379 addr_offset
= bufsize
;
4380 bufsize
+= addr_length
;
4381 bufsize
+= abs(sap_length
);
4383 brdcst_offset
= bufsize
;
4384 bufsize
+= brdcst_length
;
4386 if (((gld_vlan_t
*)gld
->gld_vlan
) != NULL
) {
4387 sel_offset
= P2ROUNDUP(bufsize
, sizeof (int64_t));
4388 bufsize
= sel_offset
+ sizeof (dl_qos_cl_sel1_t
);
4390 range_offset
= P2ROUNDUP(bufsize
, sizeof (int64_t));
4391 bufsize
= range_offset
+ sizeof (dl_qos_cl_range1_t
);
4394 if ((mp
= mexchange(q
, mp
, bufsize
, M_PCPROTO
, DL_INFO_ACK
)) == NULL
)
4395 return (GLDE_OK
); /* nothing more to be done */
4397 bzero(mp
->b_rptr
, bufsize
);
4399 dlp
= (dl_info_ack_t
*)mp
->b_rptr
;
4400 dlp
->dl_primitive
= DL_INFO_ACK
;
4401 dlp
->dl_version
= DL_VERSION_2
;
4402 dlp
->dl_service_mode
= DL_CLDLS
;
4403 dlp
->dl_current_state
= gld
->gld_state
;
4404 dlp
->dl_provider_style
= gld
->gld_style
;
4406 if (sel_offset
!= 0) {
4407 dl_qos_cl_sel1_t
*selp
;
4408 dl_qos_cl_range1_t
*rangep
;
4410 ASSERT(range_offset
!= 0);
4412 dlp
->dl_qos_offset
= sel_offset
;
4413 dlp
->dl_qos_length
= sizeof (dl_qos_cl_sel1_t
);
4414 dlp
->dl_qos_range_offset
= range_offset
;
4415 dlp
->dl_qos_range_length
= sizeof (dl_qos_cl_range1_t
);
4417 selp
= (dl_qos_cl_sel1_t
*)(mp
->b_rptr
+ sel_offset
);
4418 selp
->dl_qos_type
= DL_QOS_CL_SEL1
;
4419 selp
->dl_priority
= gld
->gld_upri
;
4421 rangep
= (dl_qos_cl_range1_t
*)(mp
->b_rptr
+ range_offset
);
4422 rangep
->dl_qos_type
= DL_QOS_CL_RANGE1
;
4423 rangep
->dl_priority
.dl_min
= 0;
4424 rangep
->dl_priority
.dl_max
= 7;
4427 if (gld
->gld_state
== DL_IDLE
|| gld
->gld_state
== DL_UNBOUND
) {
4428 dlp
->dl_min_sdu
= macinfo
->gldm_minpkt
;
4429 dlp
->dl_max_sdu
= macinfo
->gldm_maxpkt
;
4430 dlp
->dl_mac_type
= macinfo
->gldm_type
;
4431 dlp
->dl_addr_length
= addr_length
+ abs(sap_length
);
4432 dlp
->dl_sap_length
= sap_length
;
4434 if (gld
->gld_state
== DL_IDLE
) {
4436 * If we are bound to a non-LLC SAP on any medium
4437 * other than Ethernet, then we need room for a
4438 * SNAP header. So we have to adjust the MTU size
4439 * accordingly. XXX I suppose this should be done
4440 * in gldutil.c, but it seems likely that this will
4441 * always be true for everything GLD supports but
4442 * Ethernet. Check this if you add another medium.
4444 if ((macinfo
->gldm_type
== DL_TPR
||
4445 macinfo
->gldm_type
== DL_FDDI
) &&
4447 dlp
->dl_max_sdu
-= LLC_SNAP_HDR_LEN
;
4449 /* copy macaddr and sap */
4450 dlp
->dl_addr_offset
= addr_offset
;
4452 mac_copy(mac_pvt
->curr_macaddr
, mp
->b_rptr
+
4453 addr_offset
, macinfo
->gldm_addrlen
);
4454 sapp
= mp
->b_rptr
+ addr_offset
+
4455 macinfo
->gldm_addrlen
;
4456 *(ushort_t
*)sapp
= gld
->gld_sap
;
4458 dlp
->dl_addr_offset
= 0;
4461 /* copy broadcast addr */
4462 dlp
->dl_brdcst_addr_length
= macinfo
->gldm_addrlen
;
4463 dlp
->dl_brdcst_addr_offset
= brdcst_offset
;
4464 mac_copy((caddr_t
)macinfo
->gldm_broadcast_addr
,
4465 mp
->b_rptr
+ brdcst_offset
, brdcst_length
);
4468 * No PPA is attached.
4469 * The best we can do is use the values provided
4470 * by the first mac that called gld_register.
4472 dlp
->dl_min_sdu
= glddev
->gld_minsdu
;
4473 dlp
->dl_max_sdu
= glddev
->gld_maxsdu
;
4474 dlp
->dl_mac_type
= glddev
->gld_type
;
4475 dlp
->dl_addr_length
= addr_length
+ abs(sap_length
);
4476 dlp
->dl_sap_length
= sap_length
;
4477 dlp
->dl_addr_offset
= 0;
4478 dlp
->dl_brdcst_addr_offset
= brdcst_offset
;
4479 dlp
->dl_brdcst_addr_length
= brdcst_length
;
4480 mac_copy((caddr_t
)glddev
->gld_broadcast
,
4481 mp
->b_rptr
+ brdcst_offset
, brdcst_length
);
4488 * gld_unitdata (q, mp)
4489 * send a datagram. Destination address/lsap is in M_PROTO
4490 * message (first mblock), data is in remainder of message.
4494 gld_unitdata(queue_t
*q
, mblk_t
*mp
)
4496 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
4497 dl_unitdata_req_t
*dlp
= (dl_unitdata_req_t
*)mp
->b_rptr
;
4498 gld_mac_info_t
*macinfo
= gld
->gld_mac_info
;
4501 gld_interface_t
*ifp
;
4510 if (gld_debug
& GLDTRACE
)
4511 cmn_err(CE_NOTE
, "gld_unitdata(%p %p)", (void *)q
, (void *)mp
);
4514 if (gld
->gld_state
!= DL_IDLE
) {
4516 if (gld_debug
& GLDERRS
)
4517 cmn_err(CE_NOTE
, "gld_unitdata: wrong state (%d)",
4520 dluderrorind(q
, mp
, mp
->b_rptr
+ dlp
->dl_dest_addr_offset
,
4521 dlp
->dl_dest_addr_length
, DL_OUTSTATE
, 0);
4524 ASSERT(macinfo
!= NULL
);
4526 if (!MBLKIN(mp
, dlp
->dl_dest_addr_offset
, dlp
->dl_dest_addr_length
) ||
4527 dlp
->dl_dest_addr_length
!=
4528 macinfo
->gldm_addrlen
+ abs(macinfo
->gldm_saplen
)) {
4529 dluderrorind(q
, mp
, mp
->b_rptr
+ dlp
->dl_dest_addr_offset
,
4530 dlp
->dl_dest_addr_length
, DL_BADADDR
, 0);
4534 upri
= dlp
->dl_priority
.dl_max
;
4536 msglen
= msgdsize(mp
);
4537 if (msglen
== 0 || msglen
> macinfo
->gldm_maxpkt
) {
4539 if (gld_debug
& GLDERRS
)
4540 cmn_err(CE_NOTE
, "gld_unitdata: bad msglen (%d)",
4543 dluderrorind(q
, mp
, mp
->b_rptr
+ dlp
->dl_dest_addr_offset
,
4544 dlp
->dl_dest_addr_length
, DL_BADDATA
, 0);
4548 ASSERT(mp
->b_cont
!= NULL
); /* because msgdsize(mp) is nonzero */
4550 ifp
= ((gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
)->interfacep
;
4552 /* grab any checksum information that may be present */
4553 hcksum_retrieve(mp
->b_cont
, NULL
, NULL
, &start
, &stuff
, &end
,
4557 * Prepend a valid header for transmission
4559 if ((nmp
= (*ifp
->mkunitdata
)(gld
, mp
)) == NULL
) {
4561 if (gld_debug
& GLDERRS
)
4562 cmn_err(CE_NOTE
, "gld_unitdata: mkunitdata failed.");
4564 dluderrorind(q
, mp
, mp
->b_rptr
+ dlp
->dl_dest_addr_offset
,
4565 dlp
->dl_dest_addr_length
, DL_SYSERR
, ENOSR
);
4569 /* apply any checksum information to the first block in the chain */
4570 (void) hcksum_assoc(nmp
, NULL
, NULL
, start
, stuff
, end
, value
,
4573 GLD_CLEAR_MBLK_VTAG(nmp
);
4574 if (gld_start(q
, nmp
, GLD_WSRV
, upri
) == GLD_NORESOURCES
) {
4576 return (GLDE_RETRY
);
4584 * DLPI DL_ATTACH_REQ
4585 * this attaches the stream to a PPA
4588 gldattach(queue_t
*q
, mblk_t
*mp
)
4590 dl_attach_req_t
*at
;
4591 gld_mac_info_t
*macinfo
;
4592 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
4594 gld_mac_pvt_t
*mac_pvt
;
4599 at
= (dl_attach_req_t
*)mp
->b_rptr
;
4601 if (gld
->gld_state
!= DL_UNATTACHED
)
4602 return (DL_OUTSTATE
);
4604 ASSERT(!gld
->gld_mac_info
);
4606 ppa
= at
->dl_ppa
% GLD_VLAN_SCALE
; /* 0 .. 999 */
4607 vid
= at
->dl_ppa
/ GLD_VLAN_SCALE
; /* 0 .. 4094 */
4608 if (vid
> VLAN_VID_MAX
)
4611 glddev
= gld
->gld_device
;
4612 mutex_enter(&glddev
->gld_devlock
);
4613 for (macinfo
= glddev
->gld_mac_next
;
4614 macinfo
!= (gld_mac_info_t
*)&glddev
->gld_mac_next
;
4615 macinfo
= macinfo
->gldm_next
) {
4618 ASSERT(macinfo
!= NULL
);
4619 if (macinfo
->gldm_ppa
!= ppa
)
4622 if (!(macinfo
->gldm_GLD_flags
& GLD_MAC_READY
))
4623 continue; /* this one's not ready yet */
4628 if (vid
!= VLAN_VID_NONE
&& !VLAN_CAPABLE(macinfo
)) {
4629 mutex_exit(&glddev
->gld_devlock
);
4634 * We found the correct PPA, hold the instance
4636 inst
= ddi_get_instance(macinfo
->gldm_devinfo
);
4637 if (inst
== -1 || qassociate(q
, inst
) != 0) {
4638 mutex_exit(&glddev
->gld_devlock
);
4642 /* Take the stream off the per-driver-class list */
4646 * We must hold the lock to prevent multiple calls
4647 * to the reset and start routines.
4649 GLDM_LOCK(macinfo
, RW_WRITER
);
4651 gld
->gld_mac_info
= macinfo
;
4653 if (macinfo
->gldm_send_tagged
!= NULL
)
4654 gld
->gld_send
= macinfo
->gldm_send_tagged
;
4656 gld
->gld_send
= macinfo
->gldm_send
;
4658 if ((vlan
= gld_get_vlan(macinfo
, vid
)) == NULL
) {
4659 GLDM_UNLOCK(macinfo
);
4660 gldinsque(gld
, glddev
->gld_str_prev
);
4661 mutex_exit(&glddev
->gld_devlock
);
4662 (void) qassociate(q
, -1);
4666 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
4667 if (!mac_pvt
->started
) {
4668 if (gld_start_mac(macinfo
) != GLD_SUCCESS
) {
4670 GLDM_UNLOCK(macinfo
);
4671 gldinsque(gld
, glddev
->gld_str_prev
);
4672 mutex_exit(&glddev
->gld_devlock
);
4673 dlerrorack(q
, mp
, DL_ATTACH_REQ
, DL_SYSERR
,
4675 (void) qassociate(q
, -1);
4680 gld
->gld_vlan
= vlan
;
4681 vlan
->gldv_nstreams
++;
4682 gldinsque(gld
, vlan
->gldv_str_prev
);
4683 gld
->gld_state
= DL_UNBOUND
;
4684 GLDM_UNLOCK(macinfo
);
4687 if (gld_debug
& GLDPROT
) {
4688 cmn_err(CE_NOTE
, "gldattach(%p, %p, PPA = %d)",
4689 (void *)q
, (void *)mp
, macinfo
->gldm_ppa
);
4692 mutex_exit(&glddev
->gld_devlock
);
4693 dlokack(q
, mp
, DL_ATTACH_REQ
);
4696 mutex_exit(&glddev
->gld_devlock
);
4701 * gldunattach(q, mp)
4702 * DLPI DL_DETACH_REQ
4703 * detaches the mac layer from the stream
4706 gldunattach(queue_t
*q
, mblk_t
*mp
)
4708 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
4709 glddev_t
*glddev
= gld
->gld_device
;
4710 gld_mac_info_t
*macinfo
= gld
->gld_mac_info
;
4711 int state
= gld
->gld_state
;
4713 gld_mac_pvt_t
*mac_pvt
;
4717 int op
= GLD_MAC_PROMISC_NOOP
;
4719 if (state
!= DL_UNBOUND
)
4720 return (DL_OUTSTATE
);
4722 ASSERT(macinfo
!= NULL
);
4723 ASSERT(gld
->gld_sap
== 0);
4724 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
4727 if (gld_debug
& GLDPROT
) {
4728 cmn_err(CE_NOTE
, "gldunattach(%p, %p, PPA = %d)",
4729 (void *)q
, (void *)mp
, macinfo
->gldm_ppa
);
4733 GLDM_LOCK(macinfo
, RW_WRITER
);
4735 if (gld
->gld_mcast
) {
4736 for (i
= 0; i
< gld
->gld_multicnt
; i
++) {
4739 if ((mcast
= gld
->gld_mcast
[i
]) != NULL
) {
4740 ASSERT(mcast
->gldm_refcnt
);
4741 gld_send_disable_multi(macinfo
, mcast
);
4744 kmem_free(gld
->gld_mcast
,
4745 sizeof (gld_mcast_t
*) * gld
->gld_multicnt
);
4746 gld
->gld_mcast
= NULL
;
4747 gld
->gld_multicnt
= 0;
4750 /* decide if we need to turn off any promiscuity */
4751 phys_off
= (gld
->gld_flags
& GLD_PROM_PHYS
&&
4752 --mac_pvt
->nprom
== 0);
4753 mult_off
= (gld
->gld_flags
& GLD_PROM_MULT
&&
4754 --mac_pvt
->nprom_multi
== 0);
4757 op
= (mac_pvt
->nprom_multi
== 0) ? GLD_MAC_PROMISC_NONE
:
4758 GLD_MAC_PROMISC_MULTI
;
4759 } else if (mult_off
) {
4760 op
= (mac_pvt
->nprom
== 0) ? GLD_MAC_PROMISC_NONE
:
4761 GLD_MAC_PROMISC_NOOP
; /* phys overrides multi */
4764 if (op
!= GLD_MAC_PROMISC_NOOP
)
4765 (void) (*macinfo
->gldm_set_promiscuous
)(macinfo
, op
);
4767 vlan
= (gld_vlan_t
*)gld
->gld_vlan
;
4768 if (gld
->gld_flags
& GLD_PROM_PHYS
)
4770 if (gld
->gld_flags
& GLD_PROM_MULT
)
4772 if (gld
->gld_flags
& GLD_PROM_SAP
) {
4774 vlan
->gldv_nvlan_sap
--;
4777 gld
->gld_flags
&= ~(GLD_PROM_PHYS
| GLD_PROM_SAP
| GLD_PROM_MULT
);
4779 GLDM_UNLOCK(macinfo
);
4782 gld_notify_ind(macinfo
, DL_NOTE_PROMISC_OFF_PHYS
, NULL
);
4785 * We need to hold both locks when modifying the mac stream list
4786 * to protect findminor as well as everyone else.
4788 mutex_enter(&glddev
->gld_devlock
);
4789 GLDM_LOCK(macinfo
, RW_WRITER
);
4791 /* disassociate this stream with its vlan and underlying mac */
4794 if (--vlan
->gldv_nstreams
== 0) {
4796 gld
->gld_vlan
= NULL
;
4799 gld
->gld_mac_info
= NULL
;
4800 gld
->gld_state
= DL_UNATTACHED
;
4802 /* cleanup mac layer if last vlan */
4803 if (mac_pvt
->nvlan
== 0) {
4804 gld_stop_mac(macinfo
);
4805 macinfo
->gldm_GLD_flags
&= ~GLD_INTR_WAIT
;
4808 /* make sure no references to this gld for gld_v0_sched */
4809 if (mac_pvt
->last_sched
== gld
)
4810 mac_pvt
->last_sched
= NULL
;
4812 GLDM_UNLOCK(macinfo
);
4814 /* put the stream on the unattached Style 2 list */
4815 gldinsque(gld
, glddev
->gld_str_prev
);
4817 mutex_exit(&glddev
->gld_devlock
);
4819 /* There will be no mp if we were called from close */
4821 dlokack(q
, mp
, DL_DETACH_REQ
);
4823 if (gld
->gld_style
== DL_STYLE2
)
4824 (void) qassociate(q
, -1);
4829 * gld_enable_multi (q, mp)
4830 * Enables multicast address on the stream. If the mac layer
4831 * isn't enabled for this address, enable at that level as well.
4834 gld_enable_multi(queue_t
*q
, mblk_t
*mp
)
4836 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
4838 gld_mac_info_t
*macinfo
= gld
->gld_mac_info
;
4839 unsigned char *maddr
;
4840 dl_enabmulti_req_t
*multi
;
4843 gld_mac_pvt_t
*mac_pvt
;
4846 if (gld_debug
& GLDPROT
) {
4847 cmn_err(CE_NOTE
, "gld_enable_multi(%p, %p)", (void *)q
,
4852 if (gld
->gld_state
== DL_UNATTACHED
)
4853 return (DL_OUTSTATE
);
4855 ASSERT(macinfo
!= NULL
);
4856 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
4858 if (macinfo
->gldm_set_multicast
== NULL
) {
4859 return (DL_UNSUPPORTED
);
4862 multi
= (dl_enabmulti_req_t
*)mp
->b_rptr
;
4864 if (!MBLKIN(mp
, multi
->dl_addr_offset
, multi
->dl_addr_length
) ||
4865 multi
->dl_addr_length
!= macinfo
->gldm_addrlen
)
4866 return (DL_BADADDR
);
4868 /* request appears to be valid */
4870 glddev
= mac_pvt
->major_dev
;
4871 ASSERT(glddev
== gld
->gld_device
);
4873 maddr
= mp
->b_rptr
+ multi
->dl_addr_offset
;
4876 * The multicast addresses live in a per-device table, along
4877 * with a reference count. Each stream has a table that
4878 * points to entries in the device table, with the reference
4879 * count reflecting the number of streams pointing at it. If
4880 * this multicast address is already in the per-device table,
4881 * all we have to do is point at it.
4883 GLDM_LOCK(macinfo
, RW_WRITER
);
4885 /* does this address appear in current table? */
4886 if (gld
->gld_mcast
== NULL
) {
4887 /* no mcast addresses -- allocate table */
4888 gld
->gld_mcast
= GLD_GETSTRUCT(gld_mcast_t
*,
4889 glddev
->gld_multisize
);
4890 if (gld
->gld_mcast
== NULL
) {
4891 GLDM_UNLOCK(macinfo
);
4892 dlerrorack(q
, mp
, DL_ENABMULTI_REQ
, DL_SYSERR
, ENOSR
);
4895 gld
->gld_multicnt
= glddev
->gld_multisize
;
4897 for (i
= 0; i
< gld
->gld_multicnt
; i
++) {
4898 if (gld
->gld_mcast
[i
] &&
4899 mac_eq(gld
->gld_mcast
[i
]->gldm_addr
,
4900 maddr
, macinfo
->gldm_addrlen
)) {
4901 /* this is a match -- just succeed */
4902 ASSERT(gld
->gld_mcast
[i
]->gldm_refcnt
);
4903 GLDM_UNLOCK(macinfo
);
4904 dlokack(q
, mp
, DL_ENABMULTI_REQ
);
4911 * it wasn't in the stream so check to see if the mac layer has it
4914 if (mac_pvt
->mcast_table
== NULL
) {
4915 mac_pvt
->mcast_table
= GLD_GETSTRUCT(gld_mcast_t
,
4916 glddev
->gld_multisize
);
4917 if (mac_pvt
->mcast_table
== NULL
) {
4918 GLDM_UNLOCK(macinfo
);
4919 dlerrorack(q
, mp
, DL_ENABMULTI_REQ
, DL_SYSERR
, ENOSR
);
4923 for (i
= 0; i
< glddev
->gld_multisize
; i
++) {
4924 if (mac_pvt
->mcast_table
[i
].gldm_refcnt
&&
4925 mac_eq(mac_pvt
->mcast_table
[i
].gldm_addr
,
4926 maddr
, macinfo
->gldm_addrlen
)) {
4927 mcast
= &mac_pvt
->mcast_table
[i
];
4932 if (mcast
== NULL
) {
4933 /* not in mac layer -- find an empty mac slot to fill in */
4934 for (i
= 0; i
< glddev
->gld_multisize
; i
++) {
4935 if (mac_pvt
->mcast_table
[i
].gldm_refcnt
== 0) {
4936 mcast
= &mac_pvt
->mcast_table
[i
];
4937 mac_copy(maddr
, mcast
->gldm_addr
,
4938 macinfo
->gldm_addrlen
);
4943 if (mcast
== NULL
) {
4944 /* couldn't get a mac layer slot */
4945 GLDM_UNLOCK(macinfo
);
4946 return (DL_TOOMANY
);
4949 /* now we have a mac layer slot in mcast -- get a stream slot */
4950 for (i
= 0; i
< gld
->gld_multicnt
; i
++) {
4951 if (gld
->gld_mcast
[i
] != NULL
)
4953 /* found an empty slot */
4954 if (!mcast
->gldm_refcnt
) {
4955 /* set mcast in hardware */
4956 unsigned char cmaddr
[GLD_MAX_ADDRLEN
];
4958 ASSERT(sizeof (cmaddr
) >= macinfo
->gldm_addrlen
);
4959 cmac_copy(maddr
, cmaddr
,
4960 macinfo
->gldm_addrlen
, macinfo
);
4962 rc
= (*macinfo
->gldm_set_multicast
)
4963 (macinfo
, cmaddr
, GLD_MULTI_ENABLE
);
4964 if (rc
== GLD_NOTSUPPORTED
) {
4965 GLDM_UNLOCK(macinfo
);
4966 return (DL_NOTSUPPORTED
);
4967 } else if (rc
== GLD_NORESOURCES
) {
4968 GLDM_UNLOCK(macinfo
);
4969 return (DL_TOOMANY
);
4970 } else if (rc
== GLD_BADARG
) {
4971 GLDM_UNLOCK(macinfo
);
4972 return (DL_BADADDR
);
4973 } else if (rc
== GLD_RETRY
) {
4975 * The putbq and gld_xwait must be
4976 * within the lock to prevent races
4979 (void) putbq(q
, mp
);
4980 gld
->gld_xwait
= B_TRUE
;
4981 GLDM_UNLOCK(macinfo
);
4982 return (GLDE_RETRY
);
4983 } else if (rc
!= GLD_SUCCESS
) {
4984 GLDM_UNLOCK(macinfo
);
4985 dlerrorack(q
, mp
, DL_ENABMULTI_REQ
,
4990 gld
->gld_mcast
[i
] = mcast
;
4991 mcast
->gldm_refcnt
++;
4992 GLDM_UNLOCK(macinfo
);
4993 dlokack(q
, mp
, DL_ENABMULTI_REQ
);
4997 /* couldn't get a stream slot */
4998 GLDM_UNLOCK(macinfo
);
4999 return (DL_TOOMANY
);
5004 * gld_disable_multi (q, mp)
5005 * Disable the multicast address on the stream. If last
5006 * reference for the mac layer, disable there as well.
5009 gld_disable_multi(queue_t
*q
, mblk_t
*mp
)
5012 gld_mac_info_t
*macinfo
;
5013 unsigned char *maddr
;
5014 dl_disabmulti_req_t
*multi
;
5019 if (gld_debug
& GLDPROT
) {
5020 cmn_err(CE_NOTE
, "gld_disable_multi(%p, %p)", (void *)q
,
5025 gld
= (gld_t
*)q
->q_ptr
;
5026 if (gld
->gld_state
== DL_UNATTACHED
)
5027 return (DL_OUTSTATE
);
5029 macinfo
= gld
->gld_mac_info
;
5030 ASSERT(macinfo
!= NULL
);
5031 if (macinfo
->gldm_set_multicast
== NULL
) {
5032 return (DL_UNSUPPORTED
);
5035 multi
= (dl_disabmulti_req_t
*)mp
->b_rptr
;
5037 if (!MBLKIN(mp
, multi
->dl_addr_offset
, multi
->dl_addr_length
) ||
5038 multi
->dl_addr_length
!= macinfo
->gldm_addrlen
)
5039 return (DL_BADADDR
);
5041 maddr
= mp
->b_rptr
+ multi
->dl_addr_offset
;
5043 /* request appears to be valid */
5044 /* does this address appear in current table? */
5045 GLDM_LOCK(macinfo
, RW_WRITER
);
5046 if (gld
->gld_mcast
!= NULL
) {
5047 for (i
= 0; i
< gld
->gld_multicnt
; i
++)
5048 if (((mcast
= gld
->gld_mcast
[i
]) != NULL
) &&
5049 mac_eq(mcast
->gldm_addr
,
5050 maddr
, macinfo
->gldm_addrlen
)) {
5051 ASSERT(mcast
->gldm_refcnt
);
5052 gld_send_disable_multi(macinfo
, mcast
);
5053 gld
->gld_mcast
[i
] = NULL
;
5054 GLDM_UNLOCK(macinfo
);
5055 dlokack(q
, mp
, DL_DISABMULTI_REQ
);
5059 GLDM_UNLOCK(macinfo
);
5060 return (DL_NOTENAB
); /* not an enabled address */
5064 * gld_send_disable_multi(macinfo, mcast)
5065 * this function is used to disable a multicast address if the reference
5066 * count goes to zero. The disable request will then be forwarded to the
5070 gld_send_disable_multi(gld_mac_info_t
*macinfo
, gld_mcast_t
*mcast
)
5072 ASSERT(macinfo
!= NULL
);
5073 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo
));
5074 ASSERT(mcast
!= NULL
);
5075 ASSERT(mcast
->gldm_refcnt
);
5077 if (!mcast
->gldm_refcnt
) {
5078 return; /* "cannot happen" */
5081 if (--mcast
->gldm_refcnt
> 0) {
5086 * This must be converted from canonical form to device form.
5087 * The refcnt is now zero so we can trash the data.
5089 if (macinfo
->gldm_options
& GLDOPT_CANONICAL_ADDR
)
5090 gld_bitreverse(mcast
->gldm_addr
, macinfo
->gldm_addrlen
);
5092 /* XXX Ought to check for GLD_NORESOURCES or GLD_FAILURE */
5093 (void) (*macinfo
->gldm_set_multicast
)
5094 (macinfo
, mcast
->gldm_addr
, GLD_MULTI_DISABLE
);
5098 * gld_promisc (q, mp, req, on)
5099 * enable or disable the use of promiscuous mode with the hardware
5102 gld_promisc(queue_t
*q
, mblk_t
*mp
, t_uscalar_t req
, boolean_t on
)
5105 gld_mac_info_t
*macinfo
;
5106 gld_mac_pvt_t
*mac_pvt
;
5108 union DL_primitives
*prim
;
5109 int macrc
= GLD_SUCCESS
;
5110 int dlerr
= GLDE_OK
;
5111 int op
= GLD_MAC_PROMISC_NOOP
;
5114 if (gld_debug
& GLDTRACE
)
5115 cmn_err(CE_NOTE
, "gld_promisc(%p, %p, %d, %d)",
5116 (void *)q
, (void *)mp
, req
, on
);
5120 prim
= (union DL_primitives
*)mp
->b_rptr
;
5122 /* XXX I think spec allows promisc in unattached state */
5123 gld
= (gld_t
*)q
->q_ptr
;
5124 if (gld
->gld_state
== DL_UNATTACHED
)
5125 return (DL_OUTSTATE
);
5127 macinfo
= gld
->gld_mac_info
;
5128 ASSERT(macinfo
!= NULL
);
5129 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
5131 vlan
= (gld_vlan_t
*)gld
->gld_vlan
;
5132 ASSERT(vlan
!= NULL
);
5134 GLDM_LOCK(macinfo
, RW_WRITER
);
5137 * Work out what request (if any) has to be made to the MAC layer
5140 switch (prim
->promiscon_req
.dl_level
) {
5142 dlerr
= DL_UNSUPPORTED
; /* this is an error */
5145 case DL_PROMISC_PHYS
:
5146 if (mac_pvt
->nprom
== 0)
5147 op
= GLD_MAC_PROMISC_PHYS
;
5150 case DL_PROMISC_MULTI
:
5151 if (mac_pvt
->nprom_multi
== 0)
5152 if (mac_pvt
->nprom
== 0)
5153 op
= GLD_MAC_PROMISC_MULTI
;
5156 case DL_PROMISC_SAP
:
5157 /* We can do this without reference to the MAC */
5161 switch (prim
->promiscoff_req
.dl_level
) {
5163 dlerr
= DL_UNSUPPORTED
; /* this is an error */
5166 case DL_PROMISC_PHYS
:
5167 if (!(gld
->gld_flags
& GLD_PROM_PHYS
))
5169 else if (mac_pvt
->nprom
== 1)
5170 if (mac_pvt
->nprom_multi
)
5171 op
= GLD_MAC_PROMISC_MULTI
;
5173 op
= GLD_MAC_PROMISC_NONE
;
5176 case DL_PROMISC_MULTI
:
5177 if (!(gld
->gld_flags
& GLD_PROM_MULT
))
5179 else if (mac_pvt
->nprom_multi
== 1)
5180 if (mac_pvt
->nprom
== 0)
5181 op
= GLD_MAC_PROMISC_NONE
;
5184 case DL_PROMISC_SAP
:
5185 if (!(gld
->gld_flags
& GLD_PROM_SAP
))
5188 /* We can do this without reference to the MAC */
5194 * The request was invalid in some way so no need to continue.
5196 if (dlerr
!= GLDE_OK
) {
5197 GLDM_UNLOCK(macinfo
);
5202 * Issue the request to the MAC layer, if required
5204 if (op
!= GLD_MAC_PROMISC_NOOP
) {
5205 macrc
= (*macinfo
->gldm_set_promiscuous
)(macinfo
, op
);
5209 * On success, update the appropriate flags & refcounts
5211 if (macrc
== GLD_SUCCESS
) {
5213 switch (prim
->promiscon_req
.dl_level
) {
5214 case DL_PROMISC_PHYS
:
5217 gld
->gld_flags
|= GLD_PROM_PHYS
;
5220 case DL_PROMISC_MULTI
:
5221 mac_pvt
->nprom_multi
++;
5223 gld
->gld_flags
|= GLD_PROM_MULT
;
5226 case DL_PROMISC_SAP
:
5227 gld
->gld_flags
|= GLD_PROM_SAP
;
5229 vlan
->gldv_nvlan_sap
++;
5236 switch (prim
->promiscoff_req
.dl_level
) {
5237 case DL_PROMISC_PHYS
:
5240 gld
->gld_flags
&= ~GLD_PROM_PHYS
;
5243 case DL_PROMISC_MULTI
:
5244 mac_pvt
->nprom_multi
--;
5246 gld
->gld_flags
&= ~GLD_PROM_MULT
;
5249 case DL_PROMISC_SAP
:
5250 gld
->gld_flags
&= ~GLD_PROM_SAP
;
5251 vlan
->gldv_nvlan_sap
--;
5259 } else if (macrc
== GLD_RETRY
) {
5261 * The putbq and gld_xwait must be within the lock to
5262 * prevent races with gld_sched.
5264 (void) putbq(q
, mp
);
5265 gld
->gld_xwait
= B_TRUE
;
5268 GLDM_UNLOCK(macinfo
);
5271 * Finally, decide how to reply.
5273 * If <macrc> is not GLD_SUCCESS, the request was put to the MAC
5274 * layer but failed. In such cases, we can return a DL_* error
5275 * code and let the caller send an error-ack reply upstream, or
5276 * we can send a reply here and then return GLDE_OK so that the
5277 * caller doesn't also respond.
5279 * If physical-promiscuous mode was (successfully) switched on or
5280 * off, send a notification (DL_NOTIFY_IND) to anyone interested.
5283 case GLD_NOTSUPPORTED
:
5284 return (DL_NOTSUPPORTED
);
5286 case GLD_NORESOURCES
:
5287 dlerrorack(q
, mp
, req
, DL_SYSERR
, ENOSR
);
5291 return (GLDE_RETRY
);
5294 dlerrorack(q
, mp
, req
, DL_SYSERR
, EIO
);
5298 dlokack(q
, mp
, req
);
5303 case GLD_MAC_PROMISC_NOOP
:
5306 case GLD_MAC_PROMISC_PHYS
:
5307 gld_notify_ind(macinfo
, DL_NOTE_PROMISC_ON_PHYS
, NULL
);
5311 gld_notify_ind(macinfo
, DL_NOTE_PROMISC_OFF_PHYS
, NULL
);
5320 * get the current or factory physical address value
5323 gld_physaddr(queue_t
*q
, mblk_t
*mp
)
5325 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
5326 gld_mac_info_t
*macinfo
;
5327 union DL_primitives
*prim
= (union DL_primitives
*)mp
->b_rptr
;
5328 unsigned char addr
[GLD_MAX_ADDRLEN
];
5330 if (gld
->gld_state
== DL_UNATTACHED
)
5331 return (DL_OUTSTATE
);
5333 macinfo
= (gld_mac_info_t
*)gld
->gld_mac_info
;
5334 ASSERT(macinfo
!= NULL
);
5335 ASSERT(macinfo
->gldm_addrlen
<= GLD_MAX_ADDRLEN
);
5337 switch (prim
->physaddr_req
.dl_addr_type
) {
5338 case DL_FACT_PHYS_ADDR
:
5339 mac_copy((caddr_t
)macinfo
->gldm_vendor_addr
,
5340 (caddr_t
)addr
, macinfo
->gldm_addrlen
);
5342 case DL_CURR_PHYS_ADDR
:
5343 /* make a copy so we don't hold the lock across qreply */
5344 GLDM_LOCK(macinfo
, RW_WRITER
);
5346 ((gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
)->curr_macaddr
,
5347 (caddr_t
)addr
, macinfo
->gldm_addrlen
);
5348 GLDM_UNLOCK(macinfo
);
5351 return (DL_BADPRIM
);
5353 dlphysaddrack(q
, mp
, (caddr_t
)addr
, macinfo
->gldm_addrlen
);
5359 * change the hardware's physical address to a user specified value
5362 gld_setaddr(queue_t
*q
, mblk_t
*mp
)
5364 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
5365 gld_mac_info_t
*macinfo
;
5366 gld_mac_pvt_t
*mac_pvt
;
5367 union DL_primitives
*prim
= (union DL_primitives
*)mp
->b_rptr
;
5368 unsigned char *addr
;
5369 unsigned char cmaddr
[GLD_MAX_ADDRLEN
];
5373 if (gld
->gld_state
== DL_UNATTACHED
)
5374 return (DL_OUTSTATE
);
5376 vlan
= (gld_vlan_t
*)gld
->gld_vlan
;
5377 ASSERT(vlan
!= NULL
);
5379 if (vlan
->gldv_id
!= VLAN_VID_NONE
)
5380 return (DL_NOTSUPPORTED
);
5382 macinfo
= (gld_mac_info_t
*)gld
->gld_mac_info
;
5383 ASSERT(macinfo
!= NULL
);
5384 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
5386 if (!MBLKIN(mp
, prim
->set_physaddr_req
.dl_addr_offset
,
5387 prim
->set_physaddr_req
.dl_addr_length
) ||
5388 prim
->set_physaddr_req
.dl_addr_length
!= macinfo
->gldm_addrlen
)
5389 return (DL_BADADDR
);
5391 GLDM_LOCK(macinfo
, RW_WRITER
);
5393 /* now do the set at the hardware level */
5394 addr
= mp
->b_rptr
+ prim
->set_physaddr_req
.dl_addr_offset
;
5395 ASSERT(sizeof (cmaddr
) >= macinfo
->gldm_addrlen
);
5396 cmac_copy(addr
, cmaddr
, macinfo
->gldm_addrlen
, macinfo
);
5398 rc
= (*macinfo
->gldm_set_mac_addr
)(macinfo
, cmaddr
);
5399 if (rc
== GLD_SUCCESS
)
5400 mac_copy(addr
, mac_pvt
->curr_macaddr
,
5401 macinfo
->gldm_addrlen
);
5403 GLDM_UNLOCK(macinfo
);
5408 case GLD_NOTSUPPORTED
:
5409 return (DL_NOTSUPPORTED
);
5411 return (DL_BADADDR
);
5412 case GLD_NORESOURCES
:
5413 dlerrorack(q
, mp
, DL_SET_PHYS_ADDR_REQ
, DL_SYSERR
, ENOSR
);
5416 dlerrorack(q
, mp
, DL_SET_PHYS_ADDR_REQ
, DL_SYSERR
, EIO
);
5420 gld_notify_ind(macinfo
, DL_NOTE_PHYS_ADDR
, NULL
);
5422 dlokack(q
, mp
, DL_SET_PHYS_ADDR_REQ
);
5427 gld_get_statistics(queue_t
*q
, mblk_t
*mp
)
5429 dl_get_statistics_ack_t
*dlsp
;
5430 gld_t
*gld
= (gld_t
*)q
->q_ptr
;
5431 gld_mac_info_t
*macinfo
= gld
->gld_mac_info
;
5432 gld_mac_pvt_t
*mac_pvt
;
5434 if (gld
->gld_state
== DL_UNATTACHED
)
5435 return (DL_OUTSTATE
);
5437 ASSERT(macinfo
!= NULL
);
5439 mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
5440 (void) gld_update_kstat(mac_pvt
->kstatp
, KSTAT_READ
);
5442 mp
= mexchange(q
, mp
, DL_GET_STATISTICS_ACK_SIZE
+
5443 sizeof (struct gldkstats
), M_PCPROTO
, DL_GET_STATISTICS_ACK
);
5446 return (GLDE_OK
); /* mexchange already sent merror */
5448 dlsp
= (dl_get_statistics_ack_t
*)mp
->b_rptr
;
5449 dlsp
->dl_primitive
= DL_GET_STATISTICS_ACK
;
5450 dlsp
->dl_stat_length
= sizeof (struct gldkstats
);
5451 dlsp
->dl_stat_offset
= DL_GET_STATISTICS_ACK_SIZE
;
5453 GLDM_LOCK(macinfo
, RW_WRITER
);
5454 bcopy(mac_pvt
->kstatp
->ks_data
,
5455 (mp
->b_rptr
+ DL_GET_STATISTICS_ACK_SIZE
),
5456 sizeof (struct gldkstats
));
5457 GLDM_UNLOCK(macinfo
);
5463 /* =================================================== */
5464 /* misc utilities, some requiring various mutexes held */
5465 /* =================================================== */
5468 * Initialize and start the driver.
5471 gld_start_mac(gld_mac_info_t
*macinfo
)
5474 unsigned char cmaddr
[GLD_MAX_ADDRLEN
];
5475 gld_mac_pvt_t
*mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
5477 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo
));
5478 ASSERT(!mac_pvt
->started
);
5480 rc
= (*macinfo
->gldm_reset
)(macinfo
);
5481 if (rc
!= GLD_SUCCESS
)
5482 return (GLD_FAILURE
);
5484 /* set the addr after we reset the device */
5485 ASSERT(sizeof (cmaddr
) >= macinfo
->gldm_addrlen
);
5486 cmac_copy(((gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
)
5487 ->curr_macaddr
, cmaddr
, macinfo
->gldm_addrlen
, macinfo
);
5489 rc
= (*macinfo
->gldm_set_mac_addr
)(macinfo
, cmaddr
);
5490 ASSERT(rc
!= GLD_BADARG
); /* this address was good before */
5491 if (rc
!= GLD_SUCCESS
&& rc
!= GLD_NOTSUPPORTED
)
5492 return (GLD_FAILURE
);
5494 rc
= (*macinfo
->gldm_start
)(macinfo
);
5495 if (rc
!= GLD_SUCCESS
)
5496 return (GLD_FAILURE
);
5498 mac_pvt
->started
= B_TRUE
;
5499 return (GLD_SUCCESS
);
5506 gld_stop_mac(gld_mac_info_t
*macinfo
)
5508 gld_mac_pvt_t
*mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
5510 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo
));
5511 ASSERT(mac_pvt
->started
);
5513 (void) (*macinfo
->gldm_stop
)(macinfo
);
5515 mac_pvt
->started
= B_FALSE
;
5520 * gld_set_ipq will set a pointer to the queue which is bound to the
5522 * o the device type is ethernet or IPoIB.
5523 * o there is no stream in SAP promiscuous mode.
5524 * o there is exactly one stream bound to the IP sap.
5525 * o the stream is in "fastpath" mode.
5528 gld_set_ipq(gld_t
*gld
)
5531 gld_mac_info_t
*macinfo
= gld
->gld_mac_info
;
5532 gld_t
*ip_gld
= NULL
;
5533 uint_t ipq_candidates
= 0;
5534 gld_t
*ipv6_gld
= NULL
;
5535 uint_t ipv6q_candidates
= 0;
5537 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo
));
5539 /* The ipq code in gld_recv() is intimate with ethernet/IPoIB */
5540 if (((macinfo
->gldm_type
!= DL_ETHER
) &&
5541 (macinfo
->gldm_type
!= DL_IB
)) ||
5542 (gld_global_options
& GLD_OPT_NO_IPQ
))
5545 vlan
= (gld_vlan_t
*)gld
->gld_vlan
;
5546 ASSERT(vlan
!= NULL
);
5548 /* clear down any previously defined ipqs */
5549 vlan
->gldv_ipq
= NULL
;
5550 vlan
->gldv_ipv6q
= NULL
;
5552 /* Try to find a single stream eligible to receive IP packets */
5553 for (gld
= vlan
->gldv_str_next
;
5554 gld
!= (gld_t
*)&vlan
->gldv_str_next
; gld
= gld
->gld_next
) {
5555 if (gld
->gld_state
!= DL_IDLE
)
5556 continue; /* not eligible to receive */
5557 if (gld
->gld_flags
& GLD_STR_CLOSING
)
5558 continue; /* not eligible to receive */
5560 if (gld
->gld_sap
== ETHERTYPE_IP
) {
5565 if (gld
->gld_sap
== ETHERTYPE_IPV6
) {
5571 if (ipq_candidates
== 1) {
5572 ASSERT(ip_gld
!= NULL
);
5574 if (ip_gld
->gld_flags
& GLD_FAST
) /* eligible for ipq */
5575 vlan
->gldv_ipq
= ip_gld
->gld_qptr
;
5578 if (ipv6q_candidates
== 1) {
5579 ASSERT(ipv6_gld
!= NULL
);
5581 if (ipv6_gld
->gld_flags
& GLD_FAST
) /* eligible for ipq */
5582 vlan
->gldv_ipv6q
= ipv6_gld
->gld_qptr
;
5587 * gld_flushqueue (q)
5588 * used by DLPI primitives that require flushing the queues.
5589 * essentially, this is DL_UNBIND_REQ.
5592 gld_flushqueue(queue_t
*q
)
5594 /* flush all data in both queues */
5595 /* XXX Should these be FLUSHALL? */
5596 flushq(q
, FLUSHDATA
);
5597 flushq(WR(q
), FLUSHDATA
);
5598 /* flush all the queues upstream */
5599 (void) putctl1(q
, M_FLUSH
, FLUSHRW
);
5603 * gld_devlookup (major)
5604 * search the device table for the device with specified
5605 * major number and return a pointer to it if it exists
5608 gld_devlookup(int major
)
5610 struct glddevice
*dev
;
5612 ASSERT(mutex_owned(&gld_device_list
.gld_devlock
));
5614 for (dev
= gld_device_list
.gld_next
;
5615 dev
!= &gld_device_list
;
5616 dev
= dev
->gld_next
) {
5618 if (dev
->gld_major
== major
)
5625 * gld_findminor(device)
5626 * Returns a minor number currently unused by any stream in the current
5627 * device class (major) list.
5630 gld_findminor(glddev_t
*device
)
5633 gld_mac_info_t
*nextmac
;
5634 gld_vlan_t
*nextvlan
;
5638 ASSERT(mutex_owned(&device
->gld_devlock
));
5641 if (device
->gld_nextminor
>= GLD_MIN_CLONE_MINOR
&&
5642 device
->gld_nextminor
<= GLD_MAX_CLONE_MINOR
)
5643 return (device
->gld_nextminor
++);
5645 /* The steady way */
5646 for (minor
= GLD_MIN_CLONE_MINOR
; minor
<= GLD_MAX_CLONE_MINOR
;
5648 /* Search all unattached streams */
5649 for (next
= device
->gld_str_next
;
5650 next
!= (gld_t
*)&device
->gld_str_next
;
5651 next
= next
->gld_next
) {
5652 if (minor
== next
->gld_minor
)
5655 /* Search all attached streams; we don't need maclock because */
5656 /* mac stream list is protected by devlock as well as maclock */
5657 for (nextmac
= device
->gld_mac_next
;
5658 nextmac
!= (gld_mac_info_t
*)&device
->gld_mac_next
;
5659 nextmac
= nextmac
->gldm_next
) {
5660 gld_mac_pvt_t
*pvt
=
5661 (gld_mac_pvt_t
*)nextmac
->gldm_mac_pvt
;
5663 if (!(nextmac
->gldm_GLD_flags
& GLD_MAC_READY
))
5664 continue; /* this one's not ready yet */
5666 for (i
= 0; i
< VLAN_HASHSZ
; i
++) {
5667 for (nextvlan
= pvt
->vlan_hash
[i
];
5669 nextvlan
= nextvlan
->gldv_next
) {
5670 for (next
= nextvlan
->gldv_str_next
;
5672 (gld_t
*)&nextvlan
->gldv_str_next
;
5673 next
= next
->gld_next
) {
5674 if (minor
== next
->gld_minor
)
5683 /* don't need to do anything */
5686 cmn_err(CE_WARN
, "GLD ran out of minor numbers for %s",
/*
 * version of insque/remque for use by this driver
 *
 * Elements live on circular doubly-linked lists headed by a sentinel;
 * any structure whose first two members are forward/back pointers may
 * be passed (hence the void * parameters and the qelem overlay below).
 */
struct qelem {
	struct qelem *q_forw;
	struct qelem *q_back;
	/* rest of structure */
};

/* Insert elem immediately after pred. */
static void
gldinsque(void *elem, void *pred)
{
	struct qelem *pelem = elem;
	struct qelem *ppred = pred;
	struct qelem *pnext = ppred->q_forw;

	pelem->q_forw = pnext;
	pelem->q_back = ppred;
	ppred->q_forw = pelem;
	pnext->q_back = pelem;
}

/* Unlink arg from its list and poison its link pointers. */
static void
gldremque(void *arg)
{
	struct qelem *pelem = arg;

	pelem->q_forw->q_back = pelem->q_back;
	pelem->q_back->q_forw = pelem->q_forw;
	pelem->q_forw = pelem->q_back = NULL;
}
5725 gld_add_vlan(gld_mac_info_t
*macinfo
, uint32_t vid
)
5727 gld_mac_pvt_t
*mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
5731 pp
= &(mac_pvt
->vlan_hash
[vid
% VLAN_HASHSZ
]);
5732 while ((p
= *pp
) != NULL
) {
5733 ASSERT(p
->gldv_id
!= vid
);
5734 pp
= &(p
->gldv_next
);
5737 if ((p
= kmem_zalloc(sizeof (gld_vlan_t
), KM_NOSLEEP
)) == NULL
)
5740 p
->gldv_mac
= macinfo
;
5743 if (vid
== VLAN_VID_NONE
) {
5744 p
->gldv_ptag
= VLAN_VTAG_NONE
;
5745 p
->gldv_stats
= mac_pvt
->statistics
;
5746 p
->gldv_kstatp
= NULL
;
5748 p
->gldv_ptag
= GLD_MK_PTAG(VLAN_CFI_ETHER
, vid
);
5749 p
->gldv_stats
= kmem_zalloc(sizeof (struct gld_stats
),
5752 if (gld_init_vlan_stats(p
) != GLD_SUCCESS
) {
5753 kmem_free(p
->gldv_stats
, sizeof (struct gld_stats
));
5754 kmem_free(p
, sizeof (gld_vlan_t
));
5759 p
->gldv_str_next
= p
->gldv_str_prev
= (gld_t
*)&p
->gldv_str_next
;
5767 gld_rem_vlan(gld_vlan_t
*vlan
)
5769 gld_mac_info_t
*macinfo
= vlan
->gldv_mac
;
5770 gld_mac_pvt_t
*mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
5774 pp
= &(mac_pvt
->vlan_hash
[vlan
->gldv_id
% VLAN_HASHSZ
]);
5775 while ((p
= *pp
) != NULL
) {
5776 if (p
->gldv_id
== vlan
->gldv_id
)
5778 pp
= &(p
->gldv_next
);
5784 if (p
->gldv_id
!= VLAN_VID_NONE
) {
5785 ASSERT(p
->gldv_kstatp
!= NULL
);
5786 kstat_delete(p
->gldv_kstatp
);
5787 kmem_free(p
->gldv_stats
, sizeof (struct gld_stats
));
5789 kmem_free(p
, sizeof (gld_vlan_t
));
5793 gld_find_vlan(gld_mac_info_t
*macinfo
, uint32_t vid
)
5795 gld_mac_pvt_t
*mac_pvt
= (gld_mac_pvt_t
*)macinfo
->gldm_mac_pvt
;
5798 p
= mac_pvt
->vlan_hash
[vid
% VLAN_HASHSZ
];
5800 if (p
->gldv_id
== vid
)
5808 gld_get_vlan(gld_mac_info_t
*macinfo
, uint32_t vid
)
5812 if ((vlan
= gld_find_vlan(macinfo
, vid
)) == NULL
)
5813 vlan
= gld_add_vlan(macinfo
, vid
);
5820 * This is essentially bcopy, with the ability to bit reverse the
5821 * the source bytes. The MAC addresses bytes as transmitted by FDDI
5822 * interfaces are bit reversed.
5825 gld_bitrevcopy(caddr_t src
, caddr_t target
, size_t n
)
5828 *target
++ = bit_rev
[(uchar_t
)*src
++];
5833 * Convert the bit order by swaping all the bits, using a
5837 gld_bitreverse(uchar_t
*rptr
, size_t n
)
5840 *rptr
= bit_rev
[*rptr
];
/*
 * gld_macaddr_sprintf()
 *	Format len bytes at ap into etherbuf as colon-separated lowercase
 *	hex ("aa:bb:cc"), NUL-terminated. Returns etherbuf.
 *
 *	Caller must supply a buffer of at least 3 * len bytes.
 *
 * NOTE(review): the colon separator and terminating NUL lines were missing
 * from the extracted source and have been reconstructed; verify upstream.
 */
char *
gld_macaddr_sprintf(char *etherbuf, unsigned char *ap, int len)
{
	int i;
	char *cp = etherbuf;
	static char digits[] = "0123456789abcdef";

	for (i = 0; i < len; i++) {
		*cp++ = digits[*ap >> 4];
		*cp++ = digits[*ap++ & 0xf];
		*cp++ = ':';
	}
	*--cp = 0;	/* overwrite trailing ':' with NUL */
	return (etherbuf);
}
5863 gld_check_assertions()
5866 gld_mac_info_t
*mac
;
5871 mutex_enter(&gld_device_list
.gld_devlock
);
5873 for (dev
= gld_device_list
.gld_next
;
5874 dev
!= (glddev_t
*)&gld_device_list
.gld_next
;
5875 dev
= dev
->gld_next
) {
5876 mutex_enter(&dev
->gld_devlock
);
5877 ASSERT(dev
->gld_broadcast
!= NULL
);
5878 for (str
= dev
->gld_str_next
;
5879 str
!= (gld_t
*)&dev
->gld_str_next
;
5880 str
= str
->gld_next
) {
5881 ASSERT(str
->gld_device
== dev
);
5882 ASSERT(str
->gld_mac_info
== NULL
);
5883 ASSERT(str
->gld_qptr
!= NULL
);
5884 ASSERT(str
->gld_minor
>= GLD_MIN_CLONE_MINOR
);
5885 ASSERT(str
->gld_multicnt
== 0);
5886 ASSERT(str
->gld_mcast
== NULL
);
5887 ASSERT(!(str
->gld_flags
&
5888 (GLD_PROM_PHYS
|GLD_PROM_MULT
|GLD_PROM_SAP
)));
5889 ASSERT(str
->gld_sap
== 0);
5890 ASSERT(str
->gld_state
== DL_UNATTACHED
);
5892 for (mac
= dev
->gld_mac_next
;
5893 mac
!= (gld_mac_info_t
*)&dev
->gld_mac_next
;
5894 mac
= mac
->gldm_next
) {
5896 gld_mac_pvt_t
*pvt
= (gld_mac_pvt_t
*)mac
->gldm_mac_pvt
;
5898 if (!(mac
->gldm_GLD_flags
& GLD_MAC_READY
))
5899 continue; /* this one's not ready yet */
5901 GLDM_LOCK(mac
, RW_WRITER
);
5902 ASSERT(mac
->gldm_devinfo
!= NULL
);
5903 ASSERT(mac
->gldm_mac_pvt
!= NULL
);
5904 ASSERT(pvt
->interfacep
!= NULL
);
5905 ASSERT(pvt
->kstatp
!= NULL
);
5906 ASSERT(pvt
->statistics
!= NULL
);
5907 ASSERT(pvt
->major_dev
== dev
);
5909 for (i
= 0; i
< VLAN_HASHSZ
; i
++) {
5910 for (vlan
= pvt
->vlan_hash
[i
];
5911 vlan
!= NULL
; vlan
= vlan
->gldv_next
) {
5914 ASSERT(vlan
->gldv_mac
== mac
);
5916 for (str
= vlan
->gldv_str_next
;
5918 (gld_t
*)&vlan
->gldv_str_next
;
5919 str
= str
->gld_next
) {
5920 ASSERT(str
->gld_device
== dev
);
5921 ASSERT(str
->gld_mac_info
==
5923 ASSERT(str
->gld_qptr
!= NULL
);
5924 ASSERT(str
->gld_minor
>=
5925 GLD_MIN_CLONE_MINOR
);
5927 str
->gld_multicnt
== 0 ||
5931 ASSERT(vlan
->gldv_nstreams
== nstr
);
5935 ASSERT(pvt
->nvlan
== nvlan
);
5938 mutex_exit(&dev
->gld_devlock
);
5940 mutex_exit(&gld_device_list
.gld_devlock
);