4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
27 * SunOS MT STREAMS FEPS(SBus)/Cheerio(PCI) 10/100Mb Ethernet Device Driver
30 #include <sys/types.h>
31 #include <sys/debug.h>
32 #include <sys/stream.h>
33 #include <sys/cmn_err.h>
35 #include <sys/crc32.h>
36 #include <sys/modctl.h>
38 #include <sys/strsun.h>
39 #include <sys/kstat.h>
40 #include <sys/pattr.h>
42 #include <sys/strsubr.h>
43 #include <sys/mac_provider.h>
44 #include <sys/mac_ether.h>
46 #include <sys/ethernet.h>
49 #include <sys/policy.h>
51 #include <sys/sunddi.h>
52 #include <sys/byteorder.h>
57 typedef void (*fptrv_t
)();
75 msg_t hme_debug_level
= NO_MSG
;
77 static char *msg_string
[] = {
/*
 * Message severity levels passed to hme_fault_msg() via the
 * HME_FAULT_MSG* macros.  Note NONE and LOW intentionally share the
 * value 0; UNKNOWN (99) marks messages with no assessed severity.
 */
93 #define SEVERITY_NONE 0
94 #define SEVERITY_LOW 0
95 #define SEVERITY_MID 1
96 #define SEVERITY_HIGH 2
97 #define SEVERITY_UNKNOWN 99
/* Feature-test style flag; defined with no value, presence-checked only. */
100 #define HME_CODEVIOL_BUG
/* Shorthand for the interrupt-kstat pointer of the current hmep. */
102 #define KIOIP KSTAT_INTR_PTR(hmep->hme_intrstats)
105 * The following variables are used for checking fixes in Sbus/FEPS 2.0
107 static int hme_urun_fix
= 0; /* Bug fixed in Sbus/FEPS 2.0 */
110 * The following variables are used for configuring various features
112 static int hme_64bit_enable
= 1; /* Use 64-bit sbus transfers */
113 static int hme_reject_own
= 1; /* Reject packets with own SA */
114 static int hme_ngu_enable
= 0; /* Never Give Up mode */
116 char *hme_priv_prop
[] = {
124 static int hme_lance_mode
= 1; /* to enable lance mode */
125 static int hme_ipg0
= 16;
126 static int hme_ipg1
= 8;
127 static int hme_ipg2
= 4;
130 * The following parameters may be configured by the user. If they are not
131 * configured by the user, the values will be based on the capabilities of
133 * The value "HME_NOTUSR" is ORed with the parameter value to indicate values
134 * which are NOT configured by the user.
/*
 * HME_NOTUSR is ORed into a tunable's value to mark it as NOT set by the
 * user (see the block comment above); the MASK_* constants give the
 * field widths used when extracting the real value.
 */
137 #define HME_NOTUSR 0x0f000000
138 #define HME_MASK_1BIT 0x1
139 #define HME_MASK_5BIT 0x1f
140 #define HME_MASK_8BIT 0xff
143 * All strings used by hme messaging functions
146 static char *no_xcvr_msg
=
147 "No transceiver found.";
149 static char *burst_size_msg
=
150 "Could not identify the burst size";
152 static char *unk_rx_ringsz_msg
=
153 "Unknown receive RINGSZ";
155 static char *add_intr_fail_msg
=
156 "ddi_add_intr(9F) failed";
158 static char *mregs_4global_reg_fail_msg
=
159 "ddi_regs_map_setup(9F) for global reg failed";
161 static char *mregs_4etx_reg_fail_msg
=
162 "ddi_map_regs for etx reg failed";
164 static char *mregs_4erx_reg_fail_msg
=
165 "ddi_map_regs for erx reg failed";
167 static char *mregs_4bmac_reg_fail_msg
=
168 "ddi_map_regs for bmac reg failed";
170 static char *mregs_4mif_reg_fail_msg
=
171 "ddi_map_regs for mif reg failed";
173 static char *init_fail_gen_msg
=
174 "Failed to initialize hardware/driver";
176 static char *ddi_nregs_fail_msg
=
177 "ddi_dev_nregs failed(9F), returned %d";
179 static char *bad_num_regs_msg
=
180 "Invalid number of registers.";
185 * Function prototypes.
187 /* these two are global so that qfe can use them */
188 int hmeattach(dev_info_t
*, ddi_attach_cmd_t
);
189 int hmedetach(dev_info_t
*, ddi_detach_cmd_t
);
190 int hmequiesce(dev_info_t
*);
191 static boolean_t
hmeinit_xfer_params(struct hme
*);
192 static uint_t
hmestop(struct hme
*);
193 static void hmestatinit(struct hme
*);
194 static int hmeallocthings(struct hme
*);
195 static void hmefreethings(struct hme
*);
196 static int hmeallocbuf(struct hme
*, hmebuf_t
*, int);
197 static int hmeallocbufs(struct hme
*);
198 static void hmefreebufs(struct hme
*);
199 static void hmeget_hm_rev_property(struct hme
*);
200 static boolean_t
hmestart(struct hme
*, mblk_t
*);
201 static uint_t
hmeintr(caddr_t
);
202 static void hmereclaim(struct hme
*);
203 static int hmeinit(struct hme
*);
204 static void hmeuninit(struct hme
*hmep
);
205 static mblk_t
*hmeread(struct hme
*, hmebuf_t
*, uint32_t);
206 static void hmesavecntrs(struct hme
*);
207 static void hme_fatal_err(struct hme
*, uint_t
);
208 static void hme_nonfatal_err(struct hme
*, uint_t
);
209 static int hmeburstsizes(struct hme
*);
210 static void send_bit(struct hme
*, uint16_t);
211 static uint16_t get_bit_std(uint8_t, struct hme
*);
212 static uint16_t hme_bb_mii_read(struct hme
*, uint8_t, uint8_t);
213 static void hme_bb_mii_write(struct hme
*, uint8_t, uint8_t, uint16_t);
214 static void hme_bb_force_idle(struct hme
*);
215 static uint16_t hme_mii_read(void *, uint8_t, uint8_t);
216 static void hme_mii_write(void *, uint8_t, uint8_t, uint16_t);
217 static void hme_setup_mac_address(struct hme
*, dev_info_t
*);
218 static void hme_mii_notify(void *, link_state_t
);
220 static void hme_fault_msg(struct hme
*, uint_t
, msg_t
, char *, ...);
222 static void hme_check_acc_handle(char *, uint_t
, struct hme
*,
226 * Nemo (GLDv3) Functions.
228 static int hme_m_stat(void *, uint_t
, uint64_t *);
229 static int hme_m_start(void *);
230 static void hme_m_stop(void *);
231 static int hme_m_promisc(void *, boolean_t
);
232 static int hme_m_multicst(void *, boolean_t
, const uint8_t *);
233 static int hme_m_unicst(void *, const uint8_t *);
234 static mblk_t
*hme_m_tx(void *, mblk_t
*);
235 static boolean_t
hme_m_getcapab(void *, mac_capab_t
, void *);
236 static int hme_m_getprop(void *, const char *, mac_prop_id_t
, uint_t
, void *);
237 static void hme_m_propinfo(void *, const char *, mac_prop_id_t
,
238 mac_prop_info_handle_t
);
239 static int hme_m_setprop(void *, const char *, mac_prop_id_t
, uint_t
,
242 static mii_ops_t hme_mii_ops
= {
250 static mac_callbacks_t hme_m_callbacks
= {
251 MC_GETCAPAB
| MC_SETPROP
| MC_GETPROP
| MC_PROPINFO
,
269 DDI_DEFINE_STREAM_OPS(hme_dev_ops
, nulldev
, nulldev
, hmeattach
, hmedetach
,
270 nodev
, NULL
, D_MP
, NULL
, hmequiesce
);
/*
 * Fixed-arity convenience wrappers around the varargs hme_fault_msg()
 * (0 to 3 arguments after the format string).
 * NOTE(review): each expansion carries its own trailing ';', so call
 * sites that also append ';' produce an extra empty statement — a
 * dangling-else hazard inside unbraced if/else; confirm call sites
 * before restructuring these (do { } while (0) would be safer).
 */
272 #define HME_FAULT_MSG1(p, s, t, f) \
273 hme_fault_msg((p), (s), (t), (f));
275 #define HME_FAULT_MSG2(p, s, t, f, a) \
276 hme_fault_msg((p), (s), (t), (f), (a));
278 #define HME_FAULT_MSG3(p, s, t, f, a, b) \
279 hme_fault_msg((p), (s), (t), (f), (a), (b));
281 #define HME_FAULT_MSG4(p, s, t, f, a, b, c) \
282 hme_fault_msg((p), (s), (t), (f), (a), (b), (c));
/*
 * Per-register-set DDI access-handle checks.  Each macro hands the
 * call site (__FILE__/__LINE__) and the handle for one mapped register
 * set (MIF, ETX, ERX, BigMAC, global) to hme_check_acc_handle().
 * They assume a local 'hmep' is in scope at the call site.
 */
284 #define CHECK_MIFREG() \
285 hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_mifregh)
286 #define CHECK_ETXREG() \
287 hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_etxregh)
288 #define CHECK_ERXREG() \
289 hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_erxregh)
290 #define CHECK_MACREG() \
291 hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_bmacregh)
292 #define CHECK_GLOBREG() \
293 hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_globregh)
296 * Claim the device is ultra-capable of burst in the beginning. Use
297 * the value returned by ddi_dma_burstsizes() to actually set the HME
298 * global configuration register later.
300 * Sbus/FEPS supports burst sizes of 16, 32 and 64 bytes. Also, it supports
301 * 32-bit and 64-bit Sbus transfers. Hence the dlim_burstsizes field contains
302 * the the burstsizes in both the lo and hi words.
/*
 * DMA address window bounds (full 32-bit space) used for the low/high
 * address fields of hme_dma_attr below.
 */
304 #define HMELIMADDRLO ((uint64_t)0x00000000)
305 #define HMELIMADDRHI ((uint64_t)0xffffffff)
308 * Note that rx and tx data buffers can be arbitrarily aligned, but
309 * that the descriptor rings need to be aligned on 2K boundaries, per
312 static ddi_dma_attr_t hme_dma_attr
= {
313 DMA_ATTR_V0
, /* version number. */
314 (uint64_t)HMELIMADDRLO
, /* low address */
315 (uint64_t)HMELIMADDRHI
, /* high address */
316 (uint64_t)0x00ffffff, /* address counter max */
317 (uint64_t)HME_HMDALIGN
, /* alignment */
318 (uint_t
)0x00700070, /* dlim_burstsizes for 32 and 64 bit xfers */
319 (uint32_t)0x1, /* minimum transfer size */
320 (uint64_t)0x7fffffff, /* maximum transfer size */
321 (uint64_t)0x00ffffff, /* maximum segment size */
322 1, /* scatter/gather list length */
323 512, /* granularity */
324 0 /* attribute flags */
327 static ddi_device_acc_attr_t hme_buf_attr
= {
330 DDI_STRICTORDER_ACC
, /* probably could allow merging & caching */
334 static uchar_t pci_latency_timer
= 0;
337 * Module linkage information for the kernel.
339 static struct modldrv modldrv
= {
340 &mod_driverops
, /* Type of module. This one is a driver */
341 "Sun HME 10/100 Mb Ethernet",
342 &hme_dev_ops
, /* driver ops */
345 static struct modlinkage modlinkage
= {
346 MODREV_1
, &modldrv
, NULL
349 /* <<<<<<<<<<<<<<<<<<<<<< Register operations >>>>>>>>>>>>>>>>>>>>> */
/*
 * 32-bit accessors for each mapped HME register set, pairing the DDI
 * access handle recorded at attach time with the corresponding mapped
 * register structure.  All assume a local 'hmep' in scope.
 */
351 #define GET_MIFREG(reg) \
352 ddi_get32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg)
353 #define PUT_MIFREG(reg, value) \
354 ddi_put32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg, value)
356 #define GET_ETXREG(reg) \
357 ddi_get32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg)
358 #define PUT_ETXREG(reg, value) \
359 ddi_put32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg, value)
360 #define GET_ERXREG(reg) \
361 ddi_get32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg)
362 #define PUT_ERXREG(reg, value) \
363 ddi_put32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg, value)
364 #define GET_MACREG(reg) \
365 ddi_get32(hmep->hme_bmacregh, (uint32_t *)&hmep->hme_bmacregp->reg)
366 #define PUT_MACREG(reg, value) \
367 ddi_put32(hmep->hme_bmacregh, \
368 (uint32_t *)&hmep->hme_bmacregp->reg, value)
369 #define GET_GLOBREG(reg) \
370 ddi_get32(hmep->hme_globregh, (uint32_t *)&hmep->hme_globregp->reg)
371 #define PUT_GLOBREG(reg, value) \
372 ddi_put32(hmep->hme_globregh, \
373 (uint32_t *)&hmep->hme_globregp->reg, value)
374 #define PUT_TMD(ptr, paddr, len, flags) \
375 ddi_put32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_addr, paddr); \
376 ddi_put32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_flags, \
/* Read a transmit descriptor's flags word through its DMA access handle. */
378 #define GET_TMD_FLAGS(ptr) \
379 ddi_get32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_flags)
/*
 * Initialize a receive descriptor: store the buffer address, then set
 * the flags word with the buffer size and HMERMD_OWN, handing the
 * descriptor to hardware.  NOTE(review): expands to two statements,
 * not wrapped in do { } while (0) — call sites must be braced.
 */
380 #define PUT_RMD(ptr, paddr) \
381 ddi_put32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_addr, paddr); \
382 ddi_put32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_flags, \
383 (uint32_t)(HMEBUFSIZE << HMERMD_BUFSIZE_SHIFT) | HMERMD_OWN)
/* Read a receive descriptor's flags word through its DMA access handle. */
384 #define GET_RMD_FLAGS(ptr) \
385 ddi_get32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_flags)
/* Read one byte from the mapped expansion ROM at the given offset. */
387 #define GET_ROM8(offset) \
388 ddi_get8((hmep->hme_romh), (offset))
391 * Ether_copy is not endian-correct. Define an endian-correct version.
/* Copy a 6-byte Ethernet address byte-by-byte (endian-neutral). */
393 #define ether_bcopy(a, b) (bcopy(a, b, 6))
396 * Ether-type is specifically big-endian, but data region is unknown endian
/*
 * Extract the big-endian EtherType from bytes 12-13 of a raw frame,
 * independent of host endianness.
 */
398 #define get_ether_type(ptr) \
399 (((((uint8_t *)ptr)[12] << 8) | (((uint8_t *)ptr)[13])))
401 /* <<<<<<<<<<<<<<<<<<<<<< Configuration Parameters >>>>>>>>>>>>>>>>>>>>> */
/*
 * Collision jam sizes for the BigMAC; the tunable 'jamsize' defaults to
 * the 4-byte jam.
 */
403 #define BMAC_DEFAULT_JAMSIZE (0x04) /* jamsize equals 4 */
404 #define BMAC_LONG_JAMSIZE (0x10) /* jamsize equals 0x10 */
405 static int jamsize
= BMAC_DEFAULT_JAMSIZE
;
409 * Calculate the bit in the multicast address filter that selects the given
414 hmeladrf_bit(const uint8_t *addr
)
418 CRC32(crc
, addr
, ETHERADDRL
, -1U, crc32_table
);
421 * Just want the 6 most significant bits.
426 /* <<<<<<<<<<<<<<<<<<<<<<<< Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
429 send_bit(struct hme
*hmep
, uint16_t x
)
431 PUT_MIFREG(mif_bbdata
, x
);
432 PUT_MIFREG(mif_bbclk
, HME_BBCLK_LOW
);
433 PUT_MIFREG(mif_bbclk
, HME_BBCLK_HIGH
);
438 * To read the MII register bits according to the IEEE Standard
441 get_bit_std(uint8_t phyad
, struct hme
*hmep
)
445 PUT_MIFREG(mif_bbclk
, HME_BBCLK_LOW
);
446 drv_usecwait(1); /* wait for >330 ns for stable data */
447 if (phyad
== HME_INTERNAL_PHYAD
)
448 x
= (GET_MIFREG(mif_cfg
) & HME_MIF_CFGM0
) ? 1 : 0;
450 x
= (GET_MIFREG(mif_cfg
) & HME_MIF_CFGM1
) ? 1 : 0;
451 PUT_MIFREG(mif_bbclk
, HME_BBCLK_HIGH
);
/*
 * Bit-bang MII shorthands: clock one bit out via send_bit(), or read
 * one bit for the given PHY address into 'x' via get_bit_std().
 * Both assume a local 'hmep' in scope.
 */
455 #define SEND_BIT(x) send_bit(hmep, x)
456 #define GET_BIT_STD(phyad, x) x = get_bit_std(phyad, hmep)
460 hme_bb_mii_write(struct hme
*hmep
, uint8_t phyad
, uint8_t regad
, uint16_t data
)
464 PUT_MIFREG(mif_bbopenb
, 1); /* Enable the MII driver */
465 (void) hme_bb_force_idle(hmep
);
466 SEND_BIT(0); SEND_BIT(1); /* <ST> */
467 SEND_BIT(0); SEND_BIT(1); /* <OP> */
469 for (i
= 4; i
>= 0; i
--) { /* <AAAAA> */
470 SEND_BIT((phyad
>> i
) & 1);
473 for (i
= 4; i
>= 0; i
--) { /* <RRRRR> */
474 SEND_BIT((regad
>> i
) & 1);
477 SEND_BIT(1); SEND_BIT(0); /* <TA> */
479 for (i
= 0xf; i
>= 0; i
--) { /* <DDDDDDDDDDDDDDDD> */
480 SEND_BIT((data
>> i
) & 1);
483 PUT_MIFREG(mif_bbopenb
, 0); /* Disable the MII driver */
487 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
489 hme_bb_mii_read(struct hme
*hmep
, uint8_t phyad
, uint8_t regad
)
495 PUT_MIFREG(mif_bbopenb
, 1); /* Enable the MII driver */
496 (void) hme_bb_force_idle(hmep
);
497 SEND_BIT(0); SEND_BIT(1); /* <ST> */
498 SEND_BIT(1); SEND_BIT(0); /* <OP> */
499 for (i
= 4; i
>= 0; i
--) { /* <AAAAA> */
500 SEND_BIT((phyad
>> i
) & 1);
502 for (i
= 4; i
>= 0; i
--) { /* <RRRRR> */
503 SEND_BIT((regad
>> i
) & 1);
506 PUT_MIFREG(mif_bbopenb
, 0); /* Disable the MII driver */
508 GET_BIT_STD(phyad
, x
);
509 GET_BIT_STD(phyad
, x
); /* <TA> */
510 for (i
= 0xf; i
>= 0; i
--) { /* <DDDDDDDDDDDDDDDD> */
511 GET_BIT_STD(phyad
, x
);
515 * Kludge to get the Transceiver out of hung mode
517 GET_BIT_STD(phyad
, x
);
518 GET_BIT_STD(phyad
, x
);
519 GET_BIT_STD(phyad
, x
);
526 hme_bb_force_idle(struct hme
*hmep
)
530 for (i
= 0; i
< 33; i
++) {
535 /* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
538 /* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */
540 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
542 hme_mii_read(void *arg
, uint8_t phyad
, uint8_t regad
)
544 struct hme
*hmep
= arg
;
549 tmp_mif
= GET_MIFREG(mif_cfg
);
550 tmp_xif
= GET_MACREG(xifc
);
553 case HME_EXTERNAL_PHYAD
:
554 PUT_MIFREG(mif_cfg
, tmp_mif
| HME_MIF_CFGPS
);
555 PUT_MACREG(xifc
, tmp_xif
| BMAC_XIFC_MIIBUFDIS
);
557 case HME_INTERNAL_PHYAD
:
558 PUT_MIFREG(mif_cfg
, tmp_mif
& ~(HME_MIF_CFGPS
));
559 PUT_MACREG(xifc
, tmp_xif
& ~(BMAC_XIFC_MIIBUFDIS
));
565 if (!hmep
->hme_frame_enable
) {
566 frame
= (hme_bb_mii_read(hmep
, phyad
, regad
));
567 PUT_MACREG(xifc
, tmp_xif
);
568 PUT_MIFREG(mif_cfg
, tmp_mif
);
569 return (frame
& 0xffff);
572 PUT_MIFREG(mif_frame
,
573 HME_MIF_FRREAD
| (phyad
<< HME_MIF_FRPHYAD_SHIFT
) |
574 (regad
<< HME_MIF_FRREGAD_SHIFT
));
576 * HMEDELAY((*framerp & HME_MIF_FRTA0), HMEMAXRSTDELAY);
578 HMEDELAY((GET_MIFREG(mif_frame
) & HME_MIF_FRTA0
), 300);
579 frame
= GET_MIFREG(mif_frame
);
582 PUT_MACREG(xifc
, tmp_xif
);
583 PUT_MIFREG(mif_cfg
, tmp_mif
);
585 if ((frame
& HME_MIF_FRTA0
) == 0) {
588 HME_FAULT_MSG1(hmep
, SEVERITY_UNKNOWN
, MII_MSG
,
592 return ((uint16_t)(frame
& HME_MIF_FRDATA
));
596 hme_mii_write(void *arg
, uint8_t phyad
, uint8_t regad
, uint16_t data
)
598 struct hme
*hmep
= arg
;
603 tmp_mif
= GET_MIFREG(mif_cfg
);
604 tmp_xif
= GET_MACREG(xifc
);
607 case HME_EXTERNAL_PHYAD
:
608 PUT_MIFREG(mif_cfg
, tmp_mif
| HME_MIF_CFGPS
);
609 PUT_MACREG(xifc
, tmp_xif
| BMAC_XIFC_MIIBUFDIS
);
611 case HME_INTERNAL_PHYAD
:
612 PUT_MIFREG(mif_cfg
, tmp_mif
& ~(HME_MIF_CFGPS
));
613 PUT_MACREG(xifc
, tmp_xif
& ~(BMAC_XIFC_MIIBUFDIS
));
619 if (!hmep
->hme_frame_enable
) {
620 hme_bb_mii_write(hmep
, phyad
, regad
, data
);
621 PUT_MACREG(xifc
, tmp_xif
);
622 PUT_MIFREG(mif_cfg
, tmp_mif
);
626 PUT_MIFREG(mif_frame
,
627 HME_MIF_FRWRITE
| (phyad
<< HME_MIF_FRPHYAD_SHIFT
) |
628 (regad
<< HME_MIF_FRREGAD_SHIFT
) | data
);
630 * HMEDELAY((*framerp & HME_MIF_FRTA0), HMEMAXRSTDELAY);
632 HMEDELAY((GET_MIFREG(mif_frame
) & HME_MIF_FRTA0
), 300);
633 frame
= GET_MIFREG(mif_frame
);
634 PUT_MACREG(xifc
, tmp_xif
);
635 PUT_MIFREG(mif_cfg
, tmp_mif
);
637 if ((frame
& HME_MIF_FRTA0
) == 0) {
638 HME_FAULT_MSG1(hmep
, SEVERITY_MID
, MII_MSG
,
639 "MIF Write failure");
644 hme_mii_notify(void *arg
, link_state_t link
)
646 struct hme
*hmep
= arg
;
648 if (link
== LINK_STATE_UP
) {
649 (void) hmeinit(hmep
);
651 mac_link_update(hmep
->hme_mh
, link
);
654 /* <<<<<<<<<<<<<<<<<<<<<<<<<<< LOADABLE ENTRIES >>>>>>>>>>>>>>>>>>>>>>> */
661 mac_init_ops(&hme_dev_ops
, "hme");
662 if ((status
= mod_install(&modlinkage
)) != 0) {
663 mac_fini_ops(&hme_dev_ops
);
673 if ((status
= mod_remove(&modlinkage
)) == 0) {
674 mac_fini_ops(&hme_dev_ops
);
680 _info(struct modinfo
*modinfop
)
682 return (mod_info(&modlinkage
, modinfop
));
686 * ddi_dma_sync() a TMD or RMD descriptor.
688 #define HMESYNCRMD(num, who) \
689 (void) ddi_dma_sync(hmep->hme_rmd_dmah, \
690 (num * sizeof (struct hme_rmd)), \
691 sizeof (struct hme_rmd), \
694 #define HMESYNCTMD(num, who) \
695 (void) ddi_dma_sync(hmep->hme_tmd_dmah, \
696 (num * sizeof (struct hme_tmd)), \
697 sizeof (struct hme_tmd), \
701 * Ethernet broadcast address definition.
703 static struct ether_addr etherbroadcastaddr
= {
704 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
708 * MIB II broadcast/multicast packets
/*
 * MIB-II address classification helpers.  A multicast destination has
 * the low bit of its first octet set; broadcast is all-ones.
 * NOTE(review): "ðerbroadcastaddr" below is HTML-entity mojibake
 * from extraction — the original token is "&etherbroadcastaddr"
 * ("&ether" was mangled to "ð"); restore before compiling.
 */
710 #define IS_BROADCAST(pkt) (bcmp(pkt, ðerbroadcastaddr, ETHERADDRL) == 0)
711 #define IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)
712 #define BUMP_InNUcast(hmep, pkt) \
713 if (IS_MULTICAST(pkt)) { \
714 if (IS_BROADCAST(pkt)) { \
715 hmep->hme_brdcstrcv++; \
717 hmep->hme_multircv++; \
720 #define BUMP_OutNUcast(hmep, pkt) \
721 if (IS_MULTICAST(pkt)) { \
722 if (IS_BROADCAST(pkt)) { \
723 hmep->hme_brdcstxmt++; \
725 hmep->hme_multixmt++; \
730 hme_create_prop_from_kw(dev_info_t
*dip
, char *vpdname
, char *vpdstr
)
734 struct ether_addr local_mac
;
736 if (strcmp(vpdname
, "NA") == 0) {
737 (void) strcpy(propstr
, "local-mac-address");
739 } else if (strcmp(vpdname
, "Z0") == 0) {
740 (void) strcpy(propstr
, "model");
742 } else if (strcmp(vpdname
, "Z1") == 0) {
743 (void) strcpy(propstr
, "board-model");
749 if (strcmp(propstr
, "local-mac-address") == 0) {
750 for (i
= 0; i
< ETHERADDRL
; i
++)
751 local_mac
.ether_addr_octet
[i
] =
753 if (ddi_prop_create(DDI_DEV_T_NONE
, dip
,
754 DDI_PROP_CANSLEEP
, propstr
,
755 (char *)local_mac
.ether_addr_octet
, ETHERADDRL
)
757 return (DDI_FAILURE
);
760 if (ddi_prop_create(DDI_DEV_T_NONE
, dip
,
761 DDI_PROP_CANSLEEP
, propstr
, vpdstr
,
762 strlen(vpdstr
)+1) != DDI_SUCCESS
) {
763 return (DDI_FAILURE
);
771 * Get properties from old VPD
775 hme_get_oldvpd_props(dev_info_t
*dip
, int vpd_base
)
778 int vpd_start
, vpd_len
, kw_start
, kw_len
, kw_ptr
;
780 char kw_fieldstr
[256];
783 hmep
= ddi_get_driver_private(dip
);
785 vpd_start
= vpd_base
;
787 if ((GET_ROM8(&hmep
->hme_romp
[vpd_start
]) & 0xff) != 0x90) {
788 return (1); /* error */
793 /* Get local-mac-address */
794 kw_start
= vpd_start
+ 3; /* Location of 1st keyword */
796 while ((kw_ptr
- kw_start
) < vpd_len
) { /* Get all keywords */
797 kw_namestr
[0] = GET_ROM8(&hmep
->hme_romp
[kw_ptr
]);
798 kw_namestr
[1] = GET_ROM8(&hmep
->hme_romp
[kw_ptr
+1]);
799 kw_namestr
[2] = '\0';
800 kw_len
= (int)(GET_ROM8(&hmep
->hme_romp
[kw_ptr
+2]) & 0xff);
801 for (i
= 0, kw_ptr
+= 3; i
< kw_len
; i
++)
802 kw_fieldstr
[i
] = GET_ROM8(&hmep
->hme_romp
[kw_ptr
+i
]);
803 kw_fieldstr
[i
] = '\0';
804 if (hme_create_prop_from_kw(dip
, kw_namestr
, kw_fieldstr
)) {
805 return (DDI_FAILURE
);
810 if (ddi_prop_create(DDI_DEV_T_NONE
, dip
, DDI_PROP_CANSLEEP
, "model",
811 "SUNW,cheerio", strlen("SUNW,cheerio")+1) != DDI_SUCCESS
) {
812 return (DDI_FAILURE
);
819 * Get properties from new VPD
820 * for CompactPCI cards
823 hme_get_newvpd_props(dev_info_t
*dip
, int vpd_base
)
826 int vpd_start
, vpd_len
, kw_start
, kw_len
, kw_ptr
;
828 char kw_fieldstr
[256];
831 hmep
= ddi_get_driver_private(dip
);
833 maxvpdsize
= 1024; /* Real size not known until after it is read */
835 vpd_start
= (int)((GET_ROM8(&(hmep
->hme_romp
[vpd_base
+1])) & 0xff) |
836 ((GET_ROM8(&hmep
->hme_romp
[vpd_base
+2]) & 0xff) << 8)) +3;
837 vpd_start
= vpd_base
+ vpd_start
;
838 while (vpd_start
< (vpd_base
+ maxvpdsize
)) { /* Get all VPDs */
839 if ((GET_ROM8(&hmep
->hme_romp
[vpd_start
]) & 0xff) != 0x90) {
840 break; /* no VPD found */
842 vpd_len
= (int)((GET_ROM8(&hmep
->hme_romp
[vpd_start
843 + 1]) & 0xff) | (GET_ROM8(&hmep
->hme_romp
[vpd_start
846 /* Get all keywords in this VPD */
847 kw_start
= vpd_start
+ 3; /* Location of 1st keyword */
849 while ((kw_ptr
- kw_start
) < vpd_len
) { /* Get all keywords */
850 kw_namestr
[0] = GET_ROM8(&hmep
->hme_romp
[kw_ptr
]);
851 kw_namestr
[1] = GET_ROM8(&hmep
->hme_romp
[kw_ptr
+1]);
852 kw_namestr
[2] = '\0';
854 (int)(GET_ROM8(&hmep
->hme_romp
[kw_ptr
+2]) & 0xff);
855 for (i
= 0, kw_ptr
+= 3; i
< kw_len
; i
++)
857 GET_ROM8(&hmep
->hme_romp
[kw_ptr
+i
]);
858 kw_fieldstr
[i
] = '\0';
859 if (hme_create_prop_from_kw(dip
, kw_namestr
,
861 return (DDI_FAILURE
);
865 vpd_start
+= (vpd_len
+ 3);
872 * Get properties from VPD
875 hme_get_vpd_props(dev_info_t
*dip
)
878 int v0
, v1
, vpd_base
;
879 int i
, epromsrchlimit
;
882 hmep
= ddi_get_driver_private(dip
);
884 v0
= (int)(GET_ROM8(&(hmep
->hme_romp
[0])));
885 v1
= (int)(GET_ROM8(&(hmep
->hme_romp
[1])));
886 v0
= ((v0
& 0xff) << 8 | v1
);
888 if ((v0
& 0xffff) != 0x55aa) {
889 cmn_err(CE_NOTE
, " Valid pci prom not found \n");
893 epromsrchlimit
= 4096;
894 for (i
= 2; i
< epromsrchlimit
; i
++) {
896 if (((GET_ROM8(&(hmep
->hme_romp
[i
])) & 0xff) == 'P') &&
897 ((GET_ROM8(&(hmep
->hme_romp
[i
+1])) & 0xff) == 'C') &&
898 ((GET_ROM8(&(hmep
->hme_romp
[i
+2])) & 0xff) == 'I') &&
899 ((GET_ROM8(&(hmep
->hme_romp
[i
+3])) & 0xff) == 'R')) {
901 (int)((GET_ROM8(&(hmep
->hme_romp
[i
+8])) & 0xff) |
902 (GET_ROM8(&(hmep
->hme_romp
[i
+9])) & 0xff) << 8);
903 break; /* VPD pointer found */
909 cmn_err(CE_NOTE
, " Vital Product Data pointer not found \n");
913 v0
= (int)(GET_ROM8(&(hmep
->hme_romp
[vpd_base
])));
915 if (hme_get_newvpd_props(dip
, vpd_base
))
918 } else if (v0
== 0x90) {
919 /* If we are are SUNW,qfe card, look for the Nth "NA" descr */
920 if ((GET_ROM8(&hmep
->hme_romp
[vpd_base
+ 12]) != 0x79) &&
921 GET_ROM8(&hmep
->hme_romp
[vpd_base
+ 4 * 12]) == 0x79) {
922 vpd_base
+= hmep
->hme_devno
* 12;
924 if (hme_get_oldvpd_props(dip
, vpd_base
))
928 return (1); /* unknown start byte in VPD */
932 * For x86, the BIOS doesn't map the PCI Rom register for the qfe
933 * cards, so we have to extract it from the ebus bridge that is
934 * function zero of the same device. This is a bit of an ugly hack.
935 * (The ebus bridge leaves the entire ROM mapped at base address
943 ddi_acc_handle_t acch
;
948 hme_mapebusrom(dev_info_t
*dip
, void *arg
)
953 ebus_rom_t
*rom
= arg
;
954 struct hme
*hmep
= rom
->hmep
;
957 * We only want to look at our peers. Skip our parent.
959 if (dip
== rom
->parent
) {
960 return (DDI_WALK_PRUNESIB
);
963 if (ddi_get_parent(dip
) != rom
->parent
)
964 return (DDI_WALK_CONTINUE
);
966 if ((ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, dip
, 0,
967 "reg", ®s
, &nregs
)) != DDI_PROP_SUCCESS
) {
968 return (DDI_WALK_PRUNECHILD
);
973 return (DDI_WALK_PRUNECHILD
);
979 * Look for function 0 on our bus and device. If the device doesn't
980 * match, it might be an alternate peer, in which case we don't want
981 * to examine any of its children.
983 if ((PCI_REG_BUS_G(reg
) != rom
->bus
) ||
984 (PCI_REG_DEV_G(reg
) != rom
->dev
) ||
985 (PCI_REG_FUNC_G(reg
) != 0)) {
986 return (DDI_WALK_PRUNECHILD
);
989 (void) ddi_regs_map_setup(dip
, 1, &rom
->romp
, 0, 0, &hmep
->hme_dev_attr
,
992 * If we can't map the registers, the caller will notice that
995 return (DDI_WALK_TERMINATE
);
999 hmeget_promebus(dev_info_t
*dip
)
1006 hmep
= ddi_get_driver_private(dip
);
1008 bzero(&rom
, sizeof (rom
));
1011 * For x86, the BIOS doesn't map the PCI Rom register for the qfe
1012 * cards, so we have to extract it from the eBus bridge that is
1013 * function zero. This is a bit of an ugly hack.
1015 if ((ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, dip
, 0,
1016 "reg", ®s
, &nregs
)) != DDI_PROP_SUCCESS
) {
1017 return (DDI_FAILURE
);
1021 ddi_prop_free(regs
);
1022 return (DDI_FAILURE
);
1025 rom
.bus
= PCI_REG_BUS_G(regs
[0]);
1026 rom
.dev
= PCI_REG_DEV_G(regs
[0]);
1027 hmep
->hme_devno
= rom
.dev
;
1028 rom
.parent
= ddi_get_parent(dip
);
1031 * The implementation of ddi_walk_devs says that we must not
1032 * be called during autoconfiguration. However, it turns out
1033 * that it is safe to call this during our attach routine,
1034 * because we are not a nexus device.
1036 * Previously we rooted our search at our immediate parent,
1037 * but this triggered an assertion panic in debug kernels.
1039 ddi_walk_devs(ddi_root_node(), hme_mapebusrom
, &rom
);
1042 hmep
->hme_romh
= rom
.acch
;
1043 hmep
->hme_romp
= (unsigned char *)rom
.romp
;
1044 return (DDI_SUCCESS
);
1046 return (DDI_FAILURE
);
1050 hmeget_promprops(dev_info_t
*dip
)
1054 ddi_acc_handle_t cfg_handle
;
1076 hmep
= ddi_get_driver_private(dip
);
1080 * map configuration space
1082 if (ddi_regs_map_setup(hmep
->dip
, 0, (caddr_t
*)&cfg_ptr
,
1083 0, 0, &hmep
->hme_dev_attr
, &cfg_handle
)) {
1084 return (DDI_FAILURE
);
1088 * Enable bus-master and memory accesses
1090 ddi_put16(cfg_handle
, &cfg_ptr
->command
,
1091 PCI_COMM_SERR_ENABLE
| PCI_COMM_PARITY_DETECT
|
1092 PCI_COMM_MAE
| PCI_COMM_ME
);
1095 * Enable rom accesses
1097 rom_bar
= ddi_get32(cfg_handle
, &cfg_ptr
->base30
);
1098 ddi_put32(cfg_handle
, &cfg_ptr
->base30
, rom_bar
| 1);
1101 if ((ddi_regs_map_setup(dip
, 2, (caddr_t
*)&(hmep
->hme_romp
), 0, 0,
1102 &hmep
->hme_dev_attr
, &hmep
->hme_romh
) != DDI_SUCCESS
) &&
1103 (hmeget_promebus(dip
) != DDI_SUCCESS
)) {
1106 ddi_regs_map_free(&cfg_handle
);
1107 return (DDI_FAILURE
);
1109 if (hme_get_vpd_props(dip
))
1110 return (DDI_FAILURE
);
1113 ddi_regs_map_free(&hmep
->hme_romh
);
1115 ddi_regs_map_free(&cfg_handle
);
1116 return (DDI_SUCCESS
);
1121 hmeget_hm_rev_property(struct hme
*hmep
)
1126 hm_rev
= hmep
->asic_rev
;
1129 case HME_2P1_REVID_OBP
:
1130 HME_FAULT_MSG2(hmep
, SEVERITY_NONE
, DISPLAY_MSG
,
1131 "SBus 2.1 Found (Rev Id = %x)", hm_rev
);
1132 hmep
->hme_frame_enable
= 1;
1136 HME_FAULT_MSG2(hmep
, SEVERITY_NONE
, DISPLAY_MSG
,
1137 "SBus 2.0 Found (Rev Id = %x)", hm_rev
);
1141 HME_FAULT_MSG2(hmep
, SEVERITY_NONE
, DISPLAY_MSG
,
1142 "PCI IO 1.0 Found (Rev Id = %x)", hm_rev
);
1146 HME_FAULT_MSG3(hmep
, SEVERITY_NONE
, DISPLAY_MSG
,
1147 "%s (Rev Id = %x) Found",
1148 (hm_rev
== HME_2C0_REVID
) ? "PCI IO 2.0" : "Sbus", hm_rev
);
1149 hmep
->hme_frame_enable
= 1;
1150 hmep
->hme_lance_mode_enable
= 1;
1151 hmep
->hme_rxcv_enable
= 1;
1157 * Interface exists: make available by filling in network interface
1158 * record. System will initialize the interface when it is ready
1159 * to accept packets.
1162 hmeattach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
1165 mac_register_t
*macp
= NULL
;
1168 int prop_len
= sizeof (int);
1169 ddi_acc_handle_t cfg_handle
;
1185 if ((hmep
= ddi_get_driver_private(dip
)) == NULL
)
1186 return (DDI_FAILURE
);
1188 hmep
->hme_flags
&= ~HMESUSPENDED
;
1190 mii_resume(hmep
->hme_mii
);
1192 if (hmep
->hme_started
)
1193 (void) hmeinit(hmep
);
1194 return (DDI_SUCCESS
);
1197 return (DDI_FAILURE
);
1201 * Allocate soft device data structure
1203 hmep
= kmem_zalloc(sizeof (*hmep
), KM_SLEEP
);
1206 * Might as well set up elements of data structure
1209 hmep
->instance
= ddi_get_instance(dip
);
1210 hmep
->pagesize
= ddi_ptob(dip
, (ulong_t
)1); /* IOMMU PSize */
1213 * Might as well setup the driver private
1214 * structure as part of the dip.
1216 ddi_set_driver_private(dip
, hmep
);
1219 * Reject this device if it's in a slave-only slot.
1221 if (ddi_slaveonly(dip
) == DDI_SUCCESS
) {
1222 HME_FAULT_MSG1(hmep
, SEVERITY_UNKNOWN
, CONFIG_MSG
,
1223 "Dev not used - dev in slave only slot");
1228 * Map in the device registers.
1230 * Reg # 0 is the Global register set
1231 * Reg # 1 is the ETX register set
1232 * Reg # 2 is the ERX register set
1233 * Reg # 3 is the BigMAC register set.
1234 * Reg # 4 is the MIF register set
1236 if (ddi_dev_nregs(dip
, ®no
) != (DDI_SUCCESS
)) {
1237 HME_FAULT_MSG2(hmep
, SEVERITY_HIGH
, INIT_MSG
,
1238 ddi_nregs_fail_msg
, regno
);
1244 hmep
->hme_cheerio_mode
= 0;
1247 case 3: /* for hot swap/plug, there will be 3 entries in "reg" prop */
1248 hmep
->hme_cheerio_mode
= 1;
1251 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
1256 /* Initialize device attributes structure */
1257 hmep
->hme_dev_attr
.devacc_attr_version
= DDI_DEVICE_ATTR_V0
;
1259 if (hmep
->hme_cheerio_mode
)
1260 hmep
->hme_dev_attr
.devacc_attr_endian_flags
=
1261 DDI_STRUCTURE_LE_ACC
;
1263 hmep
->hme_dev_attr
.devacc_attr_endian_flags
=
1264 DDI_STRUCTURE_BE_ACC
;
1266 hmep
->hme_dev_attr
.devacc_attr_dataorder
= DDI_STRICTORDER_ACC
;
1268 if (hmep
->hme_cheerio_mode
) {
1272 const char *pdrvname
;
1275 * Map the PCI config space
1277 if (pci_config_setup(dip
, &hmep
->pci_config_handle
) !=
1279 HME_FAULT_MSG1(hmep
, SEVERITY_UNKNOWN
, CONFIG_MSG
,
1280 "pci_config_setup() failed..");
1284 if (ddi_regs_map_setup(dip
, 1,
1285 (caddr_t
*)&(hmep
->hme_globregp
), 0, 0,
1286 &hmep
->hme_dev_attr
, &hmep
->hme_globregh
)) {
1287 HME_FAULT_MSG1(hmep
, SEVERITY_UNKNOWN
, CONFIG_MSG
,
1288 mregs_4global_reg_fail_msg
);
1291 hmep
->hme_etxregh
= hmep
->hme_erxregh
= hmep
->hme_bmacregh
=
1292 hmep
->hme_mifregh
= hmep
->hme_globregh
;
1295 (void *)(((caddr_t
)hmep
->hme_globregp
) + 0x2000);
1297 (void *)(((caddr_t
)hmep
->hme_globregp
) + 0x4000);
1298 hmep
->hme_bmacregp
=
1299 (void *)(((caddr_t
)hmep
->hme_globregp
) + 0x6000);
1301 (void *)(((caddr_t
)hmep
->hme_globregp
) + 0x7000);
1304 * Get parent pci bridge info.
1306 pdip
= ddi_get_parent(dip
);
1307 pdrvname
= ddi_driver_name(pdip
);
1309 oldLT
= pci_config_get8(hmep
->pci_config_handle
,
1310 PCI_CONF_LATENCY_TIMER
);
1312 * Honor value set in /etc/system
1313 * "set hme:pci_latency_timer=0xYY"
1315 if (pci_latency_timer
)
1316 newLT
= pci_latency_timer
;
1318 * Modify LT for simba
1320 else if (strcmp("simba", pdrvname
) == 0)
1323 * Ensure minimum cheerio latency timer of 0x50
1324 * Usually OBP or pci bridge should set this value
1326 * min_grant * 8(33MHz) = 0x50 = 0xa * 0x8
1327 * Some system set cheerio LT at 0x40
1329 else if (oldLT
< 0x40)
1333 * Now program cheerio's pci latency timer with newLT
1336 pci_config_put8(hmep
->pci_config_handle
,
1337 PCI_CONF_LATENCY_TIMER
, (uchar_t
)newLT
);
1338 } else { /* Map register sets */
1339 if (ddi_regs_map_setup(dip
, 0,
1340 (caddr_t
*)&(hmep
->hme_globregp
), 0, 0,
1341 &hmep
->hme_dev_attr
, &hmep
->hme_globregh
)) {
1342 HME_FAULT_MSG1(hmep
, SEVERITY_UNKNOWN
, CONFIG_MSG
,
1343 mregs_4global_reg_fail_msg
);
1346 if (ddi_regs_map_setup(dip
, 1,
1347 (caddr_t
*)&(hmep
->hme_etxregp
), 0, 0,
1348 &hmep
->hme_dev_attr
, &hmep
->hme_etxregh
)) {
1349 HME_FAULT_MSG1(hmep
, SEVERITY_UNKNOWN
, CONFIG_MSG
,
1350 mregs_4etx_reg_fail_msg
);
1353 if (ddi_regs_map_setup(dip
, 2,
1354 (caddr_t
*)&(hmep
->hme_erxregp
), 0, 0,
1355 &hmep
->hme_dev_attr
, &hmep
->hme_erxregh
)) {
1356 HME_FAULT_MSG1(hmep
, SEVERITY_UNKNOWN
, CONFIG_MSG
,
1357 mregs_4erx_reg_fail_msg
);
1360 if (ddi_regs_map_setup(dip
, 3,
1361 (caddr_t
*)&(hmep
->hme_bmacregp
), 0, 0,
1362 &hmep
->hme_dev_attr
, &hmep
->hme_bmacregh
)) {
1363 HME_FAULT_MSG1(hmep
, SEVERITY_UNKNOWN
, CONFIG_MSG
,
1364 mregs_4bmac_reg_fail_msg
);
1368 if (ddi_regs_map_setup(dip
, 4,
1369 (caddr_t
*)&(hmep
->hme_mifregp
), 0, 0,
1370 &hmep
->hme_dev_attr
, &hmep
->hme_mifregh
)) {
1371 HME_FAULT_MSG1(hmep
, SEVERITY_UNKNOWN
, CONFIG_MSG
,
1372 mregs_4mif_reg_fail_msg
);
1375 } /* Endif cheerio_mode */
1378 * Based on the hm-rev, set some capabilities
1379 * Set up default capabilities for HM 2.0
1381 hmep
->hme_frame_enable
= 0;
1382 hmep
->hme_lance_mode_enable
= 0;
1383 hmep
->hme_rxcv_enable
= 0;
1385 /* NEW routine to get the properties */
1387 if (ddi_getlongprop_buf(DDI_DEV_T_ANY
, hmep
->dip
, 0, "hm-rev",
1388 (caddr_t
)&hm_rev
, &prop_len
) == DDI_PROP_SUCCESS
) {
1390 hmep
->asic_rev
= hm_rev
;
1391 hmeget_hm_rev_property(hmep
);
1394 * hm_rev property not found so, this is
1395 * case of hot insertion of card without interpreting fcode.
1396 * Get it from revid in config space after mapping it.
1398 if (ddi_regs_map_setup(hmep
->dip
, 0, (caddr_t
*)&cfg_ptr
,
1399 0, 0, &hmep
->hme_dev_attr
, &cfg_handle
)) {
1400 return (DDI_FAILURE
);
1403 * Since this is cheerio-based PCI card, we write 0xC in the
1404 * top 4 bits(4-7) of hm-rev and retain the bottom(0-3) bits
1405 * for Cheerio version(1.0 or 2.0 = 0xC0 or 0xC1)
1407 hm_rev
= ddi_get8(cfg_handle
, &cfg_ptr
->revid
);
1408 hm_rev
= HME_1C0_REVID
| (hm_rev
& HME_REV_VERS_MASK
);
1409 hmep
->asic_rev
= hm_rev
;
1410 if (ddi_prop_create(DDI_DEV_T_NONE
, dip
, DDI_PROP_CANSLEEP
,
1411 "hm-rev", (caddr_t
)&hm_rev
, sizeof (hm_rev
)) !=
1413 HME_FAULT_MSG1(hmep
, SEVERITY_UNKNOWN
, AUTOCONFIG_MSG
,
1414 "ddi_prop_create error for hm_rev");
1416 ddi_regs_map_free(&cfg_handle
);
1418 hmeget_hm_rev_property(hmep
);
1420 /* get info via VPD */
1421 if (hmeget_promprops(dip
) != DDI_SUCCESS
) {
1422 HME_FAULT_MSG1(hmep
, SEVERITY_UNKNOWN
, AUTOCONFIG_MSG
,
1427 if (ddi_intr_hilevel(dip
, 0)) {
1428 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, NFATAL_ERR_MSG
,
1429 " high-level interrupts are not supported");
1434 * Get intr. block cookie so that mutex locks can be initialized.
1436 if (ddi_get_iblock_cookie(dip
, 0, &hmep
->hme_cookie
) != DDI_SUCCESS
)
1440 * Initialize mutex's for this device.
1442 mutex_init(&hmep
->hme_xmitlock
, NULL
, MUTEX_DRIVER
, hmep
->hme_cookie
);
1443 mutex_init(&hmep
->hme_intrlock
, NULL
, MUTEX_DRIVER
, hmep
->hme_cookie
);
1446 * Quiesce the hardware.
1448 (void) hmestop(hmep
);
1451 * Add interrupt to system
1453 if (ddi_add_intr(dip
, 0, (ddi_iblock_cookie_t
*)NULL
,
1454 (ddi_idevice_cookie_t
*)NULL
, hmeintr
, (caddr_t
)hmep
)) {
1455 HME_FAULT_MSG1(hmep
, SEVERITY_UNKNOWN
, CONFIG_MSG
,
1461 * Set up the ethernet mac address.
1463 hme_setup_mac_address(hmep
, dip
);
1465 if (!hmeinit_xfer_params(hmep
))
1468 if (hmeburstsizes(hmep
) == DDI_FAILURE
) {
1469 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
, burst_size_msg
);
1473 if (hmeallocthings(hmep
) != DDI_SUCCESS
) {
1474 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, CONFIG_MSG
,
1475 "resource allocation failed");
1479 if (hmeallocbufs(hmep
) != DDI_SUCCESS
) {
1480 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, CONFIG_MSG
,
1481 "buffer allocation failed");
1487 /* our external (preferred) PHY is at address 0 */
1488 (void) ddi_prop_update_int(DDI_DEV_T_NONE
, dip
, "first-phy", 0);
1490 hmep
->hme_mii
= mii_alloc(hmep
, dip
, &hme_mii_ops
);
1491 if (hmep
->hme_mii
== NULL
) {
1492 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, CONFIG_MSG
,
1493 "mii_alloc failed");
1496 /* force a probe for the PHY */
1497 mii_probe(hmep
->hme_mii
);
1499 if ((macp
= mac_alloc(MAC_VERSION
)) == NULL
) {
1500 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, CONFIG_MSG
,
1501 "mac_alloc failed");
1504 macp
->m_type_ident
= MAC_PLUGIN_IDENT_ETHER
;
1505 macp
->m_driver
= hmep
;
1507 macp
->m_src_addr
= hmep
->hme_ouraddr
.ether_addr_octet
;
1508 macp
->m_callbacks
= &hme_m_callbacks
;
1509 macp
->m_min_sdu
= 0;
1510 macp
->m_max_sdu
= ETHERMTU
;
1511 macp
->m_margin
= VLAN_TAGSZ
;
1512 macp
->m_priv_props
= hme_priv_prop
;
1513 if (mac_register(macp
, &hmep
->hme_mh
) != 0) {
1520 ddi_report_dev(dip
);
1521 return (DDI_SUCCESS
);
1528 if (hmep
->hme_cookie
)
1529 ddi_remove_intr(dip
, 0, (ddi_iblock_cookie_t
)0);
1532 mii_free(hmep
->hme_mii
);
1535 mutex_destroy(&hmep
->hme_xmitlock
);
1536 mutex_destroy(&hmep
->hme_intrlock
);
1539 if (hmep
->hme_globregh
)
1540 ddi_regs_map_free(&hmep
->hme_globregh
);
1541 if (hmep
->hme_cheerio_mode
== 0) {
1542 if (hmep
->hme_etxregh
)
1543 ddi_regs_map_free(&hmep
->hme_etxregh
);
1544 if (hmep
->hme_erxregh
)
1545 ddi_regs_map_free(&hmep
->hme_erxregh
);
1546 if (hmep
->hme_bmacregh
)
1547 ddi_regs_map_free(&hmep
->hme_bmacregh
);
1548 if (hmep
->hme_mifregh
)
1549 ddi_regs_map_free(&hmep
->hme_mifregh
);
1551 if (hmep
->pci_config_handle
)
1552 (void) pci_config_teardown(&hmep
->pci_config_handle
);
1553 hmep
->hme_etxregh
= hmep
->hme_erxregh
= hmep
->hme_bmacregh
=
1554 hmep
->hme_mifregh
= hmep
->hme_globregh
= NULL
;
1558 hmefreethings(hmep
);
1562 kmem_free((caddr_t
)hmep
, sizeof (*hmep
));
1563 ddi_set_driver_private(dip
, NULL
);
1566 return (DDI_FAILURE
);
1570 hmedetach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
)
1574 if ((hmep
= ddi_get_driver_private(dip
)) == NULL
)
1575 return (DDI_FAILURE
);
1582 mii_suspend(hmep
->hme_mii
);
1583 hmep
->hme_flags
|= HMESUSPENDED
;
1585 return (DDI_SUCCESS
);
1588 return (DDI_FAILURE
);
1592 if (mac_unregister(hmep
->hme_mh
) != 0) {
1593 return (DDI_FAILURE
);
1597 * Make driver quiescent, we don't want to prevent the
1598 * detach on failure. Note that this should be redundant,
1599 * since mac_stop should already have called hmeuninit().
1601 if (!(hmep
->hme_flags
& HMESUSPENDED
)) {
1602 (void) hmestop(hmep
);
1606 mii_free(hmep
->hme_mii
);
1609 * Remove instance of the intr
1611 ddi_remove_intr(dip
, 0, (ddi_iblock_cookie_t
)0);
1614 * Unregister kstats.
1616 if (hmep
->hme_ksp
!= NULL
)
1617 kstat_delete(hmep
->hme_ksp
);
1618 if (hmep
->hme_intrstats
!= NULL
)
1619 kstat_delete(hmep
->hme_intrstats
);
1621 hmep
->hme_ksp
= NULL
;
1622 hmep
->hme_intrstats
= NULL
;
1625 * Destroy all mutexes and data structures allocated during
1628 * Note: at this time we should be the only thread accessing
1629 * the structures for this instance.
1632 if (hmep
->hme_globregh
)
1633 ddi_regs_map_free(&hmep
->hme_globregh
);
1634 if (hmep
->hme_cheerio_mode
== 0) {
1635 if (hmep
->hme_etxregh
)
1636 ddi_regs_map_free(&hmep
->hme_etxregh
);
1637 if (hmep
->hme_erxregh
)
1638 ddi_regs_map_free(&hmep
->hme_erxregh
);
1639 if (hmep
->hme_bmacregh
)
1640 ddi_regs_map_free(&hmep
->hme_bmacregh
);
1641 if (hmep
->hme_mifregh
)
1642 ddi_regs_map_free(&hmep
->hme_mifregh
);
1644 if (hmep
->pci_config_handle
)
1645 (void) pci_config_teardown(&hmep
->pci_config_handle
);
1646 hmep
->hme_etxregh
= hmep
->hme_erxregh
= hmep
->hme_bmacregh
=
1647 hmep
->hme_mifregh
= hmep
->hme_globregh
= NULL
;
1650 mutex_destroy(&hmep
->hme_xmitlock
);
1651 mutex_destroy(&hmep
->hme_intrlock
);
1653 hmefreethings(hmep
);
1656 ddi_set_driver_private(dip
, NULL
);
1657 kmem_free(hmep
, sizeof (struct hme
));
1659 return (DDI_SUCCESS
);
1663 hmequiesce(dev_info_t
*dip
)
1667 if ((hmep
= ddi_get_driver_private(dip
)) == NULL
)
1668 return (DDI_FAILURE
);
1670 (void) hmestop(hmep
);
1671 return (DDI_SUCCESS
);
1675 hmeinit_xfer_params(struct hme
*hmep
)
1677 int hme_ipg1_conf
, hme_ipg2_conf
;
1678 int hme_ipg0_conf
, hme_lance_mode_conf
;
1679 int prop_len
= sizeof (int);
1685 * Set up the start-up values for user-configurable parameters
1686 * Get the values from the global variables first.
1687 * Use the MASK to limit the value to allowed maximum.
1689 hmep
->hme_ipg1
= hme_ipg1
& HME_MASK_8BIT
;
1690 hmep
->hme_ipg2
= hme_ipg2
& HME_MASK_8BIT
;
1691 hmep
->hme_ipg0
= hme_ipg0
& HME_MASK_5BIT
;
1694 * Get the parameter values configured in .conf file.
1696 if (ddi_getlongprop_buf(DDI_DEV_T_ANY
, dip
, 0, "ipg1",
1697 (caddr_t
)&hme_ipg1_conf
, &prop_len
) == DDI_PROP_SUCCESS
) {
1698 hmep
->hme_ipg1
= hme_ipg1_conf
& HME_MASK_8BIT
;
1701 if (ddi_getlongprop_buf(DDI_DEV_T_ANY
, dip
, 0, "ipg2",
1702 (caddr_t
)&hme_ipg2_conf
, &prop_len
) == DDI_PROP_SUCCESS
) {
1703 hmep
->hme_ipg2
= hme_ipg2_conf
& HME_MASK_8BIT
;
1706 if (ddi_getlongprop_buf(DDI_DEV_T_ANY
, dip
, 0, "ipg0",
1707 (caddr_t
)&hme_ipg0_conf
, &prop_len
) == DDI_PROP_SUCCESS
) {
1708 hmep
->hme_ipg0
= hme_ipg0_conf
& HME_MASK_5BIT
;
1711 if (ddi_getlongprop_buf(DDI_DEV_T_ANY
, dip
, 0, "lance_mode",
1712 (caddr_t
)&hme_lance_mode_conf
, &prop_len
) == DDI_PROP_SUCCESS
) {
1713 hmep
->hme_lance_mode
= hme_lance_mode_conf
& HME_MASK_1BIT
;
1720 * Return 0 upon success, 1 on failure.
1723 hmestop(struct hme
*hmep
)
1726 * Disable the Tx dma engine.
1728 PUT_ETXREG(config
, (GET_ETXREG(config
) & ~HMET_CONFIG_TXDMA_EN
));
1729 HMEDELAY(((GET_ETXREG(state_mach
) & 0x1f) == 0x1), HMEMAXRSTDELAY
);
1732 * Disable the Rx dma engine.
1734 PUT_ERXREG(config
, (GET_ERXREG(config
) & ~HMER_CONFIG_RXDMA_EN
));
1735 HMEDELAY(((GET_ERXREG(state_mach
) & 0x3f) == 0), HMEMAXRSTDELAY
);
1738 * By this time all things should be quiet, so hit the
1739 * chip with a reset.
1741 PUT_GLOBREG(reset
, HMEG_RESET_GLOBAL
);
1743 HMEDELAY((GET_GLOBREG(reset
) == 0), HMEMAXRSTDELAY
);
1744 if (GET_GLOBREG(reset
)) {
1753 hmestat_kstat_update(kstat_t
*ksp
, int rw
)
1756 struct hmekstat
*hkp
;
1758 hmep
= (struct hme
*)ksp
->ks_private
;
1759 hkp
= (struct hmekstat
*)ksp
->ks_data
;
1761 if (rw
!= KSTAT_READ
)
1765 * Update all the stats by reading all the counter registers.
1766 * Counter register stats are not updated till they overflow
1770 mutex_enter(&hmep
->hme_xmitlock
);
1771 if (hmep
->hme_flags
& HMERUNNING
) {
1775 mutex_exit(&hmep
->hme_xmitlock
);
1777 hkp
->hk_cvc
.value
.ul
= hmep
->hme_cvc
;
1778 hkp
->hk_lenerr
.value
.ul
= hmep
->hme_lenerr
;
1779 hkp
->hk_buff
.value
.ul
= hmep
->hme_buff
;
1780 hkp
->hk_missed
.value
.ul
= hmep
->hme_missed
;
1781 hkp
->hk_allocbfail
.value
.ul
= hmep
->hme_allocbfail
;
1782 hkp
->hk_babl
.value
.ul
= hmep
->hme_babl
;
1783 hkp
->hk_tmder
.value
.ul
= hmep
->hme_tmder
;
1784 hkp
->hk_txlaterr
.value
.ul
= hmep
->hme_txlaterr
;
1785 hkp
->hk_rxlaterr
.value
.ul
= hmep
->hme_rxlaterr
;
1786 hkp
->hk_slvparerr
.value
.ul
= hmep
->hme_slvparerr
;
1787 hkp
->hk_txparerr
.value
.ul
= hmep
->hme_txparerr
;
1788 hkp
->hk_rxparerr
.value
.ul
= hmep
->hme_rxparerr
;
1789 hkp
->hk_slverrack
.value
.ul
= hmep
->hme_slverrack
;
1790 hkp
->hk_txerrack
.value
.ul
= hmep
->hme_txerrack
;
1791 hkp
->hk_rxerrack
.value
.ul
= hmep
->hme_rxerrack
;
1792 hkp
->hk_txtagerr
.value
.ul
= hmep
->hme_txtagerr
;
1793 hkp
->hk_rxtagerr
.value
.ul
= hmep
->hme_rxtagerr
;
1794 hkp
->hk_eoperr
.value
.ul
= hmep
->hme_eoperr
;
1795 hkp
->hk_notmds
.value
.ul
= hmep
->hme_notmds
;
1796 hkp
->hk_notbufs
.value
.ul
= hmep
->hme_notbufs
;
1797 hkp
->hk_norbufs
.value
.ul
= hmep
->hme_norbufs
;
1802 hkp
->hk_inits
.value
.ul
= hmep
->inits
;
1803 hkp
->hk_phyfail
.value
.ul
= hmep
->phyfail
;
1808 hkp
->hk_asic_rev
.value
.ul
= hmep
->asic_rev
;
1814 hmestatinit(struct hme
*hmep
)
1817 struct hmekstat
*hkp
;
1822 instance
= hmep
->instance
;
1823 driver
= ddi_driver_name(hmep
->dip
);
1825 if ((ksp
= kstat_create(driver
, instance
,
1826 "driver_info", "net", KSTAT_TYPE_NAMED
,
1827 sizeof (struct hmekstat
) / sizeof (kstat_named_t
), 0)) == NULL
) {
1828 HME_FAULT_MSG1(hmep
, SEVERITY_UNKNOWN
, INIT_MSG
,
1829 "kstat_create failed");
1833 (void) snprintf(buf
, sizeof (buf
), "%sc%d", driver
, instance
);
1834 hmep
->hme_intrstats
= kstat_create(driver
, instance
, buf
, "controller",
1835 KSTAT_TYPE_INTR
, 1, KSTAT_FLAG_PERSISTENT
);
1836 if (hmep
->hme_intrstats
)
1837 kstat_install(hmep
->hme_intrstats
);
1839 hmep
->hme_ksp
= ksp
;
1840 hkp
= (struct hmekstat
*)ksp
->ks_data
;
1841 kstat_named_init(&hkp
->hk_cvc
, "code_violations",
1843 kstat_named_init(&hkp
->hk_lenerr
, "len_errors",
1845 kstat_named_init(&hkp
->hk_buff
, "buff",
1847 kstat_named_init(&hkp
->hk_missed
, "missed",
1849 kstat_named_init(&hkp
->hk_nocanput
, "nocanput",
1851 kstat_named_init(&hkp
->hk_allocbfail
, "allocbfail",
1853 kstat_named_init(&hkp
->hk_babl
, "babble",
1855 kstat_named_init(&hkp
->hk_tmder
, "tmd_error",
1857 kstat_named_init(&hkp
->hk_txlaterr
, "tx_late_error",
1859 kstat_named_init(&hkp
->hk_rxlaterr
, "rx_late_error",
1861 kstat_named_init(&hkp
->hk_slvparerr
, "slv_parity_error",
1863 kstat_named_init(&hkp
->hk_txparerr
, "tx_parity_error",
1865 kstat_named_init(&hkp
->hk_rxparerr
, "rx_parity_error",
1867 kstat_named_init(&hkp
->hk_slverrack
, "slv_error_ack",
1869 kstat_named_init(&hkp
->hk_txerrack
, "tx_error_ack",
1871 kstat_named_init(&hkp
->hk_rxerrack
, "rx_error_ack",
1873 kstat_named_init(&hkp
->hk_txtagerr
, "tx_tag_error",
1875 kstat_named_init(&hkp
->hk_rxtagerr
, "rx_tag_error",
1877 kstat_named_init(&hkp
->hk_eoperr
, "eop_error",
1879 kstat_named_init(&hkp
->hk_notmds
, "no_tmds",
1881 kstat_named_init(&hkp
->hk_notbufs
, "no_tbufs",
1883 kstat_named_init(&hkp
->hk_norbufs
, "no_rbufs",
1889 kstat_named_init(&hkp
->hk_inits
, "inits",
1891 kstat_named_init(&hkp
->hk_phyfail
, "phy_failures",
1897 kstat_named_init(&hkp
->hk_asic_rev
, "asic_rev",
1900 ksp
->ks_update
= hmestat_kstat_update
;
1901 ksp
->ks_private
= (void *) hmep
;
1906 hme_m_getprop(void *arg
, const char *name
, mac_prop_id_t num
, uint_t sz
,
1909 struct hme
*hmep
= arg
;
1913 rv
= mii_m_getprop(hmep
->hme_mii
, name
, num
, sz
, val
);
1918 case MAC_PROP_PRIVATE
:
1924 if (strcmp(name
, "_ipg0") == 0) {
1925 value
= hmep
->hme_ipg0
;
1926 } else if (strcmp(name
, "_ipg1") == 0) {
1927 value
= hmep
->hme_ipg1
;
1928 } else if (strcmp(name
, "_ipg2") == 0) {
1929 value
= hmep
->hme_ipg2
;
1930 } else if (strcmp(name
, "_lance_mode") == 0) {
1931 value
= hmep
->hme_lance_mode
;
1935 (void) snprintf(val
, sz
, "%d", value
);
1940 hme_m_propinfo(void *arg
, const char *name
, mac_prop_id_t num
,
1941 mac_prop_info_handle_t mph
)
1943 struct hme
*hmep
= arg
;
1945 mii_m_propinfo(hmep
->hme_mii
, name
, num
, mph
);
1948 case MAC_PROP_PRIVATE
: {
1952 if (strcmp(name
, "_ipg0") == 0) {
1953 default_val
= hme_ipg0
;
1954 } else if (strcmp(name
, "_ipg1") == 0) {
1955 default_val
= hme_ipg1
;
1956 } else if (strcmp(name
, "_ipg2") == 0) {
1957 default_val
= hme_ipg2
;
1958 } if (strcmp(name
, "_lance_mode") == 0) {
1959 default_val
= hme_lance_mode
;
1964 (void) snprintf(valstr
, sizeof (valstr
), "%d", default_val
);
1965 mac_prop_info_set_default_str(mph
, valstr
);
1972 hme_m_setprop(void *arg
, const char *name
, mac_prop_id_t num
, uint_t sz
,
1975 struct hme
*hmep
= arg
;
1978 boolean_t init
= B_FALSE
;
1980 rv
= mii_m_setprop(hmep
->hme_mii
, name
, num
, sz
, val
);
1986 case MAC_PROP_PRIVATE
:
1992 (void) ddi_strtol(val
, NULL
, 0, &lval
);
1994 if (strcmp(name
, "_ipg1") == 0) {
1995 if ((lval
>= 0) && (lval
<= 255)) {
1996 hmep
->hme_ipg1
= lval
& 0xff;
2002 } else if (strcmp(name
, "_ipg2") == 0) {
2003 if ((lval
>= 0) && (lval
<= 255)) {
2004 hmep
->hme_ipg2
= lval
& 0xff;
2010 } else if (strcmp(name
, "_ipg0") == 0) {
2011 if ((lval
>= 0) && (lval
<= 31)) {
2012 hmep
->hme_ipg0
= lval
& 0xff;
2017 } else if (strcmp(name
, "_lance_mode") == 0) {
2018 if ((lval
>= 0) && (lval
<= 1)) {
2019 hmep
->hme_lance_mode
= lval
& 0xff;
2030 (void) hmeinit(hmep
);
2038 hme_m_getcapab(void *arg
, mac_capab_t cap
, void *cap_data
)
2041 case MAC_CAPAB_HCKSUM
:
2042 *(uint32_t *)cap_data
= HCKSUM_INET_PARTIAL
;
2050 hme_m_promisc(void *arg
, boolean_t on
)
2052 struct hme
*hmep
= arg
;
2054 hmep
->hme_promisc
= on
;
2055 (void) hmeinit(hmep
);
2060 hme_m_unicst(void *arg
, const uint8_t *macaddr
)
2062 struct hme
*hmep
= arg
;
2065 * Set new interface local address and re-init device.
2066 * This is destructive to any other streams attached
2069 mutex_enter(&hmep
->hme_intrlock
);
2070 bcopy(macaddr
, &hmep
->hme_ouraddr
, ETHERADDRL
);
2071 mutex_exit(&hmep
->hme_intrlock
);
2072 (void) hmeinit(hmep
);
2077 hme_m_multicst(void *arg
, boolean_t add
, const uint8_t *macaddr
)
2079 struct hme
*hmep
= arg
;
2081 boolean_t doinit
= B_FALSE
;
2084 * If this address's bit was not already set in the local address
2085 * filter, add it and re-initialize the Hardware.
2087 ladrf_bit
= hmeladrf_bit(macaddr
);
2089 mutex_enter(&hmep
->hme_intrlock
);
2091 hmep
->hme_ladrf_refcnt
[ladrf_bit
]++;
2092 if (hmep
->hme_ladrf_refcnt
[ladrf_bit
] == 1) {
2093 hmep
->hme_ladrf
[ladrf_bit
>> 4] |=
2094 1 << (ladrf_bit
& 0xf);
2099 hmep
->hme_ladrf_refcnt
[ladrf_bit
]--;
2100 if (hmep
->hme_ladrf_refcnt
[ladrf_bit
] == 0) {
2101 hmep
->hme_ladrf
[ladrf_bit
>> 4] &=
2102 ~(1 << (ladrf_bit
& 0xf));
2106 mutex_exit(&hmep
->hme_intrlock
);
2109 (void) hmeinit(hmep
);
2116 hme_m_start(void *arg
)
2118 struct hme
*hmep
= arg
;
2120 if (hmeinit(hmep
) != 0) {
2121 /* initialization failed -- really want DL_INITFAILED */
2124 hmep
->hme_started
= B_TRUE
;
2125 mii_start(hmep
->hme_mii
);
2131 hme_m_stop(void *arg
)
2133 struct hme
*hmep
= arg
;
2135 mii_stop(hmep
->hme_mii
);
2136 hmep
->hme_started
= B_FALSE
;
2141 hme_m_stat(void *arg
, uint_t stat
, uint64_t *val
)
2143 struct hme
*hmep
= arg
;
2145 mutex_enter(&hmep
->hme_xmitlock
);
2146 if (hmep
->hme_flags
& HMERUNNING
) {
2150 mutex_exit(&hmep
->hme_xmitlock
);
2153 if (mii_m_getstat(hmep
->hme_mii
, stat
, val
) == 0) {
2157 case MAC_STAT_IPACKETS
:
2158 *val
= hmep
->hme_ipackets
;
2160 case MAC_STAT_RBYTES
:
2161 *val
= hmep
->hme_rbytes
;
2163 case MAC_STAT_IERRORS
:
2164 *val
= hmep
->hme_ierrors
;
2166 case MAC_STAT_OPACKETS
:
2167 *val
= hmep
->hme_opackets
;
2169 case MAC_STAT_OBYTES
:
2170 *val
= hmep
->hme_obytes
;
2172 case MAC_STAT_OERRORS
:
2173 *val
= hmep
->hme_oerrors
;
2175 case MAC_STAT_MULTIRCV
:
2176 *val
= hmep
->hme_multircv
;
2178 case MAC_STAT_MULTIXMT
:
2179 *val
= hmep
->hme_multixmt
;
2181 case MAC_STAT_BRDCSTRCV
:
2182 *val
= hmep
->hme_brdcstrcv
;
2184 case MAC_STAT_BRDCSTXMT
:
2185 *val
= hmep
->hme_brdcstxmt
;
2187 case MAC_STAT_UNDERFLOWS
:
2188 *val
= hmep
->hme_uflo
;
2190 case MAC_STAT_OVERFLOWS
:
2191 *val
= hmep
->hme_oflo
;
2193 case MAC_STAT_COLLISIONS
:
2194 *val
= hmep
->hme_coll
;
2196 case MAC_STAT_NORCVBUF
:
2197 *val
= hmep
->hme_norcvbuf
;
2199 case MAC_STAT_NOXMTBUF
:
2200 *val
= hmep
->hme_noxmtbuf
;
2202 case ETHER_STAT_LINK_DUPLEX
:
2203 *val
= hmep
->hme_duplex
;
2205 case ETHER_STAT_ALIGN_ERRORS
:
2206 *val
= hmep
->hme_align_errors
;
2208 case ETHER_STAT_FCS_ERRORS
:
2209 *val
= hmep
->hme_fcs_errors
;
2211 case ETHER_STAT_EX_COLLISIONS
:
2212 *val
= hmep
->hme_excol
;
2214 case ETHER_STAT_DEFER_XMTS
:
2215 *val
= hmep
->hme_defer_xmts
;
2217 case ETHER_STAT_SQE_ERRORS
:
2218 *val
= hmep
->hme_sqe_errors
;
2220 case ETHER_STAT_FIRST_COLLISIONS
:
2221 *val
= hmep
->hme_fstcol
;
2223 case ETHER_STAT_TX_LATE_COLLISIONS
:
2224 *val
= hmep
->hme_tlcol
;
2226 case ETHER_STAT_TOOLONG_ERRORS
:
2227 *val
= hmep
->hme_toolong_errors
;
2229 case ETHER_STAT_TOOSHORT_ERRORS
:
2230 *val
= hmep
->hme_runt
;
2232 case ETHER_STAT_CARRIER_ERRORS
:
2233 *val
= hmep
->hme_carrier_errors
;
2242 hme_m_tx(void *arg
, mblk_t
*mp
)
2244 struct hme
*hmep
= arg
;
2247 while (mp
!= NULL
) {
2250 if (!hmestart(hmep
, mp
)) {
/*
 * Software IP checksum, for the edge cases that the
 * hardware can't handle.  See hmestart for more info.
 *
 * Standard one's-complement Internet checksum over len bytes of data.
 * An odd trailing byte is folded in via htons(0xff00) masking -- this
 * reads one byte past len, so callers must provide even half-word
 * allocations (see comment below).
 */
static uint16_t
hme_cksum(void *data, int len)
{
	uint16_t *words = data;
	int i, nwords = len / 2;
	uint32_t sum = 0;

	/* just add up the words */
	for (i = 0; i < nwords; i++) {
		sum += *words++;
	}

	/* pick up residual byte ... assume even half-word allocations */
	if (len % 2) {
		sum += (*words & htons(0xff00));
	}

	/* fold the carries back in twice (second fold handles the first's carry) */
	sum = (sum >> 16) + (sum & 0xffff);
	sum = (sum >> 16) + (sum & 0xffff);

	return (~(sum & 0xffff));
}
2287 hmestart(struct hme
*hmep
, mblk_t
*mp
)
2290 boolean_t retval
= B_TRUE
;
2294 uint32_t csflags
= 0;
2296 uint32_t start_offset
;
2297 uint32_t stuff_offset
;
2299 mac_hcksum_get(mp
, &start_offset
, &stuff_offset
, NULL
, NULL
, &flags
);
2301 if (flags
& HCK_PARTIALCKSUM
) {
2302 if (get_ether_type(mp
->b_rptr
) == ETHERTYPE_VLAN
) {
2303 start_offset
+= sizeof (struct ether_header
) + 4;
2304 stuff_offset
+= sizeof (struct ether_header
) + 4;
2306 start_offset
+= sizeof (struct ether_header
);
2307 stuff_offset
+= sizeof (struct ether_header
);
2309 csflags
= HMETMD_CSENABL
|
2310 (start_offset
<< HMETMD_CSSTART_SHIFT
) |
2311 (stuff_offset
<< HMETMD_CSSTUFF_SHIFT
);
2314 mutex_enter(&hmep
->hme_xmitlock
);
2316 if (hmep
->hme_flags
& HMESUSPENDED
) {
2317 hmep
->hme_carrier_errors
++;
2318 hmep
->hme_oerrors
++;
2322 if (hmep
->hme_txindex
!= hmep
->hme_txreclaim
) {
2325 if ((hmep
->hme_txindex
- HME_TMDMAX
) == hmep
->hme_txreclaim
)
2327 txptr
= hmep
->hme_txindex
% HME_TMDMAX
;
2328 tbuf
= &hmep
->hme_tbuf
[txptr
];
2331 * Note that for checksum offload, the hardware cannot
2332 * generate correct checksums if the packet is smaller than
2333 * 64-bytes. In such a case, we bcopy the packet and use
2334 * a software checksum.
2339 /* zero fill the padding */
2340 bzero(tbuf
->kaddr
, 64);
2342 mcopymsg(mp
, tbuf
->kaddr
);
2344 if ((csflags
!= 0) && ((len
< 64) ||
2345 (start_offset
> HMETMD_CSSTART_MAX
) ||
2346 (stuff_offset
> HMETMD_CSSTUFF_MAX
))) {
2348 sum
= hme_cksum(tbuf
->kaddr
+ start_offset
,
2349 len
- start_offset
);
2350 bcopy(&sum
, tbuf
->kaddr
+ stuff_offset
, sizeof (sum
));
2354 if (ddi_dma_sync(tbuf
->dmah
, 0, len
, DDI_DMA_SYNC_FORDEV
) ==
2356 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, DDI_MSG
,
2357 "ddi_dma_sync failed");
2361 * update MIB II statistics
2363 BUMP_OutNUcast(hmep
, tbuf
->kaddr
);
2365 PUT_TMD(txptr
, tbuf
->paddr
, len
,
2366 HMETMD_OWN
| HMETMD_SOP
| HMETMD_EOP
| csflags
);
2368 HMESYNCTMD(txptr
, DDI_DMA_SYNC_FORDEV
);
2369 hmep
->hme_txindex
++;
2371 PUT_ETXREG(txpend
, HMET_TXPEND_TDMD
);
2374 mutex_exit(&hmep
->hme_xmitlock
);
2380 mutex_exit(&hmep
->hme_xmitlock
);
2386 hmep
->hme_wantw
= B_TRUE
;
2390 mutex_exit(&hmep
->hme_xmitlock
);
2396 * Initialize channel.
2397 * Return 0 on success, nonzero on error.
2399 * The recommended sequence for initialization is:
2400 * 1. Issue a Global Reset command to the Ethernet Channel.
2401 * 2. Poll the Global_Reset bits until the execution of the reset has been
2403 * 2(a). Use the MIF Frame/Output register to reset the transceiver.
2404 * Poll Register 0 to till the Resetbit is 0.
2405 * 2(b). Use the MIF Frame/Output register to set the PHY in in Normal-Op,
2406 * 100Mbps and Non-Isolated mode. The main point here is to bring the
2407 * PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
2408 * to the MII interface so that the Bigmac core can correctly reset
2409 * upon a software reset.
2410 * 2(c). Issue another Global Reset command to the Ethernet Channel and poll
2411 * the Global_Reset bits till completion.
2412 * 3. Set up all the data structures in the host memory.
2413 * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration
2415 * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration
2417 * 6. Program the Transmit Descriptor Ring Base Address in the ETX.
2418 * 7. Program the Receive Descriptor Ring Base Address in the ERX.
2419 * 8. Program the Global Configuration and the Global Interrupt Mask Registers.
2420 * 9. Program the ETX Configuration register (enable the Transmit DMA channel).
2421 * 10. Program the ERX Configuration register (enable the Receive DMA channel).
2422 * 11. Program the XIF Configuration Register (enable the XIF).
2423 * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
2424 * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
2428 #ifdef FEPS_URUN_BUG
2429 static int hme_palen
= 32;
2433 hmeinit(struct hme
*hmep
)
2442 * hme_intrlock, hme_xmitlock.
2444 mutex_enter(&hmep
->hme_intrlock
);
2447 * Don't touch the hardware if we are suspended. But don't
2448 * fail either. Some time later we may be resumed, and then
2449 * we'll be back here to program the device using the settings
2450 * in the soft state.
2452 if (hmep
->hme_flags
& HMESUSPENDED
) {
2453 mutex_exit(&hmep
->hme_intrlock
);
2458 * This should prevent us from clearing any interrupts that
2459 * may occur by temporarily stopping interrupts from occurring
2460 * for a short time. We need to update the interrupt mask
2461 * later in this function.
2463 PUT_GLOBREG(intmask
, ~HMEG_MASK_MIF_INTR
);
2467 * Rearranged the mutex acquisition order to solve the deadlock
2468 * situation as described in bug ID 4065896.
2471 mutex_enter(&hmep
->hme_xmitlock
);
2473 hmep
->hme_flags
= 0;
2474 hmep
->hme_wantw
= B_FALSE
;
2480 * Perform Global reset of the Sbus/FEPS ENET channel.
2482 (void) hmestop(hmep
);
2485 * Clear all descriptors.
2487 bzero(hmep
->hme_rmdp
, HME_RMDMAX
* sizeof (struct hme_rmd
));
2488 bzero(hmep
->hme_tmdp
, HME_TMDMAX
* sizeof (struct hme_tmd
));
2491 * Hang out receive buffers.
2493 for (i
= 0; i
< HME_RMDMAX
; i
++) {
2494 PUT_RMD(i
, hmep
->hme_rbuf
[i
].paddr
);
2498 * DMA sync descriptors.
2500 (void) ddi_dma_sync(hmep
->hme_rmd_dmah
, 0, 0, DDI_DMA_SYNC_FORDEV
);
2501 (void) ddi_dma_sync(hmep
->hme_tmd_dmah
, 0, 0, DDI_DMA_SYNC_FORDEV
);
2504 * Reset RMD and TMD 'walking' pointers.
2506 hmep
->hme_rxindex
= 0;
2507 hmep
->hme_txindex
= hmep
->hme_txreclaim
= 0;
2510 * This is the right place to initialize MIF !!!
2513 PUT_MIFREG(mif_imask
, HME_MIF_INTMASK
); /* mask all interrupts */
2515 if (!hmep
->hme_frame_enable
)
2516 PUT_MIFREG(mif_cfg
, GET_MIFREG(mif_cfg
) | HME_MIF_CFGBB
);
2518 PUT_MIFREG(mif_cfg
, GET_MIFREG(mif_cfg
) & ~HME_MIF_CFGBB
);
2519 /* enable frame mode */
2522 * Depending on the transceiver detected, select the source
2523 * of the clocks for the MAC. Without the clocks, TX_MAC does
2524 * not reset. When the Global Reset is issued to the Sbus/FEPS
2525 * ASIC, it selects Internal by default.
2528 switch ((phyad
= mii_get_addr(hmep
->hme_mii
))) {
2530 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, XCVR_MSG
, no_xcvr_msg
);
2531 goto init_fail
; /* abort initialization */
2533 case HME_INTERNAL_PHYAD
:
2534 PUT_MACREG(xifc
, 0);
2536 case HME_EXTERNAL_PHYAD
:
2537 /* Isolate the Int. xcvr */
2538 PUT_MACREG(xifc
, BMAC_XIFC_MIIBUFDIS
);
2545 * Initialize BigMAC registers.
2546 * First set the tx enable bit in tx config reg to 0 and poll on
2547 * it till it turns to 0. Same for rx config, hash and address
2549 * Here is the sequence per the spec.
2550 * MADD2 - MAC Address 2
2551 * MADD1 - MAC Address 1
2552 * MADD0 - MAC Address 0
2553 * HASH3, HASH2, HASH1, HASH0 for group address
2554 * AFR2, AFR1, AFR0 and AFMR for address filter mask
2555 * Program RXMIN and RXMAX for packet length if not 802.3
2556 * RXCFG - Rx config for not stripping CRC
2557 * XXX Anything else to hme configured in RXCFG
2558 * IPG1, IPG2, ALIMIT, SLOT, PALEN, PAPAT, TXSFD, JAM, TXMAX, TXMIN
2559 * if not 802.3 compliant
2560 * XIF register for speed selection
2561 * MASK - Interrupt mask
2562 * Set bit 0 of TXCFG
2563 * Set bit 0 of RXCFG
2567 * Initialize the TX_MAC registers
2568 * Initialization of jamsize to work around rx crc bug
2570 PUT_MACREG(jam
, jamsize
);
2572 #ifdef FEPS_URUN_BUG
2574 PUT_MACREG(palen
, hme_palen
);
2577 PUT_MACREG(ipg1
, hmep
->hme_ipg1
);
2578 PUT_MACREG(ipg2
, hmep
->hme_ipg2
);
2581 ((hmep
->hme_ouraddr
.ether_addr_octet
[0] << 8) & 0x3) |
2582 hmep
->hme_ouraddr
.ether_addr_octet
[1]);
2584 /* Initialize the RX_MAC registers */
2587 * Program BigMAC with local individual ethernet address.
2589 PUT_MACREG(madd2
, (hmep
->hme_ouraddr
.ether_addr_octet
[4] << 8) |
2590 hmep
->hme_ouraddr
.ether_addr_octet
[5]);
2591 PUT_MACREG(madd1
, (hmep
->hme_ouraddr
.ether_addr_octet
[2] << 8) |
2592 hmep
->hme_ouraddr
.ether_addr_octet
[3]);
2593 PUT_MACREG(madd0
, (hmep
->hme_ouraddr
.ether_addr_octet
[0] << 8) |
2594 hmep
->hme_ouraddr
.ether_addr_octet
[1]);
2597 * Set up multicast address filter by passing all multicast
2598 * addresses through a crc generator, and then using the
2599 * low order 6 bits as a index into the 64 bit logical
2600 * address filter. The high order three bits select the word,
2601 * while the rest of the bits select the bit within the word.
2603 PUT_MACREG(hash0
, hmep
->hme_ladrf
[0]);
2604 PUT_MACREG(hash1
, hmep
->hme_ladrf
[1]);
2605 PUT_MACREG(hash2
, hmep
->hme_ladrf
[2]);
2606 PUT_MACREG(hash3
, hmep
->hme_ladrf
[3]);
2609 * Configure parameters to support VLAN. (VLAN encapsulation adds
2612 PUT_MACREG(txmax
, ETHERMAX
+ ETHERFCSL
+ 4);
2613 PUT_MACREG(rxmax
, ETHERMAX
+ ETHERFCSL
+ 4);
2616 * Initialize HME Global registers, ETX registers and ERX registers.
2619 PUT_ETXREG(txring
, hmep
->hme_tmd_paddr
);
2620 PUT_ERXREG(rxring
, hmep
->hme_rmd_paddr
);
2623 * ERX registers can be written only if they have even no. of bits set.
2624 * So, if the value written is not read back, set the lsb and write
2626 * static int hme_erx_fix = 1; : Use the fix for erx bug
2630 temp
= hmep
->hme_rmd_paddr
;
2632 if (GET_ERXREG(rxring
) != temp
)
2633 PUT_ERXREG(rxring
, (temp
| 4));
2636 PUT_GLOBREG(config
, (hmep
->hme_config
|
2637 (hmep
->hme_64bit_xfer
<< HMEG_CONFIG_64BIT_SHIFT
)));
2640 * Significant performance improvements can be achieved by
2641 * disabling transmit interrupt. Thus TMD's are reclaimed only
2642 * when we run out of them in hmestart().
2644 PUT_GLOBREG(intmask
,
2645 HMEG_MASK_INTR
| HMEG_MASK_TINT
| HMEG_MASK_TX_ALL
);
2647 PUT_ETXREG(txring_size
, ((HME_TMDMAX
-1)>> HMET_RINGSZ_SHIFT
));
2648 PUT_ETXREG(config
, (GET_ETXREG(config
) | HMET_CONFIG_TXDMA_EN
2649 | HMET_CONFIG_TXFIFOTH
));
2650 /* get the rxring size bits */
2651 switch (HME_RMDMAX
) {
2653 i
= HMER_CONFIG_RXRINGSZ32
;
2656 i
= HMER_CONFIG_RXRINGSZ64
;
2659 i
= HMER_CONFIG_RXRINGSZ128
;
2662 i
= HMER_CONFIG_RXRINGSZ256
;
2665 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
2669 i
|= (HME_FSTBYTE_OFFSET
<< HMER_CONFIG_FBO_SHIFT
)
2670 | HMER_CONFIG_RXDMA_EN
;
2672 /* h/w checks start offset in half words */
2673 i
|= ((sizeof (struct ether_header
) / 2) << HMER_RX_CSSTART_SHIFT
);
2675 PUT_ERXREG(config
, i
);
2678 * Bug related to the parity handling in ERX. When erxp-config is
2680 * Sbus/FEPS drives the parity bit. This value is used while
2682 * This fixes the RECV problem in SS5.
2683 * static int hme_erx_fix = 1; : Use the fix for erx bug
2687 temp
= GET_ERXREG(config
);
2688 PUT_ERXREG(config
, i
);
2690 if (GET_ERXREG(config
) != i
)
2691 HME_FAULT_MSG4(hmep
, SEVERITY_UNKNOWN
, ERX_MSG
,
2692 "error:temp = %x erxp->config = %x, should be %x",
2693 temp
, GET_ERXREG(config
), i
);
2697 * Set up the rxconfig, txconfig and seed register without enabling
2698 * them the former two at this time
2700 * BigMAC strips the CRC bytes by default. Since this is
2701 * contrary to other pieces of hardware, this bit needs to
2702 * enabled to tell BigMAC not to strip the CRC bytes.
2703 * Do not filter this node's own packets.
2706 if (hme_reject_own
) {
2708 ((hmep
->hme_promisc
? BMAC_RXCFG_PROMIS
: 0) |
2709 BMAC_RXCFG_MYOWN
| BMAC_RXCFG_HASH
));
2712 ((hmep
->hme_promisc
? BMAC_RXCFG_PROMIS
: 0) |
2716 drv_usecwait(10); /* wait after setting Hash Enable bit */
2718 fdx
= (mii_get_duplex(hmep
->hme_mii
) == LINK_DUPLEX_FULL
);
2721 PUT_MACREG(txcfg
, (fdx
? BMAC_TXCFG_FDX
: 0) |
2724 PUT_MACREG(txcfg
, (fdx
? BMAC_TXCFG_FDX
: 0));
2727 if ((hmep
->hme_lance_mode
) && (hmep
->hme_lance_mode_enable
))
2728 i
= ((hmep
->hme_ipg0
& HME_MASK_5BIT
) << BMAC_XIFC_IPG0_SHIFT
)
2729 | BMAC_XIFC_LANCE_ENAB
;
2730 if (phyad
== HME_INTERNAL_PHYAD
)
2731 PUT_MACREG(xifc
, i
| (BMAC_XIFC_ENAB
));
2733 PUT_MACREG(xifc
, i
| (BMAC_XIFC_ENAB
| BMAC_XIFC_MIIBUFDIS
));
2735 PUT_MACREG(rxcfg
, GET_MACREG(rxcfg
) | BMAC_RXCFG_ENAB
);
2736 PUT_MACREG(txcfg
, GET_MACREG(txcfg
) | BMAC_TXCFG_ENAB
);
2738 hmep
->hme_flags
|= (HMERUNNING
| HMEINITIALIZED
);
2740 * Update the interrupt mask : this will re-allow interrupts to occur
2742 PUT_GLOBREG(intmask
, HMEG_MASK_INTR
);
2743 mac_tx_update(hmep
->hme_mh
);
2747 * Release the locks in reverse order
2749 mutex_exit(&hmep
->hme_xmitlock
);
2750 mutex_exit(&hmep
->hme_intrlock
);
2752 ret
= !(hmep
->hme_flags
& HMERUNNING
);
2754 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
2772 * Calculate the dvma burstsize by setting up a dvma temporarily. Return
2773 * 0 as burstsize upon failure as it signifies no burst size.
2774 * Requests for 64-bit transfer setup, if the platform supports it.
2775 * NOTE: Do not use ddi_dma_alloc_handle(9f) then ddi_dma_burstsize(9f),
2776 * sun4u Ultra-2 incorrectly returns a 32bit transfer.
2779 hmeburstsizes(struct hme
*hmep
)
2782 ddi_dma_handle_t handle
;
2784 if (ddi_dma_alloc_handle(hmep
->dip
, &hme_dma_attr
,
2785 DDI_DMA_DONTWAIT
, NULL
, &handle
)) {
2789 hmep
->hme_burstsizes
= burstsizes
= ddi_dma_burstsizes(handle
);
2790 ddi_dma_free_handle(&handle
);
2793 * Use user-configurable parameter for enabling 64-bit transfers
2795 burstsizes
= (hmep
->hme_burstsizes
>> 16);
2797 hmep
->hme_64bit_xfer
= hme_64bit_enable
; /* user config value */
2799 burstsizes
= hmep
->hme_burstsizes
;
2801 if (hmep
->hme_cheerio_mode
)
2802 hmep
->hme_64bit_xfer
= 0; /* Disable for cheerio */
2804 if (burstsizes
& 0x40)
2805 hmep
->hme_config
= HMEG_CONFIG_BURST64
;
2806 else if (burstsizes
& 0x20)
2807 hmep
->hme_config
= HMEG_CONFIG_BURST32
;
2809 hmep
->hme_config
= HMEG_CONFIG_BURST16
;
2811 return (DDI_SUCCESS
);
2815 hmeallocbuf(struct hme
*hmep
, hmebuf_t
*buf
, int dir
)
2817 ddi_dma_cookie_t dmac
;
2821 if (ddi_dma_alloc_handle(hmep
->dip
, &hme_dma_attr
,
2822 DDI_DMA_DONTWAIT
, NULL
, &buf
->dmah
) != DDI_SUCCESS
) {
2823 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
2824 "cannot allocate buf dma handle - failed");
2825 return (DDI_FAILURE
);
2828 if (ddi_dma_mem_alloc(buf
->dmah
, ROUNDUP(HMEBUFSIZE
, 512),
2829 &hme_buf_attr
, DDI_DMA_STREAMING
, DDI_DMA_DONTWAIT
, NULL
,
2830 &buf
->kaddr
, &len
, &buf
->acch
) != DDI_SUCCESS
) {
2831 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
2832 "cannot allocate buf memory - failed");
2833 return (DDI_FAILURE
);
2836 if (ddi_dma_addr_bind_handle(buf
->dmah
, NULL
, buf
->kaddr
,
2837 len
, dir
| DDI_DMA_CONSISTENT
, DDI_DMA_DONTWAIT
, NULL
,
2838 &dmac
, &ccnt
) != DDI_DMA_MAPPED
) {
2839 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
2840 "cannot map buf for dma - failed");
2841 return (DDI_FAILURE
);
2843 buf
->paddr
= dmac
.dmac_address
;
2845 /* apparently they don't handle multiple cookies */
2847 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
2848 "too many buf dma cookies");
2849 return (DDI_FAILURE
);
2851 return (DDI_SUCCESS
);
2855 hmeallocbufs(struct hme
*hmep
)
2857 hmep
->hme_tbuf
= kmem_zalloc(HME_TMDMAX
* sizeof (hmebuf_t
), KM_SLEEP
);
2858 hmep
->hme_rbuf
= kmem_zalloc(HME_RMDMAX
* sizeof (hmebuf_t
), KM_SLEEP
);
2860 /* Alloc RX buffers. */
2861 for (int i
= 0; i
< HME_RMDMAX
; i
++) {
2862 if (hmeallocbuf(hmep
, &hmep
->hme_rbuf
[i
], DDI_DMA_READ
) !=
2864 return (DDI_FAILURE
);
2868 /* Alloc TX buffers. */
2869 for (int i
= 0; i
< HME_TMDMAX
; i
++) {
2870 if (hmeallocbuf(hmep
, &hmep
->hme_tbuf
[i
], DDI_DMA_WRITE
) !=
2872 return (DDI_FAILURE
);
2875 return (DDI_SUCCESS
);
2879 hmefreebufs(struct hme
*hmep
)
2883 if (hmep
->hme_rbuf
== NULL
)
2887 * Free and unload pending xmit and recv buffers.
2888 * Maintaining the 1-to-1 ordered sequence of
2889 * We have written the routine to be idempotent.
2892 for (i
= 0; i
< HME_TMDMAX
; i
++) {
2893 hmebuf_t
*tbuf
= &hmep
->hme_tbuf
[i
];
2895 (void) ddi_dma_unbind_handle(tbuf
->dmah
);
2898 ddi_dma_mem_free(&tbuf
->acch
);
2901 ddi_dma_free_handle(&tbuf
->dmah
);
2904 for (i
= 0; i
< HME_RMDMAX
; i
++) {
2905 hmebuf_t
*rbuf
= &hmep
->hme_rbuf
[i
];
2907 (void) ddi_dma_unbind_handle(rbuf
->dmah
);
2910 ddi_dma_mem_free(&rbuf
->acch
);
2913 ddi_dma_free_handle(&rbuf
->dmah
);
2916 kmem_free(hmep
->hme_rbuf
, HME_RMDMAX
* sizeof (hmebuf_t
));
2917 kmem_free(hmep
->hme_tbuf
, HME_TMDMAX
* sizeof (hmebuf_t
));
2921 * Un-initialize (STOP) HME channel.
2924 hmeuninit(struct hme
*hmep
)
2927 * Allow up to 'HMEDRAINTIME' for pending xmit's to complete.
2929 HMEDELAY((hmep
->hme_txindex
== hmep
->hme_txreclaim
), HMEDRAINTIME
);
2931 mutex_enter(&hmep
->hme_intrlock
);
2932 mutex_enter(&hmep
->hme_xmitlock
);
2934 hmep
->hme_flags
&= ~HMERUNNING
;
2936 (void) hmestop(hmep
);
2938 mutex_exit(&hmep
->hme_xmitlock
);
2939 mutex_exit(&hmep
->hme_intrlock
);
2943 * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and
2944 * map it in IO space. Allocate space for transmit and receive ddi_dma_handle
2945 * structures to use the DMA interface.
2948 hmeallocthings(struct hme
*hmep
)
2954 ddi_dma_cookie_t dmac
;
2955 dev_info_t
*dip
= hmep
->dip
;
2958 * Allocate the TMD and RMD descriptors and extra for page alignment.
2961 rval
= ddi_dma_alloc_handle(dip
, &hme_dma_attr
, DDI_DMA_DONTWAIT
, NULL
,
2962 &hmep
->hme_rmd_dmah
);
2963 if (rval
!= DDI_SUCCESS
) {
2964 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
2965 "cannot allocate rmd handle - failed");
2966 return (DDI_FAILURE
);
2968 size
= HME_RMDMAX
* sizeof (struct hme_rmd
);
2969 rval
= ddi_dma_mem_alloc(hmep
->hme_rmd_dmah
, size
,
2970 &hmep
->hme_dev_attr
, DDI_DMA_CONSISTENT
, DDI_DMA_DONTWAIT
, NULL
,
2971 &hmep
->hme_rmd_kaddr
, &real_len
, &hmep
->hme_rmd_acch
);
2972 if (rval
!= DDI_SUCCESS
) {
2973 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
2974 "cannot allocate rmd dma mem - failed");
2975 return (DDI_FAILURE
);
2977 hmep
->hme_rmdp
= (void *)(hmep
->hme_rmd_kaddr
);
2978 rval
= ddi_dma_addr_bind_handle(hmep
->hme_rmd_dmah
, NULL
,
2979 hmep
->hme_rmd_kaddr
, size
, DDI_DMA_RDWR
| DDI_DMA_CONSISTENT
,
2980 DDI_DMA_DONTWAIT
, NULL
, &dmac
, &cookiec
);
2981 if (rval
!= DDI_DMA_MAPPED
) {
2982 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
2983 "cannot allocate rmd dma - failed");
2984 return (DDI_FAILURE
);
2986 hmep
->hme_rmd_paddr
= dmac
.dmac_address
;
2988 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
2989 "too many rmd cookies - failed");
2990 return (DDI_FAILURE
);
2993 rval
= ddi_dma_alloc_handle(dip
, &hme_dma_attr
, DDI_DMA_DONTWAIT
, NULL
,
2994 &hmep
->hme_tmd_dmah
);
2995 if (rval
!= DDI_SUCCESS
) {
2996 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
2997 "cannot allocate tmd handle - failed");
2998 return (DDI_FAILURE
);
3000 size
= HME_TMDMAX
* sizeof (struct hme_rmd
);
3001 rval
= ddi_dma_mem_alloc(hmep
->hme_tmd_dmah
, size
,
3002 &hmep
->hme_dev_attr
, DDI_DMA_CONSISTENT
, DDI_DMA_DONTWAIT
, NULL
,
3003 &hmep
->hme_tmd_kaddr
, &real_len
, &hmep
->hme_tmd_acch
);
3004 if (rval
!= DDI_SUCCESS
) {
3005 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
3006 "cannot allocate tmd dma mem - failed");
3007 return (DDI_FAILURE
);
3009 hmep
->hme_tmdp
= (void *)(hmep
->hme_tmd_kaddr
);
3010 rval
= ddi_dma_addr_bind_handle(hmep
->hme_tmd_dmah
, NULL
,
3011 hmep
->hme_tmd_kaddr
, size
, DDI_DMA_RDWR
| DDI_DMA_CONSISTENT
,
3012 DDI_DMA_DONTWAIT
, NULL
, &dmac
, &cookiec
);
3013 if (rval
!= DDI_DMA_MAPPED
) {
3014 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
3015 "cannot allocate tmd dma - failed");
3016 return (DDI_FAILURE
);
3018 hmep
->hme_tmd_paddr
= dmac
.dmac_address
;
3020 HME_FAULT_MSG1(hmep
, SEVERITY_HIGH
, INIT_MSG
,
3021 "too many tmd cookies - failed");
3022 return (DDI_FAILURE
);
3025 return (DDI_SUCCESS
);
3029 hmefreethings(struct hme
*hmep
)
3031 if (hmep
->hme_rmd_paddr
) {
3032 (void) ddi_dma_unbind_handle(hmep
->hme_rmd_dmah
);
3033 hmep
->hme_rmd_paddr
= 0;
3035 if (hmep
->hme_rmd_acch
)
3036 ddi_dma_mem_free(&hmep
->hme_rmd_acch
);
3037 if (hmep
->hme_rmd_dmah
)
3038 ddi_dma_free_handle(&hmep
->hme_rmd_dmah
);
3040 if (hmep
->hme_tmd_paddr
) {
3041 (void) ddi_dma_unbind_handle(hmep
->hme_tmd_dmah
);
3042 hmep
->hme_tmd_paddr
= 0;
3044 if (hmep
->hme_tmd_acch
)
3045 ddi_dma_mem_free(&hmep
->hme_tmd_acch
);
3046 if (hmep
->hme_tmd_dmah
)
3047 ddi_dma_free_handle(&hmep
->hme_tmd_dmah
);
3051 * First check to see if it our device interrupting.
3054 hmeintr(caddr_t arg
)
3056 struct hme
*hmep
= (void *)arg
;
3058 uint32_t serviced
= DDI_INTR_UNCLAIMED
;
3059 uint32_t num_reads
= 0;
3061 mblk_t
*mp
, *head
, **tail
;
3067 mutex_enter(&hmep
->hme_intrlock
);
3070 * The status register auto-clears on read except for
3073 hmesbits
= GET_GLOBREG(status
);
3077 * Note: TINT is sometimes enabled in thr hmereclaim()
3081 * Bugid 1227832 - to handle spurious interrupts on fusion systems.
3082 * Claim the first interrupt after initialization
3084 if (hmep
->hme_flags
& HMEINITIALIZED
) {
3085 hmep
->hme_flags
&= ~HMEINITIALIZED
;
3086 serviced
= DDI_INTR_CLAIMED
;
3089 if ((hmesbits
& (HMEG_STATUS_INTR
| HMEG_STATUS_TINT
)) == 0) {
3090 /* No interesting interrupt */
3091 if (hmep
->hme_intrstats
) {
3092 if (serviced
== DDI_INTR_UNCLAIMED
)
3093 KIOIP
->intrs
[KSTAT_INTR_SPURIOUS
]++;
3095 KIOIP
->intrs
[KSTAT_INTR_HARD
]++;
3097 mutex_exit(&hmep
->hme_intrlock
);
3101 serviced
= DDI_INTR_CLAIMED
;
3103 if (!(hmep
->hme_flags
& HMERUNNING
)) {
3104 if (hmep
->hme_intrstats
)
3105 KIOIP
->intrs
[KSTAT_INTR_HARD
]++;
3106 mutex_exit(&hmep
->hme_intrlock
);
3111 if (hmesbits
& (HMEG_STATUS_FATAL_ERR
| HMEG_STATUS_NONFATAL_ERR
)) {
3112 if (hmesbits
& HMEG_STATUS_FATAL_ERR
) {
3114 if (hmep
->hme_intrstats
)
3115 KIOIP
->intrs
[KSTAT_INTR_HARD
]++;
3116 hme_fatal_err(hmep
, hmesbits
);
3118 mutex_exit(&hmep
->hme_intrlock
);
3119 (void) hmeinit(hmep
);
3122 hme_nonfatal_err(hmep
, hmesbits
);
3125 if (hmesbits
& (HMEG_STATUS_TX_ALL
| HMEG_STATUS_TINT
)) {
3126 mutex_enter(&hmep
->hme_xmitlock
);
3129 mutex_exit(&hmep
->hme_xmitlock
);
3132 if (hmesbits
& HMEG_STATUS_RINT
) {
3135 * This dummy PIO is required to flush the SBus
3136 * Bridge buffers in QFE.
3138 (void) GET_GLOBREG(config
);
3141 * Loop through each RMD no more than once.
3143 while (num_reads
++ < HME_RMDMAX
) {
3147 rxptr
= hmep
->hme_rxindex
% HME_RMDMAX
;
3148 HMESYNCRMD(rxptr
, DDI_DMA_SYNC_FORKERNEL
);
3150 rflags
= GET_RMD_FLAGS(rxptr
);
3151 if (rflags
& HMERMD_OWN
) {
3153 * Chip still owns it. We're done.
3159 * Retrieve the packet.
3161 rbuf
= &hmep
->hme_rbuf
[rxptr
];
3162 mp
= hmeread(hmep
, rbuf
, rflags
);
3165 * Return ownership of the RMD.
3167 PUT_RMD(rxptr
, rbuf
->paddr
);
3168 HMESYNCRMD(rxptr
, DDI_DMA_SYNC_FORDEV
);
3176 * Advance to the next RMD.
3178 hmep
->hme_rxindex
++;
3182 if (hmep
->hme_intrstats
)
3183 KIOIP
->intrs
[KSTAT_INTR_HARD
]++;
3185 mutex_exit(&hmep
->hme_intrlock
);
3188 mac_rx(hmep
->hme_mh
, NULL
, head
);
3194 * Transmit completion reclaiming.
3197 hmereclaim(struct hme
*hmep
)
3199 boolean_t reclaimed
= B_FALSE
;
3202 * Loop through each TMD.
3204 while (hmep
->hme_txindex
> hmep
->hme_txreclaim
) {
3209 reclaim
= hmep
->hme_txreclaim
% HME_TMDMAX
;
3210 HMESYNCTMD(reclaim
, DDI_DMA_SYNC_FORKERNEL
);
3212 flags
= GET_TMD_FLAGS(reclaim
);
3213 if (flags
& HMETMD_OWN
) {
3215 * Chip still owns it. We're done.
3221 * Count a chained packet only once.
3223 if (flags
& HMETMD_SOP
) {
3224 hmep
->hme_opackets
++;
3230 hmep
->hme_obytes
+= flags
& HMETMD_BUFSIZE
;
3233 hmep
->hme_txreclaim
++;
3238 * we could reclaim some TMDs so turn off interrupts
3240 if (hmep
->hme_wantw
) {
3241 PUT_GLOBREG(intmask
,
3242 HMEG_MASK_INTR
| HMEG_MASK_TINT
|
3244 hmep
->hme_wantw
= B_FALSE
;
3245 mac_tx_update(hmep
->hme_mh
);
3249 * enable TINTS: so that even if there is no further activity
3250 * hmereclaim will get called
3252 if (hmep
->hme_wantw
)
3253 PUT_GLOBREG(intmask
,
3254 GET_GLOBREG(intmask
) & ~HMEG_MASK_TX_ALL
);
3260 * Handle interrupts for fatal errors
3261 * Need reinitialization of the ENET channel.
3264 hme_fatal_err(struct hme
*hmep
, uint_t hmesbits
)
3267 if (hmesbits
& HMEG_STATUS_SLV_PAR_ERR
) {
3268 hmep
->hme_slvparerr
++;
3271 if (hmesbits
& HMEG_STATUS_SLV_ERR_ACK
) {
3272 hmep
->hme_slverrack
++;
3275 if (hmesbits
& HMEG_STATUS_TX_TAG_ERR
) {
3276 hmep
->hme_txtagerr
++;
3277 hmep
->hme_oerrors
++;
3280 if (hmesbits
& HMEG_STATUS_TX_PAR_ERR
) {
3281 hmep
->hme_txparerr
++;
3282 hmep
->hme_oerrors
++;
3285 if (hmesbits
& HMEG_STATUS_TX_LATE_ERR
) {
3286 hmep
->hme_txlaterr
++;
3287 hmep
->hme_oerrors
++;
3290 if (hmesbits
& HMEG_STATUS_TX_ERR_ACK
) {
3291 hmep
->hme_txerrack
++;
3292 hmep
->hme_oerrors
++;
3295 if (hmesbits
& HMEG_STATUS_EOP_ERR
) {
3299 if (hmesbits
& HMEG_STATUS_RX_TAG_ERR
) {
3300 hmep
->hme_rxtagerr
++;
3301 hmep
->hme_ierrors
++;
3304 if (hmesbits
& HMEG_STATUS_RX_PAR_ERR
) {
3305 hmep
->hme_rxparerr
++;
3306 hmep
->hme_ierrors
++;
3309 if (hmesbits
& HMEG_STATUS_RX_LATE_ERR
) {
3310 hmep
->hme_rxlaterr
++;
3311 hmep
->hme_ierrors
++;
3314 if (hmesbits
& HMEG_STATUS_RX_ERR_ACK
) {
3315 hmep
->hme_rxerrack
++;
3316 hmep
->hme_ierrors
++;
3321 * Handle interrupts regarding non-fatal errors.
3324 hme_nonfatal_err(struct hme
*hmep
, uint_t hmesbits
)
3327 if (hmesbits
& HMEG_STATUS_RX_DROP
) {
3329 hmep
->hme_ierrors
++;
3332 if (hmesbits
& HMEG_STATUS_DEFTIMR_EXP
) {
3333 hmep
->hme_defer_xmts
++;
3336 if (hmesbits
& HMEG_STATUS_FSTCOLC_EXP
) {
3337 hmep
->hme_fstcol
+= 256;
3340 if (hmesbits
& HMEG_STATUS_LATCOLC_EXP
) {
3341 hmep
->hme_tlcol
+= 256;
3342 hmep
->hme_oerrors
+= 256;
3345 if (hmesbits
& HMEG_STATUS_EXCOLC_EXP
) {
3346 hmep
->hme_excol
+= 256;
3347 hmep
->hme_oerrors
+= 256;
3350 if (hmesbits
& HMEG_STATUS_NRMCOLC_EXP
) {
3351 hmep
->hme_coll
+= 256;
3354 if (hmesbits
& HMEG_STATUS_MXPKTSZ_ERR
) {
3356 hmep
->hme_oerrors
++;
3360 * This error is fatal and the board needs to
3361 * be reinitialized. Comments?
3363 if (hmesbits
& HMEG_STATUS_TXFIFO_UNDR
) {
3365 hmep
->hme_oerrors
++;
3368 if (hmesbits
& HMEG_STATUS_SQE_TST_ERR
) {
3369 hmep
->hme_sqe_errors
++;
3372 if (hmesbits
& HMEG_STATUS_RCV_CNT_EXP
) {
3373 if (hmep
->hme_rxcv_enable
) {
3374 hmep
->hme_cvc
+= 256;
3378 if (hmesbits
& HMEG_STATUS_RXFIFO_OVFL
) {
3380 hmep
->hme_ierrors
++;
3383 if (hmesbits
& HMEG_STATUS_LEN_CNT_EXP
) {
3384 hmep
->hme_lenerr
+= 256;
3385 hmep
->hme_ierrors
+= 256;
3388 if (hmesbits
& HMEG_STATUS_ALN_CNT_EXP
) {
3389 hmep
->hme_align_errors
+= 256;
3390 hmep
->hme_ierrors
+= 256;
3393 if (hmesbits
& HMEG_STATUS_CRC_CNT_EXP
) {
3394 hmep
->hme_fcs_errors
+= 256;
3395 hmep
->hme_ierrors
+= 256;
3400 hmeread(struct hme
*hmep
, hmebuf_t
*rbuf
, uint32_t rflags
)
3406 len
= (rflags
& HMERMD_BUFSIZE
) >> HMERMD_BUFSIZE_SHIFT
;
3409 * Check for short packet
3410 * and check for overflow packet also. The processing is the
3411 * same for both the cases - reuse the buffer. Update the Buffer
3414 if ((len
< ETHERMIN
) || (rflags
& HMERMD_OVFLOW
) ||
3415 (len
> (ETHERMAX
+ 4))) {
3421 hmep
->hme_toolong_errors
++;
3423 hmep
->hme_ierrors
++;
3428 * Sync the received buffer before looking at it.
3431 (void) ddi_dma_sync(rbuf
->dmah
, 0, 0, DDI_DMA_SYNC_FORKERNEL
);
3434 * copy the packet data and then recycle the descriptor.
3437 if ((bp
= allocb(len
+ HME_FSTBYTE_OFFSET
, BPRI_HI
)) == NULL
) {
3439 hmep
->hme_allocbfail
++;
3440 hmep
->hme_norcvbuf
++;
3445 bcopy(rbuf
->kaddr
, bp
->b_rptr
, len
+ HME_FSTBYTE_OFFSET
);
3447 hmep
->hme_ipackets
++;
3449 /* Add the First Byte offset to the b_rptr and copy */
3450 bp
->b_rptr
+= HME_FSTBYTE_OFFSET
;
3451 bp
->b_wptr
= bp
->b_rptr
+ len
;
3454 * update MIB II statistics
3456 BUMP_InNUcast(hmep
, bp
->b_rptr
);
3457 hmep
->hme_rbytes
+= len
;
3459 type
= get_ether_type(bp
->b_rptr
);
3462 * TCP partial checksum in hardware
3464 if (type
== ETHERTYPE_IP
|| type
== ETHERTYPE_IPV6
) {
3465 uint16_t cksum
= ~rflags
& HMERMD_CKSUM
;
3466 uint_t end
= len
- sizeof (struct ether_header
);
3467 mac_hcksum_set(bp
, 0, 0, end
, htons(cksum
), HCK_PARTIALCKSUM
);
3475 hme_fault_msg(struct hme
*hmep
, uint_t severity
, msg_t type
, char *fmt
, ...)
3477 char msg_buffer
[255];
3481 (void) vsnprintf(msg_buffer
, sizeof (msg_buffer
), fmt
, ap
);
3484 cmn_err(CE_NOTE
, "hme : %s", msg_buffer
);
3486 } else if (type
== DISPLAY_MSG
) {
3487 cmn_err(CE_CONT
, "?%s%d : %s\n", ddi_driver_name(hmep
->dip
),
3488 hmep
->instance
, msg_buffer
);
3489 } else if (severity
== SEVERITY_HIGH
) {
3490 cmn_err(CE_WARN
, "%s%d : %s, SEVERITY_HIGH, %s\n",
3491 ddi_driver_name(hmep
->dip
), hmep
->instance
,
3492 msg_buffer
, msg_string
[type
]);
3494 cmn_err(CE_CONT
, "%s%d : %s\n", ddi_driver_name(hmep
->dip
),
3495 hmep
->instance
, msg_buffer
);
3501 * if this is the first init do not bother to save the
3502 * counters. They should be 0, but do not count on it.
3505 hmesavecntrs(struct hme
*hmep
)
3507 uint32_t fecnt
, aecnt
, lecnt
, rxcv
;
3508 uint32_t ltcnt
, excnt
;
3510 /* XXX What all gets added in ierrors and oerrors? */
3511 fecnt
= GET_MACREG(fecnt
);
3512 PUT_MACREG(fecnt
, 0);
3514 aecnt
= GET_MACREG(aecnt
);
3515 hmep
->hme_align_errors
+= aecnt
;
3516 PUT_MACREG(aecnt
, 0);
3518 lecnt
= GET_MACREG(lecnt
);
3519 hmep
->hme_lenerr
+= lecnt
;
3520 PUT_MACREG(lecnt
, 0);
3522 rxcv
= GET_MACREG(rxcv
);
3523 #ifdef HME_CODEVIOL_BUG
3525 * Ignore rxcv errors for Sbus/FEPS 2.1 or earlier
3527 if (!hmep
->hme_rxcv_enable
) {
3531 hmep
->hme_cvc
+= rxcv
;
3532 PUT_MACREG(rxcv
, 0);
3534 ltcnt
= GET_MACREG(ltcnt
);
3535 hmep
->hme_tlcol
+= ltcnt
;
3536 PUT_MACREG(ltcnt
, 0);
3538 excnt
= GET_MACREG(excnt
);
3539 hmep
->hme_excol
+= excnt
;
3540 PUT_MACREG(excnt
, 0);
3542 hmep
->hme_fcs_errors
+= fecnt
;
3543 hmep
->hme_ierrors
+= (fecnt
+ aecnt
+ lecnt
);
3544 hmep
->hme_oerrors
+= (ltcnt
+ excnt
);
3545 hmep
->hme_coll
+= (GET_MACREG(nccnt
) + ltcnt
);
3547 PUT_MACREG(nccnt
, 0);
3552 * To set up the mac address for the network interface:
3553 * The adapter card may support a local mac address which is published
3554 * in a device node property "local-mac-address". This mac address is
3555 * treated as the factory-installed mac address for DLPI interface.
3556 * If the adapter firmware has used the device for diskless boot
3557 * operation it publishes a property called "mac-address" for use by
3558 * inetboot and the device driver.
3559 * If "mac-address" is not found, the system options property
3560 * "local-mac-address" is used to select the mac-address. If this option
3561 * is set to "true", and "local-mac-address" has been found, then
3562 * local-mac-address is used; otherwise the system mac address is used
3563 * by calling the "localetheraddr()" function.
3566 hme_setup_mac_address(struct hme
*hmep
, dev_info_t
*dip
)
3569 int prop_len
= sizeof (int);
3571 hmep
->hme_addrflags
= 0;
3574 * Check if it is an adapter with its own local mac address
3575 * If it is present, save it as the "factory-address"
3578 if (ddi_getlongprop(DDI_DEV_T_ANY
, dip
, DDI_PROP_DONTPASS
,
3579 "local-mac-address",
3580 (caddr_t
)&prop
, &prop_len
) == DDI_PROP_SUCCESS
) {
3581 if (prop_len
== ETHERADDRL
) {
3582 hmep
->hme_addrflags
= HME_FACTADDR_PRESENT
;
3583 ether_bcopy(prop
, &hmep
->hme_factaddr
);
3584 HME_FAULT_MSG2(hmep
, SEVERITY_NONE
, DISPLAY_MSG
,
3585 "Local Ethernet address = %s",
3586 ether_sprintf(&hmep
->hme_factaddr
));
3588 kmem_free(prop
, prop_len
);
3592 * Check if the adapter has published "mac-address" property.
3593 * If it is present, use it as the mac address for this device.
3595 if (ddi_getlongprop(DDI_DEV_T_ANY
, dip
, DDI_PROP_DONTPASS
,
3596 "mac-address", (caddr_t
)&prop
, &prop_len
) == DDI_PROP_SUCCESS
) {
3597 if (prop_len
>= ETHERADDRL
) {
3598 ether_bcopy(prop
, &hmep
->hme_ouraddr
);
3599 kmem_free(prop
, prop_len
);
3602 kmem_free(prop
, prop_len
);
3605 ether_bcopy(&hmep
->hme_factaddr
, &hmep
->hme_ouraddr
);
3610 hme_check_acc_handle(char *file
, uint_t line
, struct hme
*hmep
,
3611 ddi_acc_handle_t handle
)