/*
 * This file is provided under a CDDLv1 license.  When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2016 Joyent, Inc.
 */

/*
 * **********************************************************************
 * This file contains the interface routines for the Solaris OS.	*
 * It has all DDI entry point routines and GLD entry point routines.	*
 *									*
 * This file also contains routines that take care of initialization,	*
 * the uninit routine and the interrupt routine.			*
 * **********************************************************************
 */
#include "e1000g_sw.h"
#include "e1000g_debug.h"

static char ident[] = "Intel PRO/1000 Ethernet";
/* LINTED E_STATIC_UNUSED */
static char e1000g_version[] = "Driver Ver. 5.3.24";
/*
 * Prototypes for DDI entry points
 */
static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
static int e1000g_quiesce(dev_info_t *);

/*
 * init and intr routine prototypes
 */
static int e1000g_resume(dev_info_t *);
static int e1000g_suspend(dev_info_t *);
static uint_t e1000g_intr_pciexpress(caddr_t);
static uint_t e1000g_intr(caddr_t);
static void e1000g_intr_work(struct e1000g *, uint32_t);
#pragma inline(e1000g_intr_work)
static int e1000g_init(struct e1000g *);
static int e1000g_start(struct e1000g *, boolean_t);
static void e1000g_stop(struct e1000g *, boolean_t);
static int e1000g_m_start(void *);
static void e1000g_m_stop(void *);
static int e1000g_m_promisc(void *, boolean_t);
static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, void *);
static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);
static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
    const void *);
static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *);
static void e1000g_init_locks(struct e1000g *);
static void e1000g_destroy_locks(struct e1000g *);
static int e1000g_identify_hardware(struct e1000g *);
static int e1000g_regs_map(struct e1000g *);
static int e1000g_set_driver_params(struct e1000g *);
static void e1000g_set_bufsize(struct e1000g *);
static int e1000g_register_mac(struct e1000g *);
static boolean_t e1000g_rx_drain(struct e1000g *);
static boolean_t e1000g_tx_drain(struct e1000g *);
static void e1000g_init_unicst(struct e1000g *);
static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
static int e1000g_alloc_rx_data(struct e1000g *);
static void e1000g_release_multicast(struct e1000g *);
static void e1000g_pch_limits(struct e1000g *);
static uint32_t e1000g_mtu2maxframe(uint32_t);
static boolean_t e1000g_reset_adapter(struct e1000g *);
static void e1000g_tx_clean(struct e1000g *);
static void e1000g_rx_clean(struct e1000g *);
static void e1000g_link_timer(void *);
static void e1000g_local_timer(void *);
static boolean_t e1000g_link_check(struct e1000g *);
static boolean_t e1000g_stall_check(struct e1000g *);
static void e1000g_smartspeed(struct e1000g *);
static void e1000g_get_conf(struct e1000g *);
static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int,
    int *);
static void enable_watchdog_timer(struct e1000g *);
static void disable_watchdog_timer(struct e1000g *);
static void start_watchdog_timer(struct e1000g *);
static void restart_watchdog_timer(struct e1000g *);
static void stop_watchdog_timer(struct e1000g *);
static void stop_link_timer(struct e1000g *);
static void stop_82547_timer(e1000g_tx_ring_t *);
static void e1000g_force_speed_duplex(struct e1000g *);
static void e1000g_setup_max_mtu(struct e1000g *);
static void e1000g_get_max_frame_size(struct e1000g *);
static boolean_t is_valid_mac_addr(uint8_t *);
static void e1000g_unattach(dev_info_t *, struct e1000g *);
static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *);

static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
    struct iocblk *, mblk_t *);
static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
    struct iocblk *, mblk_t *);
static boolean_t e1000g_check_loopback_support(struct e1000_hw *);
static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
static void e1000g_set_internal_loopback(struct e1000g *);
static void e1000g_set_external_loopback_1000(struct e1000g *);
static void e1000g_set_external_loopback_100(struct e1000g *);
static void e1000g_set_external_loopback_10(struct e1000g *);
static int e1000g_add_intrs(struct e1000g *);
static int e1000g_intr_add(struct e1000g *, int);
static int e1000g_rem_intrs(struct e1000g *);
static int e1000g_enable_intrs(struct e1000g *);
static int e1000g_disable_intrs(struct e1000g *);
static boolean_t e1000g_link_up(struct e1000g *);
static void e1000g_get_phy_state(struct e1000g *);
static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
static void e1000g_fm_init(struct e1000g *Adapter);
static void e1000g_fm_fini(struct e1000g *Adapter);
static void e1000g_param_sync(struct e1000g *);
static void e1000g_get_driver_control(struct e1000_hw *);
static void e1000g_release_driver_control(struct e1000_hw *);
static void e1000g_restore_promisc(struct e1000g *Adapter);
char *e1000g_priv_props[] = {
	"_tx_bcopy_threshold",
	"_tx_interrupt_enable",
	"_tx_intr_abs_delay",
	"_rx_bcopy_threshold",
	"_max_num_rcv_packets",
	"_rx_intr_abs_delay",
	"_intr_throttling_rate",
	"_adv_asym_pause_cap",
	NULL
};
static struct cb_ops cb_ws_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
static struct dev_ops ws_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	e1000g_attach,		/* devo_attach */
	e1000g_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&cb_ws_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	e1000g_quiesce		/* devo_quiesce */
};
static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ident,			/* Description string */
	&ws_ops			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
/* Access attributes for register mapping */
static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

#define	E1000G_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
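
/*
 * E1000G_M_CALLBACK_FLAGS advertises to the GLDv3 MAC framework which of
 * the optional mac_callbacks entry points this driver implements:
 * mc_ioctl, mc_getcapab, mc_setprop, mc_getprop and mc_propinfo.
 */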
static mac_callbacks_t e1000g_m_callbacks = {
	E1000G_M_CALLBACK_FLAGS,
uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K;
uint32_t e1000g_mblks_pending = 0;
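
/*
 * e1000g_mblks_pending counts rx buffers that are still held by the upper
 * layers; _fini() refuses to unload the module while this count is non-zero.
 */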
/*
 * Workaround for Dynamic Reconfiguration support, for the x86 platform only.
 * Here we maintain a private dev_info list if e1000g_force_detach is
 * enabled. If we force the driver to detach while there are still some
 * rx buffers retained in the upper layer, we have to keep a copy of the
 * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
 * structure will be freed after the driver is detached. However, when we
 * finally free those rx buffers released by the upper layer, we need to
 * refer to the dev_info to free the dma buffers, so we save a copy of
 * the dev_info for this purpose. On the x86 platform we assume this copy
 * of dev_info is always valid, but on the SPARC platform it could be
 * invalid after a system-board-level DR operation. For this reason, the
 * global variable e1000g_force_detach must be B_FALSE on the SPARC platform.
 */
boolean_t e1000g_force_detach = B_TRUE;
private_devi_list_t *e1000g_private_devi_list = NULL;
/*
 * The mutex e1000g_rx_detach_lock is defined to protect the processing of
 * the private dev_info list, and to serialize the processing of rx buffer
 * freeing and rx buffer recycling.
 */
kmutex_t e1000g_rx_detach_lock;
/*
 * The rwlock e1000g_dma_type_lock is defined to protect the global flag
 * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
 * If there are many e1000g instances, the system may run out of DVMA
 * resources during the initialization of the instances, then the flag will
 * be changed to "USE_DMA". Because different e1000g instances are initialized
 * in parallel, we need to use this lock to protect the flag.
 */
krwlock_t e1000g_dma_type_lock;
/*
 * The 82546 chipset is a dual-port device and both ports share one EEPROM.
 * Based on the information from Intel, the 82546 chipset has a hardware
 * problem: when one port is being reset while the other port is trying to
 * access the EEPROM, it can cause a system hang or panic. To work around
 * this hardware problem, we use a global mutex to prevent such operations
 * from happening simultaneously on different instances. This workaround is
 * applied to all the devices supported by this driver.
 */
kmutex_t e1000g_nvm_lock;
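
/*
 * Every call in this file that resets the hardware or touches the NVM
 * (e1000_reset_hw(), e1000_validate_nvm_checksum(), e1000_read_mac_addr(),
 * e1000_init_hw()) is made with e1000g_nvm_lock held, so the 82546
 * workaround described above is enforced uniformly.
 */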
/*
 * Loadable module configuration entry points for the driver
 */

/*
 * _init - module initialization
 */
int
_init(void)
{
	int status;

	mac_init_ops(&ws_ops, WSNAME);
	status = mod_install(&modlinkage);
	if (status != DDI_SUCCESS)
		mac_fini_ops(&ws_ops);
	else {
		mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL);
		rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
		mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL);
	}

	return (status);
}
/*
 * _fini - module finalization
 */
int
_fini(void)
{
	int status;

	if (e1000g_mblks_pending != 0)
		return (EBUSY);

	status = mod_remove(&modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&ws_ops);

		if (e1000g_force_detach) {
			private_devi_list_t *devi_node;

			mutex_enter(&e1000g_rx_detach_lock);
			while (e1000g_private_devi_list != NULL) {
				devi_node = e1000g_private_devi_list;
				e1000g_private_devi_list =
				    e1000g_private_devi_list->next;

				kmem_free(devi_node->priv_dip,
				    sizeof (struct dev_info));
				kmem_free(devi_node,
				    sizeof (private_devi_list_t));
			}
			mutex_exit(&e1000g_rx_detach_lock);
		}

		mutex_destroy(&e1000g_rx_detach_lock);
		rw_destroy(&e1000g_dma_type_lock);
		mutex_destroy(&e1000g_nvm_lock);
	}

	return (status);
}
/*
 * _info - module information
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
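
/*
 * _init(), _fini() and _info() above are the standard loadable-module entry
 * points; they hand the modlinkage defined earlier to the kernel's
 * mod_install()/mod_remove()/mod_info() framework.
 */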
/*
 * e1000g_attach - driver attach
 *
 * This function is the device-specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of the device on the system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 */
396 e1000g_attach(dev_info_t
*devinfo
, ddi_attach_cmd_t cmd
)
398 struct e1000g
*Adapter
;
400 struct e1000g_osdep
*osdep
;
		e1000g_log(NULL, CE_WARN,
		    "Unsupported command sent to e1000g_attach...");
		return (DDI_FAILURE);
410 return (e1000g_resume(devinfo
));
417 * get device instance number
419 instance
= ddi_get_instance(devinfo
);
422 * Allocate soft data structure
424 Adapter
= kmem_zalloc(sizeof (*Adapter
), KM_SLEEP
);
426 Adapter
->dip
= devinfo
;
427 Adapter
->instance
= instance
;
428 Adapter
->tx_ring
->adapter
= Adapter
;
429 Adapter
->rx_ring
->adapter
= Adapter
;
431 hw
= &Adapter
->shared
;
432 osdep
= &Adapter
->osdep
;
434 osdep
->adapter
= Adapter
;
436 ddi_set_driver_private(devinfo
, (caddr_t
)Adapter
);
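
	/*
	 * The Adapter soft state is stored as the instance's driver-private
	 * data so that later entry points (detach, suspend/resume and the
	 * MAC callbacks) can retrieve it with ddi_get_driver_private().
	 */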
439 * Initialize for fma support
441 (void) e1000g_get_prop(Adapter
, "fm-capable",
443 DDI_FM_EREPORT_CAPABLE
| DDI_FM_ACCCHK_CAPABLE
|
444 DDI_FM_DMACHK_CAPABLE
| DDI_FM_ERRCB_CAPABLE
,
445 &Adapter
->fm_capabilities
);
446 e1000g_fm_init(Adapter
);
447 Adapter
->attach_progress
|= ATTACH_PROGRESS_FMINIT
;
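
	/*
	 * attach_progress is a bitmask of the attach stages completed so
	 * far; e1000g_unattach() consults it to tear down only what was
	 * actually set up when attach fails part-way or the driver is
	 * detached.
	 */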
452 if (pci_config_setup(devinfo
, &osdep
->cfg_handle
) != DDI_SUCCESS
) {
453 e1000g_log(Adapter
, CE_WARN
, "PCI configuration failed");
456 Adapter
->attach_progress
|= ATTACH_PROGRESS_PCI_CONFIG
;
461 if (e1000g_identify_hardware(Adapter
) != DDI_SUCCESS
) {
462 e1000g_log(Adapter
, CE_WARN
, "Identify hardware failed");
467 * Map in the device registers.
469 if (e1000g_regs_map(Adapter
) != DDI_SUCCESS
) {
470 e1000g_log(Adapter
, CE_WARN
, "Mapping registers failed");
473 Adapter
->attach_progress
|= ATTACH_PROGRESS_REGS_MAP
;
476 * Initialize driver parameters
478 if (e1000g_set_driver_params(Adapter
) != DDI_SUCCESS
) {
481 Adapter
->attach_progress
|= ATTACH_PROGRESS_SETUP
;
483 if (e1000g_check_acc_handle(Adapter
->osdep
.cfg_handle
) != DDI_FM_OK
) {
484 ddi_fm_service_impact(Adapter
->dip
, DDI_SERVICE_LOST
);
489 * Disable ULP support
491 (void) e1000_disable_ulp_lpt_lp(hw
, TRUE
);
494 * Initialize interrupts
496 if (e1000g_add_intrs(Adapter
) != DDI_SUCCESS
) {
497 e1000g_log(Adapter
, CE_WARN
, "Add interrupts failed");
500 Adapter
->attach_progress
|= ATTACH_PROGRESS_ADD_INTR
;
	/*
	 * Initialize mutexes for this device.
	 * Do this before enabling the interrupt handler and
	 * registering the softint, to avoid the condition where the
	 * interrupt handler could try to use an uninitialized mutex.
	 */
	e1000g_init_locks(Adapter);
509 Adapter
->attach_progress
|= ATTACH_PROGRESS_LOCKS
;
512 * Initialize Driver Counters
514 if (e1000g_init_stats(Adapter
) != DDI_SUCCESS
) {
515 e1000g_log(Adapter
, CE_WARN
, "Init stats failed");
518 Adapter
->attach_progress
|= ATTACH_PROGRESS_KSTATS
;
521 * Initialize chip hardware and software structures
523 rw_enter(&Adapter
->chip_lock
, RW_WRITER
);
524 if (e1000g_init(Adapter
) != DDI_SUCCESS
) {
525 rw_exit(&Adapter
->chip_lock
);
526 e1000g_log(Adapter
, CE_WARN
, "Adapter initialization failed");
529 rw_exit(&Adapter
->chip_lock
);
530 Adapter
->attach_progress
|= ATTACH_PROGRESS_INIT
;
533 * Register the driver to the MAC
535 if (e1000g_register_mac(Adapter
) != DDI_SUCCESS
) {
536 e1000g_log(Adapter
, CE_WARN
, "Register MAC failed");
539 Adapter
->attach_progress
|= ATTACH_PROGRESS_MAC
;
542 * Now that mutex locks are initialized, and the chip is also
543 * initialized, enable interrupts.
545 if (e1000g_enable_intrs(Adapter
) != DDI_SUCCESS
) {
546 e1000g_log(Adapter
, CE_WARN
, "Enable DDI interrupts failed");
549 Adapter
->attach_progress
|= ATTACH_PROGRESS_ENABLE_INTR
;
	/*
	 * If e1000g_force_detach is enabled, create a new entry in the
	 * global private dip list, which maintains the priv_dip for DR
	 * support after the driver is detached.
	 */
	if (e1000g_force_detach) {
557 private_devi_list_t
*devi_node
;
560 kmem_zalloc(sizeof (struct dev_info
), KM_SLEEP
);
561 bcopy(DEVI(devinfo
), DEVI(Adapter
->priv_dip
),
562 sizeof (struct dev_info
));
565 kmem_zalloc(sizeof (private_devi_list_t
), KM_SLEEP
);
567 mutex_enter(&e1000g_rx_detach_lock
);
568 devi_node
->priv_dip
= Adapter
->priv_dip
;
569 devi_node
->flag
= E1000G_PRIV_DEVI_ATTACH
;
570 devi_node
->pending_rx_count
= 0;
572 Adapter
->priv_devi_node
= devi_node
;
574 if (e1000g_private_devi_list
== NULL
) {
575 devi_node
->prev
= NULL
;
576 devi_node
->next
= NULL
;
577 e1000g_private_devi_list
= devi_node
;
579 devi_node
->prev
= NULL
;
580 devi_node
->next
= e1000g_private_devi_list
;
581 e1000g_private_devi_list
->prev
= devi_node
;
582 e1000g_private_devi_list
= devi_node
;
584 mutex_exit(&e1000g_rx_detach_lock
);
587 Adapter
->e1000g_state
= E1000G_INITIALIZED
;
588 return (DDI_SUCCESS
);
591 e1000g_unattach(devinfo
, Adapter
);
592 return (DDI_FAILURE
);
596 e1000g_register_mac(struct e1000g
*Adapter
)
598 struct e1000_hw
*hw
= &Adapter
->shared
;
602 if ((mac
= mac_alloc(MAC_VERSION
)) == NULL
)
603 return (DDI_FAILURE
);
605 mac
->m_type_ident
= MAC_PLUGIN_IDENT_ETHER
;
606 mac
->m_driver
= Adapter
;
607 mac
->m_dip
= Adapter
->dip
;
608 mac
->m_src_addr
= hw
->mac
.addr
;
609 mac
->m_callbacks
= &e1000g_m_callbacks
;
611 mac
->m_max_sdu
= Adapter
->default_mtu
;
612 mac
->m_margin
= VLAN_TAGSZ
;
613 mac
->m_priv_props
= e1000g_priv_props
;
614 mac
->m_v12n
= MAC_VIRT_LEVEL1
;
616 err
= mac_register(mac
, &Adapter
->mh
);
619 return (err
== 0 ? DDI_SUCCESS
: DDI_FAILURE
);
623 e1000g_identify_hardware(struct e1000g
*Adapter
)
625 struct e1000_hw
*hw
= &Adapter
->shared
;
626 struct e1000g_osdep
*osdep
= &Adapter
->osdep
;
628 /* Get the device id */
630 pci_config_get16(osdep
->cfg_handle
, PCI_CONF_VENID
);
632 pci_config_get16(osdep
->cfg_handle
, PCI_CONF_DEVID
);
634 pci_config_get8(osdep
->cfg_handle
, PCI_CONF_REVID
);
635 hw
->subsystem_device_id
=
636 pci_config_get16(osdep
->cfg_handle
, PCI_CONF_SUBSYSID
);
637 hw
->subsystem_vendor_id
=
638 pci_config_get16(osdep
->cfg_handle
, PCI_CONF_SUBVENID
);
640 if (e1000_set_mac_type(hw
) != E1000_SUCCESS
) {
641 E1000G_DEBUGLOG_0(Adapter
, E1000G_INFO_LEVEL
,
642 "MAC type could not be set properly.");
643 return (DDI_FAILURE
);
646 return (DDI_SUCCESS
);
650 e1000g_regs_map(struct e1000g
*Adapter
)
652 dev_info_t
*devinfo
= Adapter
->dip
;
653 struct e1000_hw
*hw
= &Adapter
->shared
;
654 struct e1000g_osdep
*osdep
= &Adapter
->osdep
;
659 rnumber
= ADAPTER_REG_SET
;
660 /* Get size of adapter register memory */
661 if (ddi_dev_regsize(devinfo
, rnumber
, &mem_size
) !=
663 E1000G_DEBUGLOG_0(Adapter
, CE_WARN
,
664 "ddi_dev_regsize for registers failed");
665 return (DDI_FAILURE
);
668 /* Map adapter register memory */
669 if ((ddi_regs_map_setup(devinfo
, rnumber
,
670 (caddr_t
*)&hw
->hw_addr
, 0, mem_size
, &e1000g_regs_acc_attr
,
671 &osdep
->reg_handle
)) != DDI_SUCCESS
) {
672 E1000G_DEBUGLOG_0(Adapter
, CE_WARN
,
673 "ddi_regs_map_setup for registers failed");
677 /* ICH needs to map flash memory */
678 switch (hw
->mac
.type
) {
685 rnumber
= ICH_FLASH_REG_SET
;
688 if (ddi_dev_regsize(devinfo
, rnumber
,
689 &mem_size
) != DDI_SUCCESS
) {
690 E1000G_DEBUGLOG_0(Adapter
, CE_WARN
,
691 "ddi_dev_regsize for ICH flash failed");
696 if (ddi_regs_map_setup(devinfo
, rnumber
,
697 (caddr_t
*)&hw
->flash_address
, 0,
698 mem_size
, &e1000g_regs_acc_attr
,
699 &osdep
->ich_flash_handle
) != DDI_SUCCESS
) {
700 E1000G_DEBUGLOG_0(Adapter
, CE_WARN
,
701 "ddi_regs_map_setup for ICH flash failed");
707 * On the SPT, the device flash is actually in BAR0, not a
708 * separate BAR. Therefore we end up setting the
709 * ich_flash_handle to be the same as the register handle.
710 * We mark the same to reduce the confusion in the other
711 * functions and macros. Though this does make the set up and
712 * tear-down path slightly more complicated.
714 osdep
->ich_flash_handle
= osdep
->reg_handle
;
715 hw
->flash_address
= hw
->hw_addr
;
721 switch (hw
->mac
.type
) {
727 case e1000_82541_rev_2
:
728 /* find the IO bar */
730 for (offset
= PCI_CONF_BASE1
;
731 offset
<= PCI_CONF_BASE5
; offset
+= 4) {
732 if (e1000g_get_bar_info(devinfo
, offset
, &bar_info
)
735 if (bar_info
.type
== E1000G_BAR_IO
) {
736 rnumber
= bar_info
.rnumber
;
742 E1000G_DEBUGLOG_0(Adapter
, CE_WARN
,
743 "No io space is found");
747 /* get io space size */
748 if (ddi_dev_regsize(devinfo
, rnumber
,
749 &mem_size
) != DDI_SUCCESS
) {
750 E1000G_DEBUGLOG_0(Adapter
, CE_WARN
,
751 "ddi_dev_regsize for io space failed");
756 if ((ddi_regs_map_setup(devinfo
, rnumber
,
757 (caddr_t
*)&hw
->io_base
, 0, mem_size
,
758 &e1000g_regs_acc_attr
,
759 &osdep
->io_reg_handle
)) != DDI_SUCCESS
) {
760 E1000G_DEBUGLOG_0(Adapter
, CE_WARN
,
761 "ddi_regs_map_setup for io space failed");
770 return (DDI_SUCCESS
);
773 if (osdep
->reg_handle
!= NULL
)
774 ddi_regs_map_free(&osdep
->reg_handle
);
775 if (osdep
->ich_flash_handle
!= NULL
&& hw
->mac
.type
!= e1000_pch_spt
)
776 ddi_regs_map_free(&osdep
->ich_flash_handle
);
777 return (DDI_FAILURE
);
781 e1000g_set_driver_params(struct e1000g
*Adapter
)
785 hw
= &Adapter
->shared
;
787 /* Set MAC type and initialize hardware functions */
788 if (e1000_setup_init_funcs(hw
, B_TRUE
) != E1000_SUCCESS
) {
789 E1000G_DEBUGLOG_0(Adapter
, CE_WARN
,
790 "Could not setup hardware functions");
791 return (DDI_FAILURE
);
794 /* Get bus information */
795 if (e1000_get_bus_info(hw
) != E1000_SUCCESS
) {
796 E1000G_DEBUGLOG_0(Adapter
, CE_WARN
,
797 "Could not get bus information");
798 return (DDI_FAILURE
);
801 e1000_read_pci_cfg(hw
, PCI_COMMAND_REGISTER
, &hw
->bus
.pci_cmd_word
);
803 hw
->mac
.autoneg_failed
= B_TRUE
;
805 /* Set the autoneg_wait_to_complete flag to B_FALSE */
806 hw
->phy
.autoneg_wait_to_complete
= B_FALSE
;
808 /* Adaptive IFS related changes */
809 hw
->mac
.adaptive_ifs
= B_TRUE
;
811 /* Enable phy init script for IGP phy of 82541/82547 */
812 if ((hw
->mac
.type
== e1000_82547
) ||
813 (hw
->mac
.type
== e1000_82541
) ||
814 (hw
->mac
.type
== e1000_82547_rev_2
) ||
815 (hw
->mac
.type
== e1000_82541_rev_2
))
816 e1000_init_script_state_82541(hw
, B_TRUE
);
818 /* Enable the TTL workaround for 82541/82547 */
819 e1000_set_ttl_workaround_state_82541(hw
, B_TRUE
);
821 Adapter
->strip_crc
= B_FALSE
;
823 /* setup the maximum MTU size of the chip */
824 e1000g_setup_max_mtu(Adapter
);
826 /* Get speed/duplex settings in conf file */
827 hw
->mac
.forced_speed_duplex
= ADVERTISE_100_FULL
;
828 hw
->phy
.autoneg_advertised
= AUTONEG_ADVERTISE_SPEED_DEFAULT
;
829 e1000g_force_speed_duplex(Adapter
);
831 /* Get Jumbo Frames settings in conf file */
832 e1000g_get_max_frame_size(Adapter
);
834 /* Get conf file properties */
835 e1000g_get_conf(Adapter
);
837 /* enforce PCH limits */
838 e1000g_pch_limits(Adapter
);
840 /* Set Rx/Tx buffer size */
841 e1000g_set_bufsize(Adapter
);
843 /* Master Latency Timer */
844 Adapter
->master_latency_timer
= DEFAULT_MASTER_LATENCY_TIMER
;
847 if (hw
->phy
.media_type
== e1000_media_type_copper
) {
848 hw
->phy
.mdix
= 0; /* AUTO_ALL_MODES */
849 hw
->phy
.disable_polarity_correction
= B_FALSE
;
850 hw
->phy
.ms_type
= e1000_ms_hw_default
; /* E1000_MASTER_SLAVE */
853 /* The initial link state should be "unknown" */
854 Adapter
->link_state
= LINK_STATE_UNKNOWN
;
856 /* Initialize rx parameters */
857 Adapter
->rx_intr_delay
= DEFAULT_RX_INTR_DELAY
;
858 Adapter
->rx_intr_abs_delay
= DEFAULT_RX_INTR_ABS_DELAY
;
860 /* Initialize tx parameters */
861 Adapter
->tx_intr_enable
= DEFAULT_TX_INTR_ENABLE
;
862 Adapter
->tx_bcopy_thresh
= DEFAULT_TX_BCOPY_THRESHOLD
;
863 Adapter
->tx_intr_delay
= DEFAULT_TX_INTR_DELAY
;
864 Adapter
->tx_intr_abs_delay
= DEFAULT_TX_INTR_ABS_DELAY
;
866 /* Initialize rx parameters */
867 Adapter
->rx_bcopy_thresh
= DEFAULT_RX_BCOPY_THRESHOLD
;
869 return (DDI_SUCCESS
);
873 e1000g_setup_max_mtu(struct e1000g
*Adapter
)
875 struct e1000_mac_info
*mac
= &Adapter
->shared
.mac
;
876 struct e1000_phy_info
*phy
= &Adapter
->shared
.phy
;
879 /* types that do not support jumbo frames */
883 Adapter
->max_mtu
= ETHERMTU
;
885 /* ich9 supports jumbo frames except on one phy type */
887 if (phy
->type
== e1000_phy_ife
)
888 Adapter
->max_mtu
= ETHERMTU
;
890 Adapter
->max_mtu
= MAXIMUM_MTU_9K
;
892 /* pch can do jumbo frames up to 4K */
894 Adapter
->max_mtu
= MAXIMUM_MTU_4K
;
896 /* pch2 can do jumbo frames up to 9K */
900 Adapter
->max_mtu
= MAXIMUM_MTU_9K
;
902 /* types with a special limit */
906 case e1000_80003es2lan
:
908 if (e1000g_jumbo_mtu
>= ETHERMTU
&&
909 e1000g_jumbo_mtu
<= MAXIMUM_MTU_9K
) {
910 Adapter
->max_mtu
= e1000g_jumbo_mtu
;
912 Adapter
->max_mtu
= MAXIMUM_MTU_9K
;
915 /* default limit is 16K */
917 Adapter
->max_mtu
= FRAME_SIZE_UPTO_16K
-
918 sizeof (struct ether_vlan_header
) - ETHERFCSL
;
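	/*
	 * Worked example (assuming FRAME_SIZE_UPTO_16K is 16384 bytes):
	 * sizeof (struct ether_vlan_header) is 18 and ETHERFCSL is 4, so
	 * the default limit works out to an MTU of 16384 - 18 - 4 = 16362.
	 */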
924 e1000g_set_bufsize(struct e1000g
*Adapter
)
926 struct e1000_mac_info
*mac
= &Adapter
->shared
.mac
;
930 dev_info_t
*devinfo
= Adapter
->dip
;
931 /* Get the system page size */
932 Adapter
->sys_page_sz
= ddi_ptob(devinfo
, (ulong_t
)1);
935 Adapter
->min_frame_size
= ETHERMIN
+ ETHERFCSL
;
937 if (Adapter
->mem_workaround_82546
&&
938 ((mac
->type
== e1000_82545
) ||
939 (mac
->type
== e1000_82546
) ||
940 (mac
->type
== e1000_82546_rev_3
))) {
941 Adapter
->rx_buffer_size
= E1000_RX_BUFFER_SIZE_2K
;
943 rx_size
= Adapter
->max_frame_size
;
944 if ((rx_size
> FRAME_SIZE_UPTO_2K
) &&
945 (rx_size
<= FRAME_SIZE_UPTO_4K
))
946 Adapter
->rx_buffer_size
= E1000_RX_BUFFER_SIZE_4K
;
947 else if ((rx_size
> FRAME_SIZE_UPTO_4K
) &&
948 (rx_size
<= FRAME_SIZE_UPTO_8K
))
949 Adapter
->rx_buffer_size
= E1000_RX_BUFFER_SIZE_8K
;
950 else if ((rx_size
> FRAME_SIZE_UPTO_8K
) &&
951 (rx_size
<= FRAME_SIZE_UPTO_16K
))
952 Adapter
->rx_buffer_size
= E1000_RX_BUFFER_SIZE_16K
;
954 Adapter
->rx_buffer_size
= E1000_RX_BUFFER_SIZE_2K
;
956 Adapter
->rx_buffer_size
+= E1000G_IPALIGNROOM
;
958 tx_size
= Adapter
->max_frame_size
;
959 if ((tx_size
> FRAME_SIZE_UPTO_2K
) && (tx_size
<= FRAME_SIZE_UPTO_4K
))
960 Adapter
->tx_buffer_size
= E1000_TX_BUFFER_SIZE_4K
;
961 else if ((tx_size
> FRAME_SIZE_UPTO_4K
) &&
962 (tx_size
<= FRAME_SIZE_UPTO_8K
))
963 Adapter
->tx_buffer_size
= E1000_TX_BUFFER_SIZE_8K
;
964 else if ((tx_size
> FRAME_SIZE_UPTO_8K
) &&
965 (tx_size
<= FRAME_SIZE_UPTO_16K
))
966 Adapter
->tx_buffer_size
= E1000_TX_BUFFER_SIZE_16K
;
968 Adapter
->tx_buffer_size
= E1000_TX_BUFFER_SIZE_2K
;
	/*
	 * Wiseman adapters require receive buffers to be aligned on a
	 * 256-byte boundary. Livengood does not, and forcing the alignment
	 * on all hardware would hurt performance, so it is applied only to
	 * Wiseman and to jumbo-frame mode. The rest of the time normal
	 * frames are fine, but an unaligned buffer carries a risk of data
	 * loss, so all Wiseman boards get 256-byte aligned receive buffers.
	 */
981 if (mac
->type
< e1000_82543
)
982 Adapter
->rx_buf_align
= RECEIVE_BUFFER_ALIGN_SIZE
;
984 Adapter
->rx_buf_align
= 1;
/*
 * e1000g_detach - driver detach
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * The interrupt routines are disabled, and all memory allocated by this
 * driver is freed.
 */
1003 e1000g_detach(dev_info_t
*devinfo
, ddi_detach_cmd_t cmd
)
1005 struct e1000g
*Adapter
;
1010 return (DDI_FAILURE
);
1013 return (e1000g_suspend(devinfo
));
1019 Adapter
= (struct e1000g
*)ddi_get_driver_private(devinfo
);
1020 if (Adapter
== NULL
)
1021 return (DDI_FAILURE
);
1023 rx_drain
= e1000g_rx_drain(Adapter
);
1024 if (!rx_drain
&& !e1000g_force_detach
)
1025 return (DDI_FAILURE
);
1027 if (mac_unregister(Adapter
->mh
) != 0) {
1028 e1000g_log(Adapter
, CE_WARN
, "Unregister MAC failed");
1029 return (DDI_FAILURE
);
1031 Adapter
->attach_progress
&= ~ATTACH_PROGRESS_MAC
;
1033 ASSERT(!(Adapter
->e1000g_state
& E1000G_STARTED
));
1035 if (!e1000g_force_detach
&& !rx_drain
)
1036 return (DDI_FAILURE
);
1038 e1000g_unattach(devinfo
, Adapter
);
1040 return (DDI_SUCCESS
);
1044 * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
1047 e1000g_free_priv_devi_node(private_devi_list_t
*devi_node
)
1049 ASSERT(e1000g_private_devi_list
!= NULL
);
1050 ASSERT(devi_node
!= NULL
);
1052 if (devi_node
->prev
!= NULL
)
1053 devi_node
->prev
->next
= devi_node
->next
;
1054 if (devi_node
->next
!= NULL
)
1055 devi_node
->next
->prev
= devi_node
->prev
;
1056 if (devi_node
== e1000g_private_devi_list
)
1057 e1000g_private_devi_list
= devi_node
->next
;
1059 kmem_free(devi_node
->priv_dip
,
1060 sizeof (struct dev_info
));
1061 kmem_free(devi_node
,
1062 sizeof (private_devi_list_t
));
1066 e1000g_unattach(dev_info_t
*devinfo
, struct e1000g
*Adapter
)
1068 private_devi_list_t
*devi_node
;
1071 if (Adapter
->attach_progress
& ATTACH_PROGRESS_ENABLE_INTR
) {
1072 (void) e1000g_disable_intrs(Adapter
);
1075 if (Adapter
->attach_progress
& ATTACH_PROGRESS_MAC
) {
1076 (void) mac_unregister(Adapter
->mh
);
1079 if (Adapter
->attach_progress
& ATTACH_PROGRESS_ADD_INTR
) {
1080 (void) e1000g_rem_intrs(Adapter
);
1083 if (Adapter
->attach_progress
& ATTACH_PROGRESS_SETUP
) {
1084 (void) ddi_prop_remove_all(devinfo
);
1087 if (Adapter
->attach_progress
& ATTACH_PROGRESS_KSTATS
) {
1088 kstat_delete((kstat_t
*)Adapter
->e1000g_ksp
);
1091 if (Adapter
->attach_progress
& ATTACH_PROGRESS_INIT
) {
1092 stop_link_timer(Adapter
);
1094 mutex_enter(&e1000g_nvm_lock
);
1095 result
= e1000_reset_hw(&Adapter
->shared
);
1096 mutex_exit(&e1000g_nvm_lock
);
1098 if (result
!= E1000_SUCCESS
) {
1099 e1000g_fm_ereport(Adapter
, DDI_FM_DEVICE_INVAL_STATE
);
1100 ddi_fm_service_impact(Adapter
->dip
, DDI_SERVICE_LOST
);
1104 e1000g_release_multicast(Adapter
);
1106 if (Adapter
->attach_progress
& ATTACH_PROGRESS_REGS_MAP
) {
1107 if (Adapter
->osdep
.reg_handle
!= NULL
)
1108 ddi_regs_map_free(&Adapter
->osdep
.reg_handle
);
1109 if (Adapter
->osdep
.ich_flash_handle
!= NULL
&&
1110 Adapter
->shared
.mac
.type
!= e1000_pch_spt
)
1111 ddi_regs_map_free(&Adapter
->osdep
.ich_flash_handle
);
1112 if (Adapter
->osdep
.io_reg_handle
!= NULL
)
1113 ddi_regs_map_free(&Adapter
->osdep
.io_reg_handle
);
1116 if (Adapter
->attach_progress
& ATTACH_PROGRESS_PCI_CONFIG
) {
1117 if (Adapter
->osdep
.cfg_handle
!= NULL
)
1118 pci_config_teardown(&Adapter
->osdep
.cfg_handle
);
1121 if (Adapter
->attach_progress
& ATTACH_PROGRESS_LOCKS
) {
1122 e1000g_destroy_locks(Adapter
);
1125 if (Adapter
->attach_progress
& ATTACH_PROGRESS_FMINIT
) {
1126 e1000g_fm_fini(Adapter
);
1129 mutex_enter(&e1000g_rx_detach_lock
);
1130 if (e1000g_force_detach
&& (Adapter
->priv_devi_node
!= NULL
)) {
1131 devi_node
= Adapter
->priv_devi_node
;
1132 devi_node
->flag
|= E1000G_PRIV_DEVI_DETACH
;
1134 if (devi_node
->pending_rx_count
== 0) {
1135 e1000g_free_priv_devi_node(devi_node
);
1138 mutex_exit(&e1000g_rx_detach_lock
);
1140 kmem_free((caddr_t
)Adapter
, sizeof (struct e1000g
));
	/*
	 * Another hotplug spec requirement:
	 * run ddi_set_driver_private(devinfo, NULL);
	 */
	ddi_set_driver_private(devinfo, NULL);
1150 * Get the BAR type and rnumber for a given PCI BAR offset
1153 e1000g_get_bar_info(dev_info_t
*dip
, int bar_offset
, bar_info_t
*bar_info
)
1155 pci_regspec_t
*regs
;
1157 int type
, rnumber
, rcount
;
1159 ASSERT((bar_offset
>= PCI_CONF_BASE0
) &&
1160 (bar_offset
<= PCI_CONF_BASE5
));
1163 * Get the DDI "reg" property
1165 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, dip
,
1166 DDI_PROP_DONTPASS
, "reg", (int **)®s
,
1167 ®s_length
) != DDI_PROP_SUCCESS
) {
1168 return (DDI_FAILURE
);
1171 rcount
= regs_length
* sizeof (int) / sizeof (pci_regspec_t
);
1173 * Check the BAR offset
1175 for (rnumber
= 0; rnumber
< rcount
; ++rnumber
) {
1176 if (PCI_REG_REG_G(regs
[rnumber
].pci_phys_hi
) == bar_offset
) {
1177 type
= regs
[rnumber
].pci_phys_hi
& PCI_ADDR_MASK
;
1182 ddi_prop_free(regs
);
1184 if (rnumber
>= rcount
)
1185 return (DDI_FAILURE
);
1188 case PCI_ADDR_CONFIG
:
1189 bar_info
->type
= E1000G_BAR_CONFIG
;
1192 bar_info
->type
= E1000G_BAR_IO
;
1194 case PCI_ADDR_MEM32
:
1195 bar_info
->type
= E1000G_BAR_MEM32
;
1197 case PCI_ADDR_MEM64
:
1198 bar_info
->type
= E1000G_BAR_MEM64
;
1201 return (DDI_FAILURE
);
1203 bar_info
->rnumber
= rnumber
;
1204 return (DDI_SUCCESS
);
1208 e1000g_init_locks(struct e1000g
*Adapter
)
1210 e1000g_tx_ring_t
*tx_ring
;
1211 e1000g_rx_ring_t
*rx_ring
;
1213 rw_init(&Adapter
->chip_lock
, NULL
,
1214 RW_DRIVER
, DDI_INTR_PRI(Adapter
->intr_pri
));
1215 mutex_init(&Adapter
->link_lock
, NULL
,
1216 MUTEX_DRIVER
, DDI_INTR_PRI(Adapter
->intr_pri
));
1217 mutex_init(&Adapter
->watchdog_lock
, NULL
,
1218 MUTEX_DRIVER
, DDI_INTR_PRI(Adapter
->intr_pri
));
1220 tx_ring
= Adapter
->tx_ring
;
1222 mutex_init(&tx_ring
->tx_lock
, NULL
,
1223 MUTEX_DRIVER
, DDI_INTR_PRI(Adapter
->intr_pri
));
1224 mutex_init(&tx_ring
->usedlist_lock
, NULL
,
1225 MUTEX_DRIVER
, DDI_INTR_PRI(Adapter
->intr_pri
));
1226 mutex_init(&tx_ring
->freelist_lock
, NULL
,
1227 MUTEX_DRIVER
, DDI_INTR_PRI(Adapter
->intr_pri
));
1229 rx_ring
= Adapter
->rx_ring
;
1231 mutex_init(&rx_ring
->rx_lock
, NULL
,
1232 MUTEX_DRIVER
, DDI_INTR_PRI(Adapter
->intr_pri
));
1236 e1000g_destroy_locks(struct e1000g
*Adapter
)
1238 e1000g_tx_ring_t
*tx_ring
;
1239 e1000g_rx_ring_t
*rx_ring
;
1241 tx_ring
= Adapter
->tx_ring
;
1242 mutex_destroy(&tx_ring
->tx_lock
);
1243 mutex_destroy(&tx_ring
->usedlist_lock
);
1244 mutex_destroy(&tx_ring
->freelist_lock
);
1246 rx_ring
= Adapter
->rx_ring
;
1247 mutex_destroy(&rx_ring
->rx_lock
);
1249 mutex_destroy(&Adapter
->link_lock
);
1250 mutex_destroy(&Adapter
->watchdog_lock
);
1251 rw_destroy(&Adapter
->chip_lock
);
	/* destroy mutexes initialized in the shared code */
	e1000_destroy_hw_mutex(&Adapter->shared);
1258 e1000g_resume(dev_info_t
*devinfo
)
1260 struct e1000g
*Adapter
;
1262 Adapter
= (struct e1000g
*)ddi_get_driver_private(devinfo
);
1263 if (Adapter
== NULL
)
1264 e1000g_log(Adapter
, CE_PANIC
,
1265 "Instance pointer is null\n");
1267 if (Adapter
->dip
!= devinfo
)
1268 e1000g_log(Adapter
, CE_PANIC
,
1269 "Devinfo is not the same as saved devinfo\n");
1271 rw_enter(&Adapter
->chip_lock
, RW_WRITER
);
1273 if (Adapter
->e1000g_state
& E1000G_STARTED
) {
1274 if (e1000g_start(Adapter
, B_FALSE
) != DDI_SUCCESS
) {
1275 rw_exit(&Adapter
->chip_lock
);
1277 * We note the failure, but return success, as the
1278 * system is still usable without this controller.
1280 e1000g_log(Adapter
, CE_WARN
,
1281 "e1000g_resume: failed to restart controller\n");
1282 return (DDI_SUCCESS
);
1284 /* Enable and start the watchdog timer */
1285 enable_watchdog_timer(Adapter
);
1288 Adapter
->e1000g_state
&= ~E1000G_SUSPENDED
;
1290 rw_exit(&Adapter
->chip_lock
);
1292 return (DDI_SUCCESS
);
1296 e1000g_suspend(dev_info_t
*devinfo
)
1298 struct e1000g
*Adapter
;
1300 Adapter
= (struct e1000g
*)ddi_get_driver_private(devinfo
);
1301 if (Adapter
== NULL
)
1302 return (DDI_FAILURE
);
1304 rw_enter(&Adapter
->chip_lock
, RW_WRITER
);
1306 Adapter
->e1000g_state
|= E1000G_SUSPENDED
;
1308 /* if the port isn't plumbed, we can simply return */
1309 if (!(Adapter
->e1000g_state
& E1000G_STARTED
)) {
1310 rw_exit(&Adapter
->chip_lock
);
1311 return (DDI_SUCCESS
);
1314 e1000g_stop(Adapter
, B_FALSE
);
1316 rw_exit(&Adapter
->chip_lock
);
1318 /* Disable and stop all the timers */
1319 disable_watchdog_timer(Adapter
);
1320 stop_link_timer(Adapter
);
1321 stop_82547_timer(Adapter
->tx_ring
);
1323 return (DDI_SUCCESS
);
1327 e1000g_init(struct e1000g
*Adapter
)
1330 uint32_t high_water
;
1331 struct e1000_hw
*hw
;
1332 clock_t link_timeout
;
1335 hw
= &Adapter
->shared
;
1338 * reset to put the hardware in a known state
1339 * before we try to do anything with the eeprom
1341 mutex_enter(&e1000g_nvm_lock
);
1342 result
= e1000_reset_hw(hw
);
1343 mutex_exit(&e1000g_nvm_lock
);
1345 if (result
!= E1000_SUCCESS
) {
1346 e1000g_fm_ereport(Adapter
, DDI_FM_DEVICE_INVAL_STATE
);
1350 mutex_enter(&e1000g_nvm_lock
);
1351 result
= e1000_validate_nvm_checksum(hw
);
	if (result < E1000_SUCCESS) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state. Call it again;
		 * if it fails a second time it's a real issue.
		 */
		result = e1000_validate_nvm_checksum(hw);
	}
1360 mutex_exit(&e1000g_nvm_lock
);
1362 if (result
< E1000_SUCCESS
) {
1363 e1000g_log(Adapter
, CE_WARN
,
1364 "Invalid NVM checksum. Please contact "
1365 "the vendor to update the NVM.");
1366 e1000g_fm_ereport(Adapter
, DDI_FM_DEVICE_INVAL_STATE
);
1371 /* Get the local ethernet address. */
1373 mutex_enter(&e1000g_nvm_lock
);
1374 result
= e1000_read_mac_addr(hw
);
1375 mutex_exit(&e1000g_nvm_lock
);
1378 if (result
< E1000_SUCCESS
) {
1379 e1000g_log(Adapter
, CE_WARN
, "Read mac addr failed");
1380 e1000g_fm_ereport(Adapter
, DDI_FM_DEVICE_INVAL_STATE
);
1384 /* check for valid mac address */
1385 if (!is_valid_mac_addr(hw
->mac
.addr
)) {
1386 e1000g_log(Adapter
, CE_WARN
, "Invalid mac addr");
1387 e1000g_fm_ereport(Adapter
, DDI_FM_DEVICE_INVAL_STATE
);
1391 /* Set LAA state for 82571 chipset */
1392 e1000_set_laa_state_82571(hw
, B_TRUE
);
1394 /* Master Latency Timer implementation */
1395 if (Adapter
->master_latency_timer
) {
1396 pci_config_put8(Adapter
->osdep
.cfg_handle
,
1397 PCI_CONF_LATENCY_TIMER
, Adapter
->master_latency_timer
);
1400 if (hw
->mac
.type
< e1000_82547
) {
1404 if (Adapter
->max_frame_size
> FRAME_SIZE_UPTO_8K
)
1405 pba
= E1000_PBA_40K
; /* 40K for Rx, 24K for Tx */
1407 pba
= E1000_PBA_48K
; /* 48K for Rx, 16K for Tx */
1408 } else if ((hw
->mac
.type
== e1000_82571
) ||
1409 (hw
->mac
.type
== e1000_82572
) ||
1410 (hw
->mac
.type
== e1000_80003es2lan
)) {
1414 if (Adapter
->max_frame_size
> FRAME_SIZE_UPTO_8K
)
1415 pba
= E1000_PBA_30K
; /* 30K for Rx, 18K for Tx */
1417 pba
= E1000_PBA_38K
; /* 38K for Rx, 10K for Tx */
1418 } else if (hw
->mac
.type
== e1000_82573
) {
1419 pba
= E1000_PBA_20K
; /* 20K for Rx, 12K for Tx */
1420 } else if (hw
->mac
.type
== e1000_82574
) {
1421 /* Keep adapter default: 20K for Rx, 20K for Tx */
1422 pba
= E1000_READ_REG(hw
, E1000_PBA
);
1423 } else if (hw
->mac
.type
== e1000_ich8lan
) {
1424 pba
= E1000_PBA_8K
; /* 8K for Rx, 12K for Tx */
1425 } else if (hw
->mac
.type
== e1000_ich9lan
) {
1426 pba
= E1000_PBA_10K
;
1427 } else if (hw
->mac
.type
== e1000_ich10lan
) {
1428 pba
= E1000_PBA_10K
;
1429 } else if (hw
->mac
.type
== e1000_pchlan
) {
1430 pba
= E1000_PBA_26K
;
1431 } else if (hw
->mac
.type
== e1000_pch2lan
) {
1432 pba
= E1000_PBA_26K
;
1433 } else if (hw
->mac
.type
== e1000_pch_lpt
) {
1434 pba
= E1000_PBA_26K
;
1435 } else if (hw
->mac
.type
== e1000_pch_spt
) {
1436 pba
= E1000_PBA_26K
;
1441 if (Adapter
->max_frame_size
> FRAME_SIZE_UPTO_8K
)
1442 pba
= E1000_PBA_22K
; /* 22K for Rx, 18K for Tx */
1444 pba
= E1000_PBA_30K
; /* 30K for Rx, 10K for Tx */
1446 E1000_WRITE_REG(hw
, E1000_PBA
, pba
);
	/*
	 * These parameters set thresholds for the adapter's generation (Tx)
	 * of and response (Rx) to Ethernet PAUSE frames. They are just
	 * threshold settings; flow control itself is enabled or disabled in
	 * the configuration file.
	 *
	 * The high-water mark is set down from the top of the rx FIFO (it is
	 * not sensitive to max_frame_size) and the low-water mark is set
	 * just below the high-water mark.
	 *
	 * The high-water mark must be low enough to fit one full frame above
	 * it in the rx FIFO. It should be the lower of:
	 * 90% of the Rx FIFO size, the full Rx FIFO size minus the early
	 * receive size (assuming ERT set to E1000_ERT_2048), or the full
	 * Rx FIFO size minus one full frame.
	 */
1462 high_water
= min(((pba
<< 10) * 9 / 10),
1463 ((hw
->mac
.type
== e1000_82573
|| hw
->mac
.type
== e1000_82574
||
1464 hw
->mac
.type
== e1000_ich9lan
|| hw
->mac
.type
== e1000_ich10lan
) ?
1465 ((pba
<< 10) - (E1000_ERT_2048
<< 3)) :
1466 ((pba
<< 10) - Adapter
->max_frame_size
)));
1468 hw
->fc
.high_water
= high_water
& 0xFFF8;
1469 hw
->fc
.low_water
= hw
->fc
.high_water
- 8;
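
	/*
	 * Worked example (assuming E1000_PBA_48K gives a 48 KiB rx FIFO and
	 * a 1522-byte max frame): high_water = min(49152 * 9 / 10,
	 * 49152 - 1522) = 44236, rounded down to a multiple of 8 -> 44232;
	 * low_water is then 44232 - 8 = 44224.
	 */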
1471 if (hw
->mac
.type
== e1000_80003es2lan
)
1472 hw
->fc
.pause_time
= 0xFFFF;
1474 hw
->fc
.pause_time
= E1000_FC_PAUSE_TIME
;
1475 hw
->fc
.send_xon
= B_TRUE
;
1478 * Reset the adapter hardware the second time.
1480 mutex_enter(&e1000g_nvm_lock
);
1481 result
= e1000_reset_hw(hw
);
1482 mutex_exit(&e1000g_nvm_lock
);
1484 if (result
!= E1000_SUCCESS
) {
1485 e1000g_fm_ereport(Adapter
, DDI_FM_DEVICE_INVAL_STATE
);
1489 /* disable wakeup control by default */
1490 if (hw
->mac
.type
>= e1000_82544
)
1491 E1000_WRITE_REG(hw
, E1000_WUC
, 0);
1494 * MWI should be disabled on 82546.
1496 if (hw
->mac
.type
== e1000_82546
)
1497 e1000_pci_clear_mwi(hw
);
1499 e1000_pci_set_mwi(hw
);
1502 * Configure/Initialize hardware
1504 mutex_enter(&e1000g_nvm_lock
);
1505 result
= e1000_init_hw(hw
);
1506 mutex_exit(&e1000g_nvm_lock
);
1508 if (result
< E1000_SUCCESS
) {
1509 e1000g_log(Adapter
, CE_WARN
, "Initialize hw failed");
1510 e1000g_fm_ereport(Adapter
, DDI_FM_DEVICE_INVAL_STATE
);
1515 * Restore LED settings to the default from EEPROM
1516 * to meet the standard for Sun platforms.
1518 (void) e1000_cleanup_led(hw
);
1520 /* Disable Smart Power Down */
1521 phy_spd_state(hw
, B_FALSE
);
1523 /* Make sure driver has control */
1524 e1000g_get_driver_control(hw
);
1527 * Initialize unicast addresses.
1529 e1000g_init_unicst(Adapter
);
1532 * Setup and initialize the mctable structures. After this routine
1533 * completes Multicast table will be set
1535 e1000_update_mc_addr_list(hw
,
1536 (uint8_t *)Adapter
->mcast_table
, Adapter
->mcast_count
);
1540 * Implement Adaptive IFS
1542 e1000_reset_adaptive(hw
);
1544 /* Setup Interrupt Throttling Register */
1545 if (hw
->mac
.type
>= e1000_82540
) {
1546 E1000_WRITE_REG(hw
, E1000_ITR
, Adapter
->intr_throttling_rate
);
1548 Adapter
->intr_adaptive
= B_FALSE
;
1550 /* Start the timer for link setup */
1551 if (hw
->mac
.autoneg
)
1552 link_timeout
= PHY_AUTO_NEG_LIMIT
* drv_usectohz(100000);
1554 link_timeout
= PHY_FORCE_LIMIT
* drv_usectohz(100000);
1556 mutex_enter(&Adapter
->link_lock
);
1557 if (hw
->phy
.autoneg_wait_to_complete
) {
1558 Adapter
->link_complete
= B_TRUE
;
1560 Adapter
->link_complete
= B_FALSE
;
1561 Adapter
->link_tid
= timeout(e1000g_link_timer
,
1562 (void *)Adapter
, link_timeout
);
1564 mutex_exit(&Adapter
->link_lock
);
1566 /* Save the state of the phy */
1567 e1000g_get_phy_state(Adapter
);
1569 e1000g_param_sync(Adapter
);
1571 Adapter
->init_count
++;
1573 if (e1000g_check_acc_handle(Adapter
->osdep
.cfg_handle
) != DDI_FM_OK
) {
1576 if (e1000g_check_acc_handle(Adapter
->osdep
.reg_handle
) != DDI_FM_OK
) {
1580 Adapter
->poll_mode
= e1000g_poll_mode
;
1582 return (DDI_SUCCESS
);
1585 ddi_fm_service_impact(Adapter
->dip
, DDI_SERVICE_LOST
);
1586 return (DDI_FAILURE
);
1590 e1000g_alloc_rx_data(struct e1000g
*Adapter
)
1592 e1000g_rx_ring_t
*rx_ring
;
1593 e1000g_rx_data_t
*rx_data
;
1595 rx_ring
= Adapter
->rx_ring
;
1597 rx_data
= kmem_zalloc(sizeof (e1000g_rx_data_t
), KM_NOSLEEP
);
1599 if (rx_data
== NULL
)
1600 return (DDI_FAILURE
);
1602 rx_data
->priv_devi_node
= Adapter
->priv_devi_node
;
1603 rx_data
->rx_ring
= rx_ring
;
1605 mutex_init(&rx_data
->freelist_lock
, NULL
,
1606 MUTEX_DRIVER
, DDI_INTR_PRI(Adapter
->intr_pri
));
1607 mutex_init(&rx_data
->recycle_lock
, NULL
,
1608 MUTEX_DRIVER
, DDI_INTR_PRI(Adapter
->intr_pri
));
1610 rx_ring
->rx_data
= rx_data
;
1612 return (DDI_SUCCESS
);
1616 e1000g_free_rx_pending_buffers(e1000g_rx_data_t
*rx_data
)
1618 rx_sw_packet_t
*packet
, *next_packet
;
1620 if (rx_data
== NULL
)
1623 packet
= rx_data
->packet_area
;
1624 while (packet
!= NULL
) {
1625 next_packet
= packet
->next
;
1626 e1000g_free_rx_sw_packet(packet
, B_TRUE
);
1627 packet
= next_packet
;
1629 rx_data
->packet_area
= NULL
;
1633 e1000g_free_rx_data(e1000g_rx_data_t
*rx_data
)
1635 if (rx_data
== NULL
)
1638 mutex_destroy(&rx_data
->freelist_lock
);
1639 mutex_destroy(&rx_data
->recycle_lock
);
1641 kmem_free(rx_data
, sizeof (e1000g_rx_data_t
));
1645 * Check if the link is up
1648 e1000g_link_up(struct e1000g
*Adapter
)
1650 struct e1000_hw
*hw
= &Adapter
->shared
;
1651 boolean_t link_up
= B_FALSE
;
	/*
	 * get_link_status is set in the interrupt handler on a
	 * link-status-change or an rx sequence error interrupt.
	 * get_link_status will stay false until e1000_check_for_link
	 * establishes link; this applies to copper adapters only.
	 */
1659 switch (hw
->phy
.media_type
) {
1660 case e1000_media_type_copper
:
1661 if (hw
->mac
.get_link_status
) {
1663 * SPT devices need a bit of extra time before we ask
1666 if (hw
->mac
.type
== e1000_pch_spt
)
1668 (void) e1000_check_for_link(hw
);
1669 if ((E1000_READ_REG(hw
, E1000_STATUS
) &
1673 link_up
= !hw
->mac
.get_link_status
;
1679 case e1000_media_type_fiber
:
1680 (void) e1000_check_for_link(hw
);
1681 link_up
= (E1000_READ_REG(hw
, E1000_STATUS
) &
1684 case e1000_media_type_internal_serdes
:
1685 (void) e1000_check_for_link(hw
);
1686 link_up
= hw
->mac
.serdes_has_link
;
1694 e1000g_m_ioctl(void *arg
, queue_t
*q
, mblk_t
*mp
)
1696 struct iocblk
*iocp
;
1697 struct e1000g
*e1000gp
;
1698 enum ioc_reply status
;
1700 iocp
= (struct iocblk
*)(uintptr_t)mp
->b_rptr
;
1701 iocp
->ioc_error
= 0;
1702 e1000gp
= (struct e1000g
*)arg
;
1705 if (e1000gp
== NULL
) {
1706 miocnak(q
, mp
, 0, EINVAL
);
1710 rw_enter(&e1000gp
->chip_lock
, RW_READER
);
1711 if (e1000gp
->e1000g_state
& E1000G_SUSPENDED
) {
1712 rw_exit(&e1000gp
->chip_lock
);
1713 miocnak(q
, mp
, 0, EINVAL
);
1716 rw_exit(&e1000gp
->chip_lock
);
1718 switch (iocp
->ioc_cmd
) {
1720 case LB_GET_INFO_SIZE
:
1724 status
= e1000g_loopback_ioctl(e1000gp
, iocp
, mp
);
1729 case E1000G_IOC_REG_PEEK
:
1730 case E1000G_IOC_REG_POKE
:
1731 status
= e1000g_pp_ioctl(e1000gp
, iocp
, mp
);
1733 case E1000G_IOC_CHIP_RESET
:
1734 e1000gp
->reset_count
++;
1735 if (e1000g_reset_adapter(e1000gp
))
1747 * Decide how to reply
1753 * Error, reply with a NAK and EINVAL or the specified error
1755 miocnak(q
, mp
, 0, iocp
->ioc_error
== 0 ?
1756 EINVAL
: iocp
->ioc_error
);
1761 * OK, reply already sent
1767 * OK, reply with an ACK
1769 miocack(q
, mp
, 0, 0);
1774 * OK, send prepared reply as ACK or NAK
1776 mp
->b_datap
->db_type
= iocp
->ioc_error
== 0 ?
1777 M_IOCACK
: M_IOCNAK
;
/*
 * The default value of e1000g_poll_mode == 0 assumes that the NIC is
 * capable of supporting only one interrupt and that we shouldn't disable
 * the physical interrupt. In this case we let the interrupt come in and
 * queue the packets in the rx ring itself when we are in polling mode
 * (better latency, but slightly lower performance and a very high
 * interrupt count in mpstat, which is harmless).
 *
 * e1000g_poll_mode == 1 assumes that we have a per-Rx-ring interrupt
 * which can be disabled in poll mode. This gives better overall
 * throughput (compared to the mode above) and shows a very low interrupt
 * count, but has slightly higher latency since we pick up the packets
 * only when the poll thread does polling.
 *
 * Currently, this flag should be enabled only while doing performance
 * measurement or when it can be guaranteed that the entire NIC going
 * into poll mode will not harm any traffic such as cluster heartbeat.
 */
int e1000g_poll_mode = 0;
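
/*
 * When e1000g_poll_mode is enabled, the MAC layer pulls queued packets
 * through e1000g_poll_ring() below instead of relying on the rx interrupt
 * for every packet.
 */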
/*
 * Called from the upper layers when the driver is in polling mode to
 * pick up any queued packets. Care should be taken to not block.
 */
static mblk_t *
e1000g_poll_ring(void *arg, int bytes_to_pickup)
1810 e1000g_rx_ring_t
*rx_ring
= (e1000g_rx_ring_t
*)arg
;
1813 struct e1000g
*adapter
;
1815 adapter
= rx_ring
->adapter
;
1817 rw_enter(&adapter
->chip_lock
, RW_READER
);
1819 if (adapter
->e1000g_state
& E1000G_SUSPENDED
) {
1820 rw_exit(&adapter
->chip_lock
);
1824 mutex_enter(&rx_ring
->rx_lock
);
1825 mp
= e1000g_receive(rx_ring
, &tail
, bytes_to_pickup
);
1826 mutex_exit(&rx_ring
->rx_lock
);
1827 rw_exit(&adapter
->chip_lock
);
1832 e1000g_m_start(void *arg
)
1834 struct e1000g
*Adapter
= (struct e1000g
*)arg
;
1836 rw_enter(&Adapter
->chip_lock
, RW_WRITER
);
1838 if (Adapter
->e1000g_state
& E1000G_SUSPENDED
) {
1839 rw_exit(&Adapter
->chip_lock
);
1843 if (e1000g_start(Adapter
, B_TRUE
) != DDI_SUCCESS
) {
1844 rw_exit(&Adapter
->chip_lock
);
1845 return (ENOTACTIVE
);
1848 Adapter
->e1000g_state
|= E1000G_STARTED
;
1850 rw_exit(&Adapter
->chip_lock
);
1852 /* Enable and start the watchdog timer */
1853 enable_watchdog_timer(Adapter
);
1859 e1000g_start(struct e1000g
*Adapter
, boolean_t global
)
1861 e1000g_rx_data_t
*rx_data
;
1864 if (e1000g_alloc_rx_data(Adapter
) != DDI_SUCCESS
) {
1865 e1000g_log(Adapter
, CE_WARN
, "Allocate rx data failed");
1869 /* Allocate dma resources for descriptors and buffers */
1870 if (e1000g_alloc_dma_resources(Adapter
) != DDI_SUCCESS
) {
1871 e1000g_log(Adapter
, CE_WARN
,
1872 "Alloc DMA resources failed");
1875 Adapter
->rx_buffer_setup
= B_FALSE
;
1878 if (!(Adapter
->attach_progress
& ATTACH_PROGRESS_INIT
)) {
1879 if (e1000g_init(Adapter
) != DDI_SUCCESS
) {
1880 e1000g_log(Adapter
, CE_WARN
,
1881 "Adapter initialization failed");
1886 /* Setup and initialize the transmit structures */
1887 e1000g_tx_setup(Adapter
);
1890 /* Setup and initialize the receive structures */
1891 e1000g_rx_setup(Adapter
);
1894 /* Restore the e1000g promiscuous mode */
1895 e1000g_restore_promisc(Adapter
);
1897 e1000g_mask_interrupt(Adapter
);
1899 Adapter
->attach_progress
|= ATTACH_PROGRESS_INIT
;
1901 if (e1000g_check_acc_handle(Adapter
->osdep
.reg_handle
) != DDI_FM_OK
) {
1902 ddi_fm_service_impact(Adapter
->dip
, DDI_SERVICE_LOST
);
1906 return (DDI_SUCCESS
);
1909 rx_data
= Adapter
->rx_ring
->rx_data
;
1912 e1000g_release_dma_resources(Adapter
);
1913 e1000g_free_rx_pending_buffers(rx_data
);
1914 e1000g_free_rx_data(rx_data
);
1917 mutex_enter(&e1000g_nvm_lock
);
1918 (void) e1000_reset_hw(&Adapter
->shared
);
1919 mutex_exit(&e1000g_nvm_lock
);
1921 return (DDI_FAILURE
);
/*
 * The I219 has the curious property that if the descriptor rings are not
 * emptied before resetting the hardware or before changing the device state
 * based on runtime power management, it'll cause the card to hang. This can
 * then only be fixed by a PCI reset. As such, for the I219 and it alone, we
 * have to flush the rings if we're in this state.
 */
static void
e1000g_flush_desc_rings(struct e1000g *Adapter)
1934 struct e1000_hw
*hw
= &Adapter
->shared
;
1936 u32 fext_nvm11
, tdlen
;
1938 /* First, disable MULR fix in FEXTNVM11 */
1939 fext_nvm11
= E1000_READ_REG(hw
, E1000_FEXTNVM11
);
1940 fext_nvm11
|= E1000_FEXTNVM11_DISABLE_MULR_FIX
;
1941 E1000_WRITE_REG(hw
, E1000_FEXTNVM11
, fext_nvm11
);
1943 /* do nothing if we're not in faulty state, or if the queue is empty */
1944 tdlen
= E1000_READ_REG(hw
, E1000_TDLEN(0));
1945 hang_state
= pci_config_get16(Adapter
->osdep
.cfg_handle
,
1946 PCICFG_DESC_RING_STATUS
);
1947 if (!(hang_state
& FLUSH_DESC_REQUIRED
) || !tdlen
)
1949 e1000g_flush_tx_ring(Adapter
);
1951 /* recheck, maybe the fault is caused by the rx ring */
1952 hang_state
= pci_config_get16(Adapter
->osdep
.cfg_handle
,
1953 PCICFG_DESC_RING_STATUS
);
1954 if (hang_state
& FLUSH_DESC_REQUIRED
)
1955 e1000g_flush_rx_ring(Adapter
);
1960 e1000g_m_stop(void *arg
)
1962 struct e1000g
*Adapter
= (struct e1000g
*)arg
;
1964 /* Drain tx sessions */
1965 (void) e1000g_tx_drain(Adapter
);
1967 rw_enter(&Adapter
->chip_lock
, RW_WRITER
);
1969 if (Adapter
->e1000g_state
& E1000G_SUSPENDED
) {
1970 rw_exit(&Adapter
->chip_lock
);
1973 Adapter
->e1000g_state
&= ~E1000G_STARTED
;
1974 e1000g_stop(Adapter
, B_TRUE
);
1976 rw_exit(&Adapter
->chip_lock
);
1978 /* Disable and stop all the timers */
1979 disable_watchdog_timer(Adapter
);
1980 stop_link_timer(Adapter
);
1981 stop_82547_timer(Adapter
->tx_ring
);
1985 e1000g_stop(struct e1000g
*Adapter
, boolean_t global
)
1987 private_devi_list_t
*devi_node
;
1988 e1000g_rx_data_t
*rx_data
;
1991 Adapter
->attach_progress
&= ~ATTACH_PROGRESS_INIT
;
1993 /* Stop the chip and release pending resources */
1995 /* Tell firmware driver is no longer in control */
1996 e1000g_release_driver_control(&Adapter
->shared
);
1998 e1000g_clear_all_interrupts(Adapter
);
2000 mutex_enter(&e1000g_nvm_lock
);
2001 result
= e1000_reset_hw(&Adapter
->shared
);
2002 mutex_exit(&e1000g_nvm_lock
);
2004 if (result
!= E1000_SUCCESS
) {
2005 e1000g_fm_ereport(Adapter
, DDI_FM_DEVICE_INVAL_STATE
);
2006 ddi_fm_service_impact(Adapter
->dip
, DDI_SERVICE_LOST
);
2009 mutex_enter(&Adapter
->link_lock
);
2010 Adapter
->link_complete
= B_FALSE
;
2011 mutex_exit(&Adapter
->link_lock
);
2013 /* Release resources still held by the TX descriptors */
2014 e1000g_tx_clean(Adapter
);
2016 if (e1000g_check_acc_handle(Adapter
->osdep
.reg_handle
) != DDI_FM_OK
)
2017 ddi_fm_service_impact(Adapter
->dip
, DDI_SERVICE_LOST
);
2019 /* Clean the pending rx jumbo packet fragment */
2020 e1000g_rx_clean(Adapter
);
	/*
	 * The I219, e.g. the pch_spt, has bugs such that we must ensure
	 * that the rings are flushed before we do anything else. This must
	 * be done before we release DMA resources.
	 */
2027 if (Adapter
->shared
.mac
.type
== e1000_pch_spt
)
2028 e1000g_flush_desc_rings(Adapter
);
2031 e1000g_release_dma_resources(Adapter
);
2033 mutex_enter(&e1000g_rx_detach_lock
);
2034 rx_data
= Adapter
->rx_ring
->rx_data
;
2035 rx_data
->flag
|= E1000G_RX_STOPPED
;
2037 if (rx_data
->pending_count
== 0) {
2038 e1000g_free_rx_pending_buffers(rx_data
);
2039 e1000g_free_rx_data(rx_data
);
2041 devi_node
= rx_data
->priv_devi_node
;
2042 if (devi_node
!= NULL
)
2043 atomic_inc_32(&devi_node
->pending_rx_count
);
2045 atomic_inc_32(&Adapter
->pending_rx_count
);
2047 mutex_exit(&e1000g_rx_detach_lock
);
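
	/*
	 * When rx buffers are still loaned to the upper layers, their
	 * release is deferred: the per-instance (or per-priv_dip)
	 * pending_rx_count is bumped here and the rx data is freed later,
	 * once the last buffer is recycled, under e1000g_rx_detach_lock.
	 */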
2050 if (Adapter
->link_state
!= LINK_STATE_UNKNOWN
) {
2051 Adapter
->link_state
= LINK_STATE_UNKNOWN
;
2052 if (!Adapter
->reset_flag
)
2053 mac_link_update(Adapter
->mh
, Adapter
->link_state
);
2058 e1000g_rx_clean(struct e1000g
*Adapter
)
2060 e1000g_rx_data_t
*rx_data
= Adapter
->rx_ring
->rx_data
;
2062 if (rx_data
== NULL
)
2065 if (rx_data
->rx_mblk
!= NULL
) {
2066 freemsg(rx_data
->rx_mblk
);
2067 rx_data
->rx_mblk
= NULL
;
2068 rx_data
->rx_mblk_tail
= NULL
;
2069 rx_data
->rx_mblk_len
= 0;
2074 e1000g_tx_clean(struct e1000g
*Adapter
)
2076 e1000g_tx_ring_t
*tx_ring
;
2077 p_tx_sw_packet_t packet
;
2080 uint32_t packet_count
;
2082 tx_ring
= Adapter
->tx_ring
;
	/*
	 * Here we don't need to protect the lists with the
	 * usedlist_lock and freelist_lock, because they are
	 * already protected by the chip_lock.
	 */
2092 packet
= (p_tx_sw_packet_t
)QUEUE_GET_HEAD(&tx_ring
->used_list
);
2093 while (packet
!= NULL
) {
2094 if (packet
->mp
!= NULL
) {
2095 /* Assemble the message chain */
2100 nmp
->b_next
= packet
->mp
;
2103 /* Disconnect the message from the sw packet */
2107 e1000g_free_tx_swpkt(packet
);
2110 packet
= (p_tx_sw_packet_t
)
2111 QUEUE_GET_NEXT(&tx_ring
->used_list
, &packet
->Link
);
2117 if (packet_count
> 0) {
2118 QUEUE_APPEND(&tx_ring
->free_list
, &tx_ring
->used_list
);
2119 QUEUE_INIT_LIST(&tx_ring
->used_list
);
2121 /* Setup TX descriptor pointers */
2122 tx_ring
->tbd_next
= tx_ring
->tbd_first
;
2123 tx_ring
->tbd_oldest
= tx_ring
->tbd_first
;
2125 /* Setup our HW Tx Head & Tail descriptor pointers */
2126 E1000_WRITE_REG(&Adapter
->shared
, E1000_TDH(0), 0);
2127 E1000_WRITE_REG(&Adapter
->shared
, E1000_TDT(0), 0);
2132 e1000g_tx_drain(struct e1000g
*Adapter
)
2136 e1000g_tx_ring_t
*tx_ring
;
2138 tx_ring
= Adapter
->tx_ring
;
2140 /* Allow up to 'wsdraintime' for pending xmit's to complete. */
2141 for (i
= 0; i
< TX_DRAIN_TIME
; i
++) {
2142 mutex_enter(&tx_ring
->usedlist_lock
);
2143 done
= IS_QUEUE_EMPTY(&tx_ring
->used_list
);
2144 mutex_exit(&tx_ring
->usedlist_lock
);
static boolean_t
e1000g_rx_drain(struct e1000g *Adapter)
{
	int i;
	boolean_t done;

	/*
	 * Allow up to RX_DRAIN_TIME for pending received packets to complete.
	 */
	for (i = 0; i < RX_DRAIN_TIME; i++) {
		done = (Adapter->pending_rx_count == 0);

		if (done)
			break;

		msec_delay(1);
	}

	return (done);
}
static boolean_t
e1000g_reset_adapter(struct e1000g *Adapter)
{
	/* Disable and stop all the timers */
	disable_watchdog_timer(Adapter);
	stop_link_timer(Adapter);
	stop_82547_timer(Adapter->tx_ring);

	rw_enter(&Adapter->chip_lock, RW_WRITER);

	if (Adapter->stall_flag) {
		Adapter->stall_flag = B_FALSE;
		Adapter->reset_flag = B_TRUE;
	}

	if (!(Adapter->e1000g_state & E1000G_STARTED)) {
		rw_exit(&Adapter->chip_lock);
		return (B_TRUE);
	}

	e1000g_stop(Adapter, B_FALSE);

	if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
		rw_exit(&Adapter->chip_lock);
		e1000g_log(Adapter, CE_WARN, "Reset failed");
		return (B_FALSE);
	}

	rw_exit(&Adapter->chip_lock);

	/* Enable and start the watchdog timer */
	enable_watchdog_timer(Adapter);

	return (B_TRUE);
}
static boolean_t
e1000g_global_reset(struct e1000g *Adapter)
{
	/* Disable and stop all the timers */
	disable_watchdog_timer(Adapter);
	stop_link_timer(Adapter);
	stop_82547_timer(Adapter->tx_ring);

	rw_enter(&Adapter->chip_lock, RW_WRITER);

	e1000g_stop(Adapter, B_TRUE);

	Adapter->init_count = 0;

	if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
		rw_exit(&Adapter->chip_lock);
		e1000g_log(Adapter, CE_WARN, "Reset failed");
		return (B_FALSE);
	}

	rw_exit(&Adapter->chip_lock);

	/* Enable and start the watchdog timer */
	enable_watchdog_timer(Adapter);

	return (B_TRUE);
}
/*
 * e1000g_intr_pciexpress - ISR for PCI Express chipsets
 *
 * This interrupt service routine is for PCI-Express adapters.
 * The ICR contents are valid only when the E1000_ICR_INT_ASSERTED
 * bit is set.
 */
static uint_t
e1000g_intr_pciexpress(caddr_t arg)
{
	struct e1000g *Adapter;
	uint32_t icr;

	Adapter = (struct e1000g *)(uintptr_t)arg;
	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		return (DDI_INTR_CLAIMED);
	}

	if (icr & E1000_ICR_INT_ASSERTED) {
		/*
		 * E1000_ICR_INT_ASSERTED bit was set:
		 * Read(Clear) the ICR, claim this interrupt,
		 * look for work to do.
		 */
		e1000g_intr_work(Adapter, icr);
		return (DDI_INTR_CLAIMED);
	} else {
		/*
		 * E1000_ICR_INT_ASSERTED bit was not set:
		 * Don't claim this interrupt, return immediately.
		 */
		return (DDI_INTR_UNCLAIMED);
	}
}
/*
 * e1000g_intr - ISR for PCI/PCI-X chipsets
 *
 * This interrupt service routine is for PCI/PCI-X adapters.
 * We check the ICR contents no matter whether the E1000_ICR_INT_ASSERTED
 * bit is set or not.
 */
static uint_t
e1000g_intr(caddr_t arg)
{
	struct e1000g *Adapter;
	uint32_t icr;

	Adapter = (struct e1000g *)(uintptr_t)arg;
	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		return (DDI_INTR_CLAIMED);
	}

	if (icr) {
		/*
		 * Any bit was set in ICR:
		 * Read(Clear) the ICR, claim this interrupt,
		 * look for work to do.
		 */
		e1000g_intr_work(Adapter, icr);
		return (DDI_INTR_CLAIMED);
	} else {
		/*
		 * No bit was set in ICR:
		 * Don't claim this interrupt, return immediately.
		 */
		return (DDI_INTR_UNCLAIMED);
	}
}
/*
 * e1000g_intr_work - actual processing of ISR
 *
 * Read(clear) the ICR contents and call appropriate interrupt
 * processing routines.
 */
static void
e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
{
	struct e1000_hw *hw;
	hw = &Adapter->shared;
	e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;

	Adapter->rx_pkt_cnt = 0;
	Adapter->tx_pkt_cnt = 0;

	rw_enter(&Adapter->chip_lock, RW_READER);

	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
		rw_exit(&Adapter->chip_lock);
		return;
	}
	/*
	 * Here we need to check the "e1000g_state" flag within the chip_lock
	 * to ensure the receive routine will not execute when the adapter is
	 * stopped.
	 */
	if (!(Adapter->e1000g_state & E1000G_STARTED)) {
		rw_exit(&Adapter->chip_lock);
		return;
	}

	if (icr & E1000_ICR_RXT0) {
		mblk_t *mp = NULL;
		mblk_t *tail = NULL;
		e1000g_rx_ring_t *rx_ring;

		rx_ring = Adapter->rx_ring;
		mutex_enter(&rx_ring->rx_lock);
		/*
		 * Sometimes with legacy interrupts, it is possible that
		 * there is a single interrupt for Rx/Tx. In which
		 * case, if poll flag is set, we shouldn't really
		 * be doing Rx processing.
		 */
		if (!rx_ring->poll_flag)
			mp = e1000g_receive(rx_ring, &tail,
			    E1000G_CHAIN_NO_LIMIT);
		mutex_exit(&rx_ring->rx_lock);
		rw_exit(&Adapter->chip_lock);
		if (mp != NULL)
			mac_rx_ring(Adapter->mh, rx_ring->mrh,
			    mp, rx_ring->ring_gen_num);
	} else
		rw_exit(&Adapter->chip_lock);

	if (icr & E1000_ICR_TXDW) {
		if (!Adapter->tx_intr_enable)
			e1000g_clear_tx_interrupt(Adapter);

		/* Recycle the tx descriptors */
		rw_enter(&Adapter->chip_lock, RW_READER);
		(void) e1000g_recycle(tx_ring);
		E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
		rw_exit(&Adapter->chip_lock);

		if (tx_ring->resched_needed &&
		    (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) {
			tx_ring->resched_needed = B_FALSE;
			mac_tx_update(Adapter->mh);
			E1000G_STAT(tx_ring->stat_reschedule);
		}
	}

	/*
	 * The Receive Sequence errors RXSEQ and the link status change LSC
	 * are checked to detect that the cable has been pulled out. For
	 * the Wiseman 2.0 silicon, the receive sequence errors interrupt
	 * is an indication that the cable is not connected.
	 */
	if ((icr & E1000_ICR_RXSEQ) ||
	    (icr & E1000_ICR_LSC) ||
	    (icr & E1000_ICR_GPI_EN1)) {
		boolean_t link_changed;
		timeout_id_t tid = 0;

		stop_watchdog_timer(Adapter);

		rw_enter(&Adapter->chip_lock, RW_WRITER);

		/*
		 * Because we got a link-status-change interrupt, force
		 * e1000_check_for_link() to look at the phy.
		 */
		Adapter->shared.mac.get_link_status = B_TRUE;

		/* e1000g_link_check takes care of link status change */
		link_changed = e1000g_link_check(Adapter);

		/* Get new phy state */
		e1000g_get_phy_state(Adapter);

		/*
		 * If the link timer has not timed out, we'll not notify
		 * the upper layer with any link state until the link is up.
		 */
		if (link_changed && !Adapter->link_complete) {
			if (Adapter->link_state == LINK_STATE_UP) {
				mutex_enter(&Adapter->link_lock);
				Adapter->link_complete = B_TRUE;
				tid = Adapter->link_tid;
				Adapter->link_tid = 0;
				mutex_exit(&Adapter->link_lock);
			} else {
				link_changed = B_FALSE;
			}
		}
		rw_exit(&Adapter->chip_lock);

		if (link_changed) {
			if (tid != 0)
				(void) untimeout(tid);

			/*
			 * Workaround for esb2. Data stuck in fifo on a link
			 * down event. Stop receiver here and reset in
			 * watchdog.
			 */
			if ((Adapter->link_state == LINK_STATE_DOWN) &&
			    (Adapter->shared.mac.type == e1000_80003es2lan)) {
				uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
				E1000_WRITE_REG(hw, E1000_RCTL,
				    rctl & ~E1000_RCTL_EN);
				e1000g_log(Adapter, CE_WARN,
				    "ESB2 receiver disabled");
				Adapter->esb2_workaround = B_TRUE;
			}
			if (!Adapter->reset_flag)
				mac_link_update(Adapter->mh,
				    Adapter->link_state);
			if (Adapter->link_state == LINK_STATE_UP)
				Adapter->reset_flag = B_FALSE;
		}

		start_watchdog_timer(Adapter);
	}
}
2464 e1000g_init_unicst(struct e1000g
*Adapter
)
2466 struct e1000_hw
*hw
;
2469 hw
= &Adapter
->shared
;
2471 if (Adapter
->init_count
== 0) {
2472 /* Initialize the multiple unicast addresses */
2473 Adapter
->unicst_total
= min(hw
->mac
.rar_entry_count
,
2474 MAX_NUM_UNICAST_ADDRESSES
);
	/*
	 * The common code does not correctly calculate the number of
	 * rar's that could be reserved by firmware for the pch_lpt and
	 * pch_spt macs. The interface has one primary rar, and 11
	 * additional ones. Those 11 additional ones are not always
	 * available. According to the datasheet, we need to check a
	 * few of the bits set in the FWSM register. If the value is
	 * zero, everything is available. If the value is 1, none of the
	 * additional registers are available. If the value is 2-7, only
	 * that number are available.
	 */
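	/*
	 * Illustrative sketch only (kept out of the build with #if 0): the
	 * FWSM decode described above, written as a standalone helper. The
	 * helper name rar_count_from_fwsm() is made up for illustration;
	 * the driver performs this decode inline just below.
	 */
#if 0
	static uint32_t
	rar_count_from_fwsm(struct e1000_hw *hw)
	{
		uint32_t locked;

		locked = E1000_READ_REG(hw, E1000_FWSM) &
		    E1000_FWSM_WLOCK_MAC_MASK;
		locked >>= E1000_FWSM_WLOCK_MAC_SHIFT;

		if (locked == 0)	/* all RAR entries usable */
			return (hw->mac.rar_entry_count);
		if (locked == 1)	/* only the primary RAR usable */
			return (1);
		return (locked);	/* 2-7: that many RARs usable */
	}
#endif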
2487 if (hw
->mac
.type
== e1000_pch_lpt
||
2488 hw
->mac
.type
== e1000_pch_spt
) {
2489 uint32_t locked
, rar
;
2491 locked
= E1000_READ_REG(hw
, E1000_FWSM
) &
2492 E1000_FWSM_WLOCK_MAC_MASK
;
2493 locked
>>= E1000_FWSM_WLOCK_MAC_SHIFT
;
2497 else if (locked
== 1)
2501 Adapter
->unicst_total
= min(rar
,
2502 MAX_NUM_UNICAST_ADDRESSES
);
2505 /* Workaround for an erratum of 82571 chipset */
2506 if ((hw
->mac
.type
== e1000_82571
) &&
2507 (e1000_get_laa_state_82571(hw
) == B_TRUE
))
2508 Adapter
->unicst_total
--;
2510 /* VMware doesn't support multiple mac addresses properly */
2511 if (hw
->subsystem_vendor_id
== 0x15ad)
2512 Adapter
->unicst_total
= 1;
2514 Adapter
->unicst_avail
= Adapter
->unicst_total
;
2516 for (slot
= 0; slot
< Adapter
->unicst_total
; slot
++) {
2517 /* Clear both the flag and MAC address */
2518 Adapter
->unicst_addr
[slot
].reg
.high
= 0;
2519 Adapter
->unicst_addr
[slot
].reg
.low
= 0;
2522 /* Workaround for an erratum of 82571 chipset */
2523 if ((hw
->mac
.type
== e1000_82571
) &&
2524 (e1000_get_laa_state_82571(hw
) == B_TRUE
))
2525 (void) e1000_rar_set(hw
, hw
->mac
.addr
, LAST_RAR_ENTRY
);
2527 /* Re-configure the RAR registers */
2528 for (slot
= 0; slot
< Adapter
->unicst_total
; slot
++)
2529 if (Adapter
->unicst_addr
[slot
].mac
.set
== 1)
2530 (void) e1000_rar_set(hw
,
2531 Adapter
->unicst_addr
[slot
].mac
.addr
, slot
);
2534 if (e1000g_check_acc_handle(Adapter
->osdep
.reg_handle
) != DDI_FM_OK
)
2535 ddi_fm_service_impact(Adapter
->dip
, DDI_SERVICE_DEGRADED
);
2539 e1000g_unicst_set(struct e1000g
*Adapter
, const uint8_t *mac_addr
,
2542 struct e1000_hw
*hw
;
2544 hw
= &Adapter
->shared
;
2547 * The first revision of Wiseman silicon (rev 2.0) has an errata
2548 * that requires the receiver to be in reset when any of the
2549 * receive address registers (RAR regs) are accessed. The first
2550 * rev of Wiseman silicon also requires MWI to be disabled when
2551 * a global reset or a receive reset is issued. So before we
2552 * initialize the RARs, we check the rev of the Wiseman controller
2553 * and work around any necessary HW errata.
2555 if ((hw
->mac
.type
== e1000_82542
) &&
2556 (hw
->revision_id
== E1000_REVISION_2
)) {
2557 e1000_pci_clear_mwi(hw
);
2558 E1000_WRITE_REG(hw
, E1000_RCTL
, E1000_RCTL_RST
);
2561 if (mac_addr
== NULL
) {
2562 E1000_WRITE_REG_ARRAY(hw
, E1000_RA
, slot
<< 1, 0);
2563 E1000_WRITE_FLUSH(hw
);
2564 E1000_WRITE_REG_ARRAY(hw
, E1000_RA
, (slot
<< 1) + 1, 0);
2565 E1000_WRITE_FLUSH(hw
);
2566 /* Clear both the flag and MAC address */
2567 Adapter
->unicst_addr
[slot
].reg
.high
= 0;
2568 Adapter
->unicst_addr
[slot
].reg
.low
= 0;
2570 bcopy(mac_addr
, Adapter
->unicst_addr
[slot
].mac
.addr
,
2572 (void) e1000_rar_set(hw
, (uint8_t *)mac_addr
, slot
);
2573 Adapter
->unicst_addr
[slot
].mac
.set
= 1;
2576 /* Workaround for an erratum of 82571 chipset */
2578 if ((hw
->mac
.type
== e1000_82571
) &&
2579 (e1000_get_laa_state_82571(hw
) == B_TRUE
))
2580 if (mac_addr
== NULL
) {
2581 E1000_WRITE_REG_ARRAY(hw
, E1000_RA
,
2583 E1000_WRITE_FLUSH(hw
);
2584 E1000_WRITE_REG_ARRAY(hw
, E1000_RA
,
2585 (slot
<< 1) + 1, 0);
2586 E1000_WRITE_FLUSH(hw
);
2588 (void) e1000_rar_set(hw
, (uint8_t *)mac_addr
,
2594 * If we are using Wiseman rev 2.0 silicon, we will have previously
2595 * put the receive in reset, and disabled MWI, to work around some
2596 * HW errata. Now we should take the receiver out of reset, and
2597 * re-enable MWI if it was previously enabled by the PCI BIOS.
2599 if ((hw
->mac
.type
== e1000_82542
) &&
2600 (hw
->revision_id
== E1000_REVISION_2
)) {
2601 E1000_WRITE_REG(hw
, E1000_RCTL
, 0);
2603 if (hw
->bus
.pci_cmd_word
& CMD_MEM_WRT_INVALIDATE
)
2604 e1000_pci_set_mwi(hw
);
2605 e1000g_rx_setup(Adapter
);
2608 if (e1000g_check_acc_handle(Adapter
->osdep
.reg_handle
) != DDI_FM_OK
) {
2609 ddi_fm_service_impact(Adapter
->dip
, DDI_SERVICE_DEGRADED
);
2617 multicst_add(struct e1000g
*Adapter
, const uint8_t *multiaddr
)
2619 struct e1000_hw
*hw
= &Adapter
->shared
;
2620 struct ether_addr
*newtable
;
2625 if ((multiaddr
[0] & 01) == 0) {
2627 e1000g_log(Adapter
, CE_WARN
, "Illegal multicast address");
2631 if (Adapter
->mcast_count
>= Adapter
->mcast_max_num
) {
2633 e1000g_log(Adapter
, CE_WARN
,
2634 "Adapter requested more than %d mcast addresses",
2635 Adapter
->mcast_max_num
);
2640 if (Adapter
->mcast_count
== Adapter
->mcast_alloc_count
) {
2641 old_len
= Adapter
->mcast_alloc_count
*
2642 sizeof (struct ether_addr
);
2643 new_len
= (Adapter
->mcast_alloc_count
+ MCAST_ALLOC_SIZE
) *
2644 sizeof (struct ether_addr
);
2646 newtable
= kmem_alloc(new_len
, KM_NOSLEEP
);
2647 if (newtable
== NULL
) {
2649 e1000g_log(Adapter
, CE_WARN
,
2650 "Not enough memory to alloc mcast table");
2654 if (Adapter
->mcast_table
!= NULL
) {
2655 bcopy(Adapter
->mcast_table
, newtable
, old_len
);
2656 kmem_free(Adapter
->mcast_table
, old_len
);
2658 Adapter
->mcast_alloc_count
+= MCAST_ALLOC_SIZE
;
2659 Adapter
->mcast_table
= newtable
;
2663 &Adapter
->mcast_table
[Adapter
->mcast_count
], ETHERADDRL
);
2664 Adapter
->mcast_count
++;
2667 * Update the MC table in the hardware
2669 e1000g_clear_interrupt(Adapter
);
2671 e1000_update_mc_addr_list(hw
,
2672 (uint8_t *)Adapter
->mcast_table
, Adapter
->mcast_count
);
2674 e1000g_mask_interrupt(Adapter
);
2676 if (e1000g_check_acc_handle(Adapter
->osdep
.reg_handle
) != DDI_FM_OK
) {
2677 ddi_fm_service_impact(Adapter
->dip
, DDI_SERVICE_DEGRADED
);
2686 multicst_remove(struct e1000g
*Adapter
, const uint8_t *multiaddr
)
2688 struct e1000_hw
*hw
= &Adapter
->shared
;
2689 struct ether_addr
*newtable
;
2694 for (i
= 0; i
< Adapter
->mcast_count
; i
++) {
2695 if (bcmp(multiaddr
, &Adapter
->mcast_table
[i
],
2697 for (i
++; i
< Adapter
->mcast_count
; i
++) {
2698 Adapter
->mcast_table
[i
- 1] =
2699 Adapter
->mcast_table
[i
];
2701 Adapter
->mcast_count
--;
2706 if ((Adapter
->mcast_alloc_count
- Adapter
->mcast_count
) >
2708 old_len
= Adapter
->mcast_alloc_count
*
2709 sizeof (struct ether_addr
);
2710 new_len
= (Adapter
->mcast_alloc_count
- MCAST_ALLOC_SIZE
) *
2711 sizeof (struct ether_addr
);
2713 newtable
= kmem_alloc(new_len
, KM_NOSLEEP
);
2714 if (newtable
!= NULL
) {
2715 bcopy(Adapter
->mcast_table
, newtable
, new_len
);
2716 kmem_free(Adapter
->mcast_table
, old_len
);
2718 Adapter
->mcast_alloc_count
-= MCAST_ALLOC_SIZE
;
2719 Adapter
->mcast_table
= newtable
;
2724 * Update the MC table in the hardware
2726 e1000g_clear_interrupt(Adapter
);
2728 e1000_update_mc_addr_list(hw
,
2729 (uint8_t *)Adapter
->mcast_table
, Adapter
->mcast_count
);
2731 e1000g_mask_interrupt(Adapter
);
2733 if (e1000g_check_acc_handle(Adapter
->osdep
.reg_handle
) != DDI_FM_OK
) {
2734 ddi_fm_service_impact(Adapter
->dip
, DDI_SERVICE_DEGRADED
);
2742 e1000g_release_multicast(struct e1000g
*Adapter
)
2744 if (Adapter
->mcast_table
!= NULL
) {
2745 kmem_free(Adapter
->mcast_table
,
2746 Adapter
->mcast_alloc_count
* sizeof (struct ether_addr
));
2747 Adapter
->mcast_table
= NULL
;
static int
e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
{
	struct e1000g *Adapter = (struct e1000g *)arg;
	int result;

	rw_enter(&Adapter->chip_lock, RW_WRITER);

	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
		result = ECANCELED;
		goto done;
	}

	result = (add) ? multicst_add(Adapter, addr)
	    : multicst_remove(Adapter, addr);

done:
	rw_exit(&Adapter->chip_lock);

	return (result);
}
static int
e1000g_m_promisc(void *arg, boolean_t on)
{
	struct e1000g *Adapter = (struct e1000g *)arg;
	uint32_t rctl;

	rw_enter(&Adapter->chip_lock, RW_WRITER);

	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
		rw_exit(&Adapter->chip_lock);
		return (ECANCELED);
	}

	rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);

	if (on)
		rctl |=
		    (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
	else
		rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));

	E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);

	Adapter->e1000g_promisc = on;

	rw_exit(&Adapter->chip_lock);

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}
/*
 * Entry points to enable and disable interrupts at the granularity of
 * a group of rings.
 * Turns the poll_mode for the whole adapter on and off to enable or
 * override the ring level polling control over the hardware interrupts.
 */
static int
e1000g_rx_group_intr_enable(mac_intr_handle_t arg)
{
	struct e1000g *adapter = (struct e1000g *)arg;
	e1000g_rx_ring_t *rx_ring = adapter->rx_ring;

	/*
	 * Later interrupts at the granularity of this ring will
	 * invoke mac_rx() with NULL, indicating the need for another
	 * software classification.
	 * We have a single ring usable per adapter now, so we only need to
	 * reset the rx handle for that one.
	 * When more RX rings can be used, we should update each one of them.
	 */
	mutex_enter(&rx_ring->rx_lock);
	rx_ring->mrh = NULL;
	adapter->poll_mode = B_FALSE;
	mutex_exit(&rx_ring->rx_lock);

	return (0);
}
static int
e1000g_rx_group_intr_disable(mac_intr_handle_t arg)
{
	struct e1000g *adapter = (struct e1000g *)arg;
	e1000g_rx_ring_t *rx_ring = adapter->rx_ring;

	mutex_enter(&rx_ring->rx_lock);

	/*
	 * Later interrupts at the granularity of this ring will
	 * invoke mac_rx() with the handle for this ring.
	 */
	adapter->poll_mode = B_TRUE;
	rx_ring->mrh = rx_ring->mrh_init;
	mutex_exit(&rx_ring->rx_lock);

	return (0);
}
/*
 * Entry points to enable and disable interrupts at the granularity of
 * a ring. The adapter poll_mode controls whether we actually proceed with
 * hardware interrupt toggling.
 */
static int
e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh)
{
	e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh;
	struct e1000g *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->shared;
	uint32_t intr_mask;

	rw_enter(&adapter->chip_lock, RW_READER);

	if (adapter->e1000g_state & E1000G_SUSPENDED) {
		rw_exit(&adapter->chip_lock);
		return (0);
	}

	mutex_enter(&rx_ring->rx_lock);
	rx_ring->poll_flag = 0;
	mutex_exit(&rx_ring->rx_lock);

	/* Rx interrupt enabling for MSI and legacy */
	intr_mask = E1000_READ_REG(hw, E1000_IMS);
	intr_mask |= E1000_IMS_RXT0;
	E1000_WRITE_REG(hw, E1000_IMS, intr_mask);
	E1000_WRITE_FLUSH(hw);

	/* Trigger a Rx interrupt to check Rx ring */
	E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
	E1000_WRITE_FLUSH(hw);

	rw_exit(&adapter->chip_lock);
	return (0);
}
static int
e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh)
{
	e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh;
	struct e1000g *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->shared;

	rw_enter(&adapter->chip_lock, RW_READER);

	if (adapter->e1000g_state & E1000G_SUSPENDED) {
		rw_exit(&adapter->chip_lock);
		return (0);
	}
	mutex_enter(&rx_ring->rx_lock);
	rx_ring->poll_flag = 1;
	mutex_exit(&rx_ring->rx_lock);

	/* Rx interrupt disabling for MSI and legacy */
	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
	E1000_WRITE_FLUSH(hw);

	rw_exit(&adapter->chip_lock);
	return (0);
}
/*
 * e1000g_unicst_find - Find the slot for the specified unicast address
 */
static int
e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr)
{
	int slot;

	for (slot = 0; slot < Adapter->unicst_total; slot++) {
		if ((Adapter->unicst_addr[slot].mac.set == 1) &&
		    (bcmp(Adapter->unicst_addr[slot].mac.addr,
		    mac_addr, ETHERADDRL) == 0))
			return (slot);
	}

	return (-1);
}
2936 * Entry points to add and remove a MAC address to a ring group.
2937 * The caller takes care of adding and removing the MAC addresses
2938 * to the filter via these two routines.
2942 e1000g_addmac(void *arg
, const uint8_t *mac_addr
)
2944 struct e1000g
*Adapter
= (struct e1000g
*)arg
;
2947 rw_enter(&Adapter
->chip_lock
, RW_WRITER
);
2949 if (Adapter
->e1000g_state
& E1000G_SUSPENDED
) {
2950 rw_exit(&Adapter
->chip_lock
);
2954 if (e1000g_unicst_find(Adapter
, mac_addr
) != -1) {
2955 /* The same address is already in slot */
2956 rw_exit(&Adapter
->chip_lock
);
2960 if (Adapter
->unicst_avail
== 0) {
2961 /* no slots available */
2962 rw_exit(&Adapter
->chip_lock
);
2966 /* Search for a free slot */
2967 for (slot
= 0; slot
< Adapter
->unicst_total
; slot
++) {
2968 if (Adapter
->unicst_addr
[slot
].mac
.set
== 0)
2971 ASSERT(slot
< Adapter
->unicst_total
);
2973 err
= e1000g_unicst_set(Adapter
, mac_addr
, slot
);
2975 Adapter
->unicst_avail
--;
2977 rw_exit(&Adapter
->chip_lock
);
2983 e1000g_remmac(void *arg
, const uint8_t *mac_addr
)
2985 struct e1000g
*Adapter
= (struct e1000g
*)arg
;
2988 rw_enter(&Adapter
->chip_lock
, RW_WRITER
);
2990 if (Adapter
->e1000g_state
& E1000G_SUSPENDED
) {
2991 rw_exit(&Adapter
->chip_lock
);
2995 slot
= e1000g_unicst_find(Adapter
, mac_addr
);
2997 rw_exit(&Adapter
->chip_lock
);
3001 ASSERT(Adapter
->unicst_addr
[slot
].mac
.set
);
3003 /* Clear this slot */
3004 err
= e1000g_unicst_set(Adapter
, NULL
, slot
);
3006 Adapter
->unicst_avail
++;
3008 rw_exit(&Adapter
->chip_lock
);
static int
e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh;

	mutex_enter(&rx_ring->rx_lock);
	rx_ring->ring_gen_num = mr_gen_num;
	mutex_exit(&rx_ring->rx_lock);

	return (0);
}
3025 * Callback function for MAC layer to register all rings.
3027 * The hardware supports a single group with currently only one ring
3029 * Though not offering virtualization ability per se, exposing the
3030 * group/ring still enables the polling and interrupt toggling.
3034 e1000g_fill_ring(void *arg
, mac_ring_type_t rtype
, const int grp_index
,
3035 const int ring_index
, mac_ring_info_t
*infop
, mac_ring_handle_t rh
)
3037 struct e1000g
*Adapter
= (struct e1000g
*)arg
;
3038 e1000g_rx_ring_t
*rx_ring
= Adapter
->rx_ring
;
3042 * We advertised only RX group/rings, so the MAC framework shouldn't
3043 * ask for anything else.
3045 ASSERT(rtype
== MAC_RING_TYPE_RX
&& grp_index
== 0 && ring_index
== 0);
3047 rx_ring
->mrh
= rx_ring
->mrh_init
= rh
;
3048 infop
->mri_driver
= (mac_ring_driver_t
)rx_ring
;
3049 infop
->mri_start
= e1000g_ring_start
;
3050 infop
->mri_stop
= NULL
;
3051 infop
->mri_poll
= e1000g_poll_ring
;
3052 infop
->mri_stat
= e1000g_rx_ring_stat
;
3054 /* Ring level interrupts */
3055 mintr
= &infop
->mri_intr
;
3056 mintr
->mi_handle
= (mac_intr_handle_t
)rx_ring
;
3057 mintr
->mi_enable
= e1000g_rx_ring_intr_enable
;
3058 mintr
->mi_disable
= e1000g_rx_ring_intr_disable
;
3059 if (Adapter
->msi_enable
)
3060 mintr
->mi_ddi_handle
= Adapter
->htable
[0];
3065 e1000g_fill_group(void *arg
, mac_ring_type_t rtype
, const int grp_index
,
3066 mac_group_info_t
*infop
, mac_group_handle_t gh
)
3068 struct e1000g
*Adapter
= (struct e1000g
*)arg
;
3072 * We advertised a single RX ring. Getting a request for anything else
3073 * signifies a bug in the MAC framework.
3075 ASSERT(rtype
== MAC_RING_TYPE_RX
&& grp_index
== 0);
3077 Adapter
->rx_group
= gh
;
3079 infop
->mgi_driver
= (mac_group_driver_t
)Adapter
;
3080 infop
->mgi_start
= NULL
;
3081 infop
->mgi_stop
= NULL
;
3082 infop
->mgi_addmac
= e1000g_addmac
;
3083 infop
->mgi_remmac
= e1000g_remmac
;
3084 infop
->mgi_count
= 1;
3086 /* Group level interrupts */
3087 mintr
= &infop
->mgi_intr
;
3088 mintr
->mi_handle
= (mac_intr_handle_t
)Adapter
;
3089 mintr
->mi_enable
= e1000g_rx_group_intr_enable
;
3090 mintr
->mi_disable
= e1000g_rx_group_intr_disable
;
3094 e1000g_m_getcapab(void *arg
, mac_capab_t cap
, void *cap_data
)
3096 struct e1000g
*Adapter
= (struct e1000g
*)arg
;
3099 case MAC_CAPAB_HCKSUM
: {
3100 uint32_t *txflags
= cap_data
;
3102 if (Adapter
->tx_hcksum_enable
)
3103 *txflags
= HCKSUM_IPHDRCKSUM
|
3104 HCKSUM_INET_PARTIAL
;
3110 case MAC_CAPAB_LSO
: {
3111 mac_capab_lso_t
*cap_lso
= cap_data
;
3113 if (Adapter
->lso_enable
) {
3114 cap_lso
->lso_flags
= LSO_TX_BASIC_TCP_IPV4
;
3115 cap_lso
->lso_basic_tcp_ipv4
.lso_max
=
3121 case MAC_CAPAB_RINGS
: {
3122 mac_capab_rings_t
*cap_rings
= cap_data
;
3124 /* No TX rings exposed yet */
3125 if (cap_rings
->mr_type
!= MAC_RING_TYPE_RX
)
3128 cap_rings
->mr_group_type
= MAC_GROUP_TYPE_STATIC
;
3129 cap_rings
->mr_rnum
= 1;
3130 cap_rings
->mr_gnum
= 1;
3131 cap_rings
->mr_rget
= e1000g_fill_ring
;
3132 cap_rings
->mr_gget
= e1000g_fill_group
;
3142 e1000g_param_locked(mac_prop_id_t pr_num
)
3145 * All en_* parameters are locked (read-only) while
3146 * the device is in any sort of loopback mode ...
3149 case MAC_PROP_EN_1000FDX_CAP
:
3150 case MAC_PROP_EN_1000HDX_CAP
:
3151 case MAC_PROP_EN_100FDX_CAP
:
3152 case MAC_PROP_EN_100HDX_CAP
:
3153 case MAC_PROP_EN_10FDX_CAP
:
3154 case MAC_PROP_EN_10HDX_CAP
:
3155 case MAC_PROP_AUTONEG
:
3156 case MAC_PROP_FLOWCTRL
:
3163 * callback function for set/get of properties
3166 e1000g_m_setprop(void *arg
, const char *pr_name
, mac_prop_id_t pr_num
,
3167 uint_t pr_valsize
, const void *pr_val
)
3169 struct e1000g
*Adapter
= arg
;
3170 struct e1000_hw
*hw
= &Adapter
->shared
;
3171 struct e1000_fc_info
*fc
= &Adapter
->shared
.fc
;
3173 link_flowctrl_t flowctrl
;
3174 uint32_t cur_mtu
, new_mtu
;
3176 rw_enter(&Adapter
->chip_lock
, RW_WRITER
);
3178 if (Adapter
->e1000g_state
& E1000G_SUSPENDED
) {
3179 rw_exit(&Adapter
->chip_lock
);
3183 if (Adapter
->loopback_mode
!= E1000G_LB_NONE
&&
3184 e1000g_param_locked(pr_num
)) {
3186 * All en_* parameters are locked (read-only)
3187 * while the device is in any sort of loopback mode.
3189 rw_exit(&Adapter
->chip_lock
);
3194 case MAC_PROP_EN_1000FDX_CAP
:
3195 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
3199 Adapter
->param_en_1000fdx
= *(uint8_t *)pr_val
;
3200 Adapter
->param_adv_1000fdx
= *(uint8_t *)pr_val
;
3202 case MAC_PROP_EN_100FDX_CAP
:
3203 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
3207 Adapter
->param_en_100fdx
= *(uint8_t *)pr_val
;
3208 Adapter
->param_adv_100fdx
= *(uint8_t *)pr_val
;
3210 case MAC_PROP_EN_100HDX_CAP
:
3211 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
3215 Adapter
->param_en_100hdx
= *(uint8_t *)pr_val
;
3216 Adapter
->param_adv_100hdx
= *(uint8_t *)pr_val
;
3218 case MAC_PROP_EN_10FDX_CAP
:
3219 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
3223 Adapter
->param_en_10fdx
= *(uint8_t *)pr_val
;
3224 Adapter
->param_adv_10fdx
= *(uint8_t *)pr_val
;
3226 case MAC_PROP_EN_10HDX_CAP
:
3227 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
3231 Adapter
->param_en_10hdx
= *(uint8_t *)pr_val
;
3232 Adapter
->param_adv_10hdx
= *(uint8_t *)pr_val
;
3234 case MAC_PROP_AUTONEG
:
3235 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
3239 Adapter
->param_adv_autoneg
= *(uint8_t *)pr_val
;
3241 case MAC_PROP_FLOWCTRL
:
3242 fc
->send_xon
= B_TRUE
;
3243 bcopy(pr_val
, &flowctrl
, sizeof (flowctrl
));
3249 case LINK_FLOWCTRL_NONE
:
3250 fc
->requested_mode
= e1000_fc_none
;
3252 case LINK_FLOWCTRL_RX
:
3253 fc
->requested_mode
= e1000_fc_rx_pause
;
3255 case LINK_FLOWCTRL_TX
:
3256 fc
->requested_mode
= e1000_fc_tx_pause
;
3258 case LINK_FLOWCTRL_BI
:
3259 fc
->requested_mode
= e1000_fc_full
;
3264 /* check PCH limits & reset the link */
3265 e1000g_pch_limits(Adapter
);
3266 if (e1000g_reset_link(Adapter
) != DDI_SUCCESS
)
3270 case MAC_PROP_ADV_1000FDX_CAP
:
3271 case MAC_PROP_ADV_1000HDX_CAP
:
3272 case MAC_PROP_ADV_100FDX_CAP
:
3273 case MAC_PROP_ADV_100HDX_CAP
:
3274 case MAC_PROP_ADV_10FDX_CAP
:
3275 case MAC_PROP_ADV_10HDX_CAP
:
3276 case MAC_PROP_EN_1000HDX_CAP
:
3277 case MAC_PROP_STATUS
:
3278 case MAC_PROP_SPEED
:
3279 case MAC_PROP_DUPLEX
:
3280 err
= ENOTSUP
; /* read-only prop. Can't set this. */
3283 /* adapter must be stopped for an MTU change */
3284 if (Adapter
->e1000g_state
& E1000G_STARTED
) {
3289 cur_mtu
= Adapter
->default_mtu
;
3291 /* get new requested MTU */
3292 bcopy(pr_val
, &new_mtu
, sizeof (new_mtu
));
3293 if (new_mtu
== cur_mtu
) {
3298 if ((new_mtu
< DEFAULT_MTU
) ||
3299 (new_mtu
> Adapter
->max_mtu
)) {
3304 /* inform MAC framework of new MTU */
3305 err
= mac_maxsdu_update(Adapter
->mh
, new_mtu
);
3308 Adapter
->default_mtu
= new_mtu
;
3309 Adapter
->max_frame_size
=
3310 e1000g_mtu2maxframe(new_mtu
);
3313 * check PCH limits & set buffer sizes to
3316 e1000g_pch_limits(Adapter
);
3317 e1000g_set_bufsize(Adapter
);
3320 * decrease the number of descriptors and free
3321 * packets for jumbo frames to reduce tx/rx
3322 * resource consumption
3324 if (Adapter
->max_frame_size
>=
3325 (FRAME_SIZE_UPTO_4K
)) {
3326 if (Adapter
->tx_desc_num_flag
== 0)
3327 Adapter
->tx_desc_num
=
3328 DEFAULT_JUMBO_NUM_TX_DESC
;
3330 if (Adapter
->rx_desc_num_flag
== 0)
3331 Adapter
->rx_desc_num
=
3332 DEFAULT_JUMBO_NUM_RX_DESC
;
3334 if (Adapter
->tx_buf_num_flag
== 0)
3335 Adapter
->tx_freelist_num
=
3336 DEFAULT_JUMBO_NUM_TX_BUF
;
3338 if (Adapter
->rx_buf_num_flag
== 0)
3339 Adapter
->rx_freelist_limit
=
3340 DEFAULT_JUMBO_NUM_RX_BUF
;
3342 if (Adapter
->tx_desc_num_flag
== 0)
3343 Adapter
->tx_desc_num
=
3344 DEFAULT_NUM_TX_DESCRIPTOR
;
3346 if (Adapter
->rx_desc_num_flag
== 0)
3347 Adapter
->rx_desc_num
=
3348 DEFAULT_NUM_RX_DESCRIPTOR
;
3350 if (Adapter
->tx_buf_num_flag
== 0)
3351 Adapter
->tx_freelist_num
=
3352 DEFAULT_NUM_TX_FREELIST
;
3354 if (Adapter
->rx_buf_num_flag
== 0)
3355 Adapter
->rx_freelist_limit
=
3356 DEFAULT_NUM_RX_FREELIST
;
3360 case MAC_PROP_PRIVATE
:
3361 err
= e1000g_set_priv_prop(Adapter
, pr_name
,
3362 pr_valsize
, pr_val
);
3368 rw_exit(&Adapter
->chip_lock
);
3373 e1000g_m_getprop(void *arg
, const char *pr_name
, mac_prop_id_t pr_num
,
3374 uint_t pr_valsize
, void *pr_val
)
3376 struct e1000g
*Adapter
= arg
;
3377 struct e1000_fc_info
*fc
= &Adapter
->shared
.fc
;
3379 link_flowctrl_t flowctrl
;
3383 case MAC_PROP_DUPLEX
:
3384 ASSERT(pr_valsize
>= sizeof (link_duplex_t
));
3385 bcopy(&Adapter
->link_duplex
, pr_val
,
3386 sizeof (link_duplex_t
));
3388 case MAC_PROP_SPEED
:
3389 ASSERT(pr_valsize
>= sizeof (uint64_t));
3390 tmp
= Adapter
->link_speed
* 1000000ull;
3391 bcopy(&tmp
, pr_val
, sizeof (tmp
));
3393 case MAC_PROP_AUTONEG
:
3394 *(uint8_t *)pr_val
= Adapter
->param_adv_autoneg
;
3396 case MAC_PROP_FLOWCTRL
:
3397 ASSERT(pr_valsize
>= sizeof (link_flowctrl_t
));
3398 switch (fc
->current_mode
) {
3400 flowctrl
= LINK_FLOWCTRL_NONE
;
3402 case e1000_fc_rx_pause
:
3403 flowctrl
= LINK_FLOWCTRL_RX
;
3405 case e1000_fc_tx_pause
:
3406 flowctrl
= LINK_FLOWCTRL_TX
;
3409 flowctrl
= LINK_FLOWCTRL_BI
;
3412 bcopy(&flowctrl
, pr_val
, sizeof (flowctrl
));
3414 case MAC_PROP_ADV_1000FDX_CAP
:
3415 *(uint8_t *)pr_val
= Adapter
->param_adv_1000fdx
;
3417 case MAC_PROP_EN_1000FDX_CAP
:
3418 *(uint8_t *)pr_val
= Adapter
->param_en_1000fdx
;
3420 case MAC_PROP_ADV_1000HDX_CAP
:
3421 *(uint8_t *)pr_val
= Adapter
->param_adv_1000hdx
;
3423 case MAC_PROP_EN_1000HDX_CAP
:
3424 *(uint8_t *)pr_val
= Adapter
->param_en_1000hdx
;
3426 case MAC_PROP_ADV_100FDX_CAP
:
3427 *(uint8_t *)pr_val
= Adapter
->param_adv_100fdx
;
3429 case MAC_PROP_EN_100FDX_CAP
:
3430 *(uint8_t *)pr_val
= Adapter
->param_en_100fdx
;
3432 case MAC_PROP_ADV_100HDX_CAP
:
3433 *(uint8_t *)pr_val
= Adapter
->param_adv_100hdx
;
3435 case MAC_PROP_EN_100HDX_CAP
:
3436 *(uint8_t *)pr_val
= Adapter
->param_en_100hdx
;
3438 case MAC_PROP_ADV_10FDX_CAP
:
3439 *(uint8_t *)pr_val
= Adapter
->param_adv_10fdx
;
3441 case MAC_PROP_EN_10FDX_CAP
:
3442 *(uint8_t *)pr_val
= Adapter
->param_en_10fdx
;
3444 case MAC_PROP_ADV_10HDX_CAP
:
3445 *(uint8_t *)pr_val
= Adapter
->param_adv_10hdx
;
3447 case MAC_PROP_EN_10HDX_CAP
:
3448 *(uint8_t *)pr_val
= Adapter
->param_en_10hdx
;
3450 case MAC_PROP_ADV_100T4_CAP
:
3451 case MAC_PROP_EN_100T4_CAP
:
3452 *(uint8_t *)pr_val
= Adapter
->param_adv_100t4
;
3454 case MAC_PROP_PRIVATE
:
3455 err
= e1000g_get_priv_prop(Adapter
, pr_name
,
3456 pr_valsize
, pr_val
);
3467 e1000g_m_propinfo(void *arg
, const char *pr_name
, mac_prop_id_t pr_num
,
3468 mac_prop_info_handle_t prh
)
3470 struct e1000g
*Adapter
= arg
;
3471 struct e1000_hw
*hw
= &Adapter
->shared
;
3474 case MAC_PROP_DUPLEX
:
3475 case MAC_PROP_SPEED
:
3476 case MAC_PROP_ADV_1000FDX_CAP
:
3477 case MAC_PROP_ADV_1000HDX_CAP
:
3478 case MAC_PROP_ADV_100FDX_CAP
:
3479 case MAC_PROP_ADV_100HDX_CAP
:
3480 case MAC_PROP_ADV_10FDX_CAP
:
3481 case MAC_PROP_ADV_10HDX_CAP
:
3482 case MAC_PROP_ADV_100T4_CAP
:
3483 case MAC_PROP_EN_100T4_CAP
:
3484 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
3487 case MAC_PROP_EN_1000FDX_CAP
:
3488 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
3489 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
3491 mac_prop_info_set_default_uint8(prh
,
3492 ((Adapter
->phy_ext_status
&
3493 IEEE_ESR_1000T_FD_CAPS
) ||
3494 (Adapter
->phy_ext_status
&
3495 IEEE_ESR_1000X_FD_CAPS
)) ? 1 : 0);
3499 case MAC_PROP_EN_100FDX_CAP
:
3500 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
3501 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
3503 mac_prop_info_set_default_uint8(prh
,
3504 ((Adapter
->phy_status
& MII_SR_100X_FD_CAPS
) ||
3505 (Adapter
->phy_status
& MII_SR_100T2_FD_CAPS
))
3510 case MAC_PROP_EN_100HDX_CAP
:
3511 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
3512 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
3514 mac_prop_info_set_default_uint8(prh
,
3515 ((Adapter
->phy_status
& MII_SR_100X_HD_CAPS
) ||
3516 (Adapter
->phy_status
& MII_SR_100T2_HD_CAPS
))
3521 case MAC_PROP_EN_10FDX_CAP
:
3522 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
3523 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
3525 mac_prop_info_set_default_uint8(prh
,
3526 (Adapter
->phy_status
& MII_SR_10T_FD_CAPS
) ? 1 : 0);
3530 case MAC_PROP_EN_10HDX_CAP
:
3531 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
3532 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
3534 mac_prop_info_set_default_uint8(prh
,
3535 (Adapter
->phy_status
& MII_SR_10T_HD_CAPS
) ? 1 : 0);
3539 case MAC_PROP_EN_1000HDX_CAP
:
3540 if (hw
->phy
.media_type
!= e1000_media_type_copper
)
3541 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
3544 case MAC_PROP_AUTONEG
:
3545 if (hw
->phy
.media_type
!= e1000_media_type_copper
) {
3546 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
3548 mac_prop_info_set_default_uint8(prh
,
3549 (Adapter
->phy_status
& MII_SR_AUTONEG_CAPS
)
3554 case MAC_PROP_FLOWCTRL
:
3555 mac_prop_info_set_default_link_flowctrl(prh
, LINK_FLOWCTRL_BI
);
3558 case MAC_PROP_MTU
: {
3559 struct e1000_mac_info
*mac
= &Adapter
->shared
.mac
;
3560 struct e1000_phy_info
*phy
= &Adapter
->shared
.phy
;
3563 /* some MAC types do not support jumbo frames */
3564 if ((mac
->type
== e1000_ich8lan
) ||
3565 ((mac
->type
== e1000_ich9lan
) && (phy
->type
==
3569 max
= Adapter
->max_mtu
;
3572 mac_prop_info_set_range_uint32(prh
, DEFAULT_MTU
, max
);
3575 case MAC_PROP_PRIVATE
: {
3579 if (strcmp(pr_name
, "_adv_pause_cap") == 0 ||
3580 strcmp(pr_name
, "_adv_asym_pause_cap") == 0) {
3581 mac_prop_info_set_perm(prh
, MAC_PROP_PERM_READ
);
3583 } else if (strcmp(pr_name
, "_tx_bcopy_threshold") == 0) {
3584 value
= DEFAULT_TX_BCOPY_THRESHOLD
;
3585 } else if (strcmp(pr_name
, "_tx_interrupt_enable") == 0) {
3586 value
= DEFAULT_TX_INTR_ENABLE
;
3587 } else if (strcmp(pr_name
, "_tx_intr_delay") == 0) {
3588 value
= DEFAULT_TX_INTR_DELAY
;
3589 } else if (strcmp(pr_name
, "_tx_intr_abs_delay") == 0) {
3590 value
= DEFAULT_TX_INTR_ABS_DELAY
;
3591 } else if (strcmp(pr_name
, "_rx_bcopy_threshold") == 0) {
3592 value
= DEFAULT_RX_BCOPY_THRESHOLD
;
3593 } else if (strcmp(pr_name
, "_max_num_rcv_packets") == 0) {
3594 value
= DEFAULT_RX_LIMIT_ON_INTR
;
3595 } else if (strcmp(pr_name
, "_rx_intr_delay") == 0) {
3596 value
= DEFAULT_RX_INTR_DELAY
;
3597 } else if (strcmp(pr_name
, "_rx_intr_abs_delay") == 0) {
3598 value
= DEFAULT_RX_INTR_ABS_DELAY
;
3599 } else if (strcmp(pr_name
, "_intr_throttling_rate") == 0) {
3600 value
= DEFAULT_INTR_THROTTLING
;
3601 } else if (strcmp(pr_name
, "_intr_adaptive") == 0) {
3607 (void) snprintf(valstr
, sizeof (valstr
), "%d", value
);
3608 mac_prop_info_set_default_str(prh
, valstr
);
3616 e1000g_set_priv_prop(struct e1000g
*Adapter
, const char *pr_name
,
3617 uint_t pr_valsize
, const void *pr_val
)
3621 struct e1000_hw
*hw
= &Adapter
->shared
;
3623 if (strcmp(pr_name
, "_tx_bcopy_threshold") == 0) {
3624 if (pr_val
== NULL
) {
3628 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
3629 if (result
< MIN_TX_BCOPY_THRESHOLD
||
3630 result
> MAX_TX_BCOPY_THRESHOLD
)
3633 Adapter
->tx_bcopy_thresh
= (uint32_t)result
;
3637 if (strcmp(pr_name
, "_tx_interrupt_enable") == 0) {
3638 if (pr_val
== NULL
) {
3642 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
3643 if (result
< 0 || result
> 1)
3646 Adapter
->tx_intr_enable
= (result
== 1) ?
3648 if (Adapter
->tx_intr_enable
)
3649 e1000g_mask_tx_interrupt(Adapter
);
3651 e1000g_clear_tx_interrupt(Adapter
);
3652 if (e1000g_check_acc_handle(
3653 Adapter
->osdep
.reg_handle
) != DDI_FM_OK
) {
3654 ddi_fm_service_impact(Adapter
->dip
,
3655 DDI_SERVICE_DEGRADED
);
3661 if (strcmp(pr_name
, "_tx_intr_delay") == 0) {
3662 if (pr_val
== NULL
) {
3666 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
3667 if (result
< MIN_TX_INTR_DELAY
||
3668 result
> MAX_TX_INTR_DELAY
)
3671 Adapter
->tx_intr_delay
= (uint32_t)result
;
3672 E1000_WRITE_REG(hw
, E1000_TIDV
, Adapter
->tx_intr_delay
);
3673 if (e1000g_check_acc_handle(
3674 Adapter
->osdep
.reg_handle
) != DDI_FM_OK
) {
3675 ddi_fm_service_impact(Adapter
->dip
,
3676 DDI_SERVICE_DEGRADED
);
3682 if (strcmp(pr_name
, "_tx_intr_abs_delay") == 0) {
3683 if (pr_val
== NULL
) {
3687 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
3688 if (result
< MIN_TX_INTR_ABS_DELAY
||
3689 result
> MAX_TX_INTR_ABS_DELAY
)
3692 Adapter
->tx_intr_abs_delay
= (uint32_t)result
;
3693 E1000_WRITE_REG(hw
, E1000_TADV
,
3694 Adapter
->tx_intr_abs_delay
);
3695 if (e1000g_check_acc_handle(
3696 Adapter
->osdep
.reg_handle
) != DDI_FM_OK
) {
3697 ddi_fm_service_impact(Adapter
->dip
,
3698 DDI_SERVICE_DEGRADED
);
3704 if (strcmp(pr_name
, "_rx_bcopy_threshold") == 0) {
3705 if (pr_val
== NULL
) {
3709 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
3710 if (result
< MIN_RX_BCOPY_THRESHOLD
||
3711 result
> MAX_RX_BCOPY_THRESHOLD
)
3714 Adapter
->rx_bcopy_thresh
= (uint32_t)result
;
3717 if (strcmp(pr_name
, "_max_num_rcv_packets") == 0) {
3718 if (pr_val
== NULL
) {
3722 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
3723 if (result
< MIN_RX_LIMIT_ON_INTR
||
3724 result
> MAX_RX_LIMIT_ON_INTR
)
3727 Adapter
->rx_limit_onintr
= (uint32_t)result
;
3730 if (strcmp(pr_name
, "_rx_intr_delay") == 0) {
3731 if (pr_val
== NULL
) {
3735 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
3736 if (result
< MIN_RX_INTR_DELAY
||
3737 result
> MAX_RX_INTR_DELAY
)
3740 Adapter
->rx_intr_delay
= (uint32_t)result
;
3741 E1000_WRITE_REG(hw
, E1000_RDTR
, Adapter
->rx_intr_delay
);
3742 if (e1000g_check_acc_handle(
3743 Adapter
->osdep
.reg_handle
) != DDI_FM_OK
) {
3744 ddi_fm_service_impact(Adapter
->dip
,
3745 DDI_SERVICE_DEGRADED
);
3751 if (strcmp(pr_name
, "_rx_intr_abs_delay") == 0) {
3752 if (pr_val
== NULL
) {
3756 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
3757 if (result
< MIN_RX_INTR_ABS_DELAY
||
3758 result
> MAX_RX_INTR_ABS_DELAY
)
3761 Adapter
->rx_intr_abs_delay
= (uint32_t)result
;
3762 E1000_WRITE_REG(hw
, E1000_RADV
,
3763 Adapter
->rx_intr_abs_delay
);
3764 if (e1000g_check_acc_handle(
3765 Adapter
->osdep
.reg_handle
) != DDI_FM_OK
) {
3766 ddi_fm_service_impact(Adapter
->dip
,
3767 DDI_SERVICE_DEGRADED
);
3773 if (strcmp(pr_name
, "_intr_throttling_rate") == 0) {
3774 if (pr_val
== NULL
) {
3778 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
3779 if (result
< MIN_INTR_THROTTLING
||
3780 result
> MAX_INTR_THROTTLING
)
3783 if (hw
->mac
.type
>= e1000_82540
) {
3784 Adapter
->intr_throttling_rate
=
3786 E1000_WRITE_REG(hw
, E1000_ITR
,
3787 Adapter
->intr_throttling_rate
);
3788 if (e1000g_check_acc_handle(
3789 Adapter
->osdep
.reg_handle
) != DDI_FM_OK
) {
3790 ddi_fm_service_impact(Adapter
->dip
,
3791 DDI_SERVICE_DEGRADED
);
3799 if (strcmp(pr_name
, "_intr_adaptive") == 0) {
3800 if (pr_val
== NULL
) {
3804 (void) ddi_strtol(pr_val
, (char **)NULL
, 0, &result
);
3805 if (result
< 0 || result
> 1)
3808 if (hw
->mac
.type
>= e1000_82540
) {
3809 Adapter
->intr_adaptive
= (result
== 1) ?
3821 e1000g_get_priv_prop(struct e1000g
*Adapter
, const char *pr_name
,
3822 uint_t pr_valsize
, void *pr_val
)
3827 if (strcmp(pr_name
, "_adv_pause_cap") == 0) {
3828 value
= Adapter
->param_adv_pause
;
3832 if (strcmp(pr_name
, "_adv_asym_pause_cap") == 0) {
3833 value
= Adapter
->param_adv_asym_pause
;
3837 if (strcmp(pr_name
, "_tx_bcopy_threshold") == 0) {
3838 value
= Adapter
->tx_bcopy_thresh
;
3842 if (strcmp(pr_name
, "_tx_interrupt_enable") == 0) {
3843 value
= Adapter
->tx_intr_enable
;
3847 if (strcmp(pr_name
, "_tx_intr_delay") == 0) {
3848 value
= Adapter
->tx_intr_delay
;
3852 if (strcmp(pr_name
, "_tx_intr_abs_delay") == 0) {
3853 value
= Adapter
->tx_intr_abs_delay
;
3857 if (strcmp(pr_name
, "_rx_bcopy_threshold") == 0) {
3858 value
= Adapter
->rx_bcopy_thresh
;
3862 if (strcmp(pr_name
, "_max_num_rcv_packets") == 0) {
3863 value
= Adapter
->rx_limit_onintr
;
3867 if (strcmp(pr_name
, "_rx_intr_delay") == 0) {
3868 value
= Adapter
->rx_intr_delay
;
3872 if (strcmp(pr_name
, "_rx_intr_abs_delay") == 0) {
3873 value
= Adapter
->rx_intr_abs_delay
;
3877 if (strcmp(pr_name
, "_intr_throttling_rate") == 0) {
3878 value
= Adapter
->intr_throttling_rate
;
3882 if (strcmp(pr_name
, "_intr_adaptive") == 0) {
3883 value
= Adapter
->intr_adaptive
;
3889 (void) snprintf(pr_val
, pr_valsize
, "%d", value
);
/*
 * e1000g_get_conf - get configurations set in e1000g.conf
 * This routine gets user-configured values out of the configuration
 * file e1000g.conf.
 *
 * For each configurable value, there is a minimum, a maximum, and a
 * default.
 * If user does not configure a value, use the default.
 * If user configures below the minimum, use the minimum.
 * If user configures above the maximum, use the maximum.
 */
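/*
 * Illustrative sketch only (kept out of the build with #if 0): the clamping
 * rule spelled out above, written as a standalone helper. The name
 * clamp_conf_value() is made up for illustration; the real work is done by
 * e1000g_get_prop() further below.
 */
#if 0
static int
clamp_conf_value(boolean_t configured, int propval,
    int minval, int maxval, int defval)
{
	if (!configured)
		return (defval);	/* not set in e1000g.conf */
	if (propval < minval)
		return (minval);	/* below minimum: use the minimum */
	if (propval > maxval)
		return (maxval);	/* above maximum: use the maximum */
	return (propval);
}
#endif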
3906 e1000g_get_conf(struct e1000g
*Adapter
)
3908 struct e1000_hw
*hw
= &Adapter
->shared
;
3909 boolean_t tbi_compatibility
= B_FALSE
;
3910 boolean_t is_jumbo
= B_FALSE
;
3913 * decrease the number of descriptors and free packets
3914 * for jumbo frames to reduce tx/rx resource consumption
3916 if (Adapter
->max_frame_size
>= FRAME_SIZE_UPTO_4K
) {
3921 * get each configurable property from e1000g.conf
3927 Adapter
->tx_desc_num_flag
=
3928 e1000g_get_prop(Adapter
, "NumTxDescriptors",
3929 MIN_NUM_TX_DESCRIPTOR
, MAX_NUM_TX_DESCRIPTOR
,
3930 is_jumbo
? DEFAULT_JUMBO_NUM_TX_DESC
3931 : DEFAULT_NUM_TX_DESCRIPTOR
, &propval
);
3932 Adapter
->tx_desc_num
= propval
;
3937 Adapter
->rx_desc_num_flag
=
3938 e1000g_get_prop(Adapter
, "NumRxDescriptors",
3939 MIN_NUM_RX_DESCRIPTOR
, MAX_NUM_RX_DESCRIPTOR
,
3940 is_jumbo
? DEFAULT_JUMBO_NUM_RX_DESC
3941 : DEFAULT_NUM_RX_DESCRIPTOR
, &propval
);
3942 Adapter
->rx_desc_num
= propval
;
3947 Adapter
->rx_buf_num_flag
=
3948 e1000g_get_prop(Adapter
, "NumRxFreeList",
3949 MIN_NUM_RX_FREELIST
, MAX_NUM_RX_FREELIST
,
3950 is_jumbo
? DEFAULT_JUMBO_NUM_RX_BUF
3951 : DEFAULT_NUM_RX_FREELIST
, &propval
);
3952 Adapter
->rx_freelist_limit
= propval
;
3957 Adapter
->tx_buf_num_flag
=
3958 e1000g_get_prop(Adapter
, "NumTxPacketList",
3959 MIN_NUM_TX_FREELIST
, MAX_NUM_TX_FREELIST
,
3960 is_jumbo
? DEFAULT_JUMBO_NUM_TX_BUF
3961 : DEFAULT_NUM_TX_FREELIST
, &propval
);
3962 Adapter
->tx_freelist_num
= propval
;
3967 hw
->fc
.send_xon
= B_TRUE
;
3968 (void) e1000g_get_prop(Adapter
, "FlowControl",
3969 e1000_fc_none
, 4, DEFAULT_FLOW_CONTROL
, &propval
);
3970 hw
->fc
.requested_mode
= propval
;
3971 /* 4 is the setting that says "let the eeprom decide" */
3972 if (hw
->fc
.requested_mode
== 4)
3973 hw
->fc
.requested_mode
= e1000_fc_default
;
3976 * Max Num Receive Packets on Interrupt
3978 (void) e1000g_get_prop(Adapter
, "MaxNumReceivePackets",
3979 MIN_RX_LIMIT_ON_INTR
, MAX_RX_LIMIT_ON_INTR
,
3980 DEFAULT_RX_LIMIT_ON_INTR
, &propval
);
3981 Adapter
->rx_limit_onintr
= propval
;
3984 * PHY master slave setting
3986 (void) e1000g_get_prop(Adapter
, "SetMasterSlave",
3987 e1000_ms_hw_default
, e1000_ms_auto
,
3988 e1000_ms_hw_default
, &propval
);
3989 hw
->phy
.ms_type
= propval
;
3992 * Parameter which controls TBI mode workaround, which is only
3993 * needed on certain switches such as Cisco 6500/Foundry
3995 (void) e1000g_get_prop(Adapter
, "TbiCompatibilityEnable",
3996 0, 1, DEFAULT_TBI_COMPAT_ENABLE
, &propval
);
3997 tbi_compatibility
= (propval
== 1);
3998 e1000_set_tbi_compatibility_82543(hw
, tbi_compatibility
);
4003 (void) e1000g_get_prop(Adapter
, "MSIEnable",
4004 0, 1, DEFAULT_MSI_ENABLE
, &propval
);
4005 Adapter
->msi_enable
= (propval
== 1);
4008 * Interrupt Throttling Rate
4010 (void) e1000g_get_prop(Adapter
, "intr_throttling_rate",
4011 MIN_INTR_THROTTLING
, MAX_INTR_THROTTLING
,
4012 DEFAULT_INTR_THROTTLING
, &propval
);
4013 Adapter
->intr_throttling_rate
= propval
;
4016 * Adaptive Interrupt Blanking Enable/Disable
4017 * It is enabled by default
4019 (void) e1000g_get_prop(Adapter
, "intr_adaptive", 0, 1, 1,
4021 Adapter
->intr_adaptive
= (propval
== 1);
4024 * Hardware checksum enable/disable parameter
4026 (void) e1000g_get_prop(Adapter
, "tx_hcksum_enable",
4027 0, 1, DEFAULT_TX_HCKSUM_ENABLE
, &propval
);
4028 Adapter
->tx_hcksum_enable
= (propval
== 1);
4030 * Checksum on/off selection via global parameters.
4032 * If the chip is flagged as not capable of (correctly)
4033 * handling checksumming, we don't enable it on either
4034 * Rx or Tx side. Otherwise, we take this chip's settings
4035 * from the patchable global defaults.
4037 * We advertise our capabilities only if TX offload is
4038 * enabled. On receive, the stack will accept checksummed
4039 * packets anyway, even if we haven't said we can deliver
4042 switch (hw
->mac
.type
) {
4046 case e1000_82545_rev_3
:
4048 case e1000_82546_rev_3
:
4052 case e1000_80003es2lan
:
4055 * For the following Intel PRO/1000 chipsets, we have not
4056 * tested the hardware checksum offload capability, so we
4057 * disable the capability for them.
4061 * e1000_82541_rev_2,
4063 * e1000_82547_rev_2,
4066 Adapter
->tx_hcksum_enable
= B_FALSE
;
4070 * Large Send Offloading(LSO) Enable/Disable
4071 * If the tx hardware checksum is not enabled, LSO should be
4074 (void) e1000g_get_prop(Adapter
, "lso_enable",
4075 0, 1, DEFAULT_LSO_ENABLE
, &propval
);
4076 Adapter
->lso_enable
= (propval
== 1);
4078 switch (hw
->mac
.type
) {
4080 case e1000_82546_rev_3
:
4081 if (Adapter
->lso_enable
)
4082 Adapter
->lso_premature_issue
= B_TRUE
;
4087 case e1000_80003es2lan
:
4090 Adapter
->lso_enable
= B_FALSE
;
4093 if (!Adapter
->tx_hcksum_enable
) {
4094 Adapter
->lso_premature_issue
= B_FALSE
;
4095 Adapter
->lso_enable
= B_FALSE
;
4099 * If mem_workaround_82546 is enabled, the rx buffer allocated by
4100 * e1000_82545, e1000_82546 and e1000_82546_rev_3
4101 * will not cross 64k boundary.
4103 (void) e1000g_get_prop(Adapter
, "mem_workaround_82546",
4104 0, 1, DEFAULT_MEM_WORKAROUND_82546
, &propval
);
4105 Adapter
->mem_workaround_82546
= (propval
== 1);
4108 * Max number of multicast addresses
4110 (void) e1000g_get_prop(Adapter
, "mcast_max_num",
4111 MIN_MCAST_NUM
, MAX_MCAST_NUM
, hw
->mac
.mta_reg_count
* 32,
4113 Adapter
->mcast_max_num
= propval
;
/*
 * e1000g_get_prop - routine to read properties
 *
 * Get a user-configured property value out of the configuration
 * file e1000g.conf.
 *
 * Caller provides name of the property, a default value, a minimum
 * value, a maximum value and a pointer to the returned property
 * value.
 *
 * Return B_TRUE if the configured value of the property is not a default
 * value, otherwise return B_FALSE.
 */
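/*
 * Usage sketch only (kept out of the build with #if 0): how callers in this
 * file use the routine, mirroring e1000g_get_conf() above. The e1000g.conf
 * line shown in the comment uses the generic driver.conf integer-array form
 * and is given purely as an illustration, one value per driver instance:
 *
 *	MaxNumReceivePackets=256,256;
 */
#if 0
	int propval;
	boolean_t user_set;

	user_set = e1000g_get_prop(Adapter, "MaxNumReceivePackets",
	    MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
	    DEFAULT_RX_LIMIT_ON_INTR, &propval);
	Adapter->rx_limit_onintr = propval;
#endif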
4130 e1000g_get_prop(struct e1000g
*Adapter
, /* point to per-adapter structure */
4131 char *propname
, /* name of the property */
4132 int minval
, /* minimum acceptable value */
4133 int maxval
, /* maximum acceptable value */
4134 int defval
, /* default value */
4135 int *propvalue
) /* property value return to caller */
4137 int propval
; /* value returned for requested property */
4138 int *props
; /* point to array of properties returned */
4139 uint_t nprops
; /* number of property value returned */
4140 boolean_t ret
= B_TRUE
;
4143 * get the array of properties from the config file
4145 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, Adapter
->dip
,
4146 DDI_PROP_DONTPASS
, propname
, &props
, &nprops
) == DDI_PROP_SUCCESS
) {
4147 /* got some properties, test if we got enough */
4148 if (Adapter
->instance
< nprops
) {
4149 propval
= props
[Adapter
->instance
];
4151 /* not enough properties configured */
4153 E1000G_DEBUGLOG_2(Adapter
, E1000G_INFO_LEVEL
,
4154 "Not Enough %s values found in e1000g.conf"
4160 /* free memory allocated for properties */
4161 ddi_prop_free(props
);
4171 if (propval
> maxval
) {
4173 E1000G_DEBUGLOG_2(Adapter
, E1000G_INFO_LEVEL
,
4174 "Too High %s value in e1000g.conf - set to %d\n",
4178 if (propval
< minval
) {
4180 E1000G_DEBUGLOG_2(Adapter
, E1000G_INFO_LEVEL
,
4181 "Too Low %s value in e1000g.conf - set to %d\n",
4185 *propvalue
= propval
;
4190 e1000g_link_check(struct e1000g
*Adapter
)
4192 uint16_t speed
, duplex
, phydata
;
4193 boolean_t link_changed
= B_FALSE
;
4194 struct e1000_hw
*hw
;
4197 hw
= &Adapter
->shared
;
4199 if (e1000g_link_up(Adapter
)) {
4201 * The Link is up, check whether it was marked as down earlier
4203 if (Adapter
->link_state
!= LINK_STATE_UP
) {
4204 (void) e1000_get_speed_and_duplex(hw
, &speed
, &duplex
);
4205 Adapter
->link_speed
= speed
;
4206 Adapter
->link_duplex
= duplex
;
4207 Adapter
->link_state
= LINK_STATE_UP
;
4208 link_changed
= B_TRUE
;
4210 if (Adapter
->link_speed
== SPEED_1000
)
4211 Adapter
->stall_threshold
= TX_STALL_TIME_2S
;
4213 Adapter
->stall_threshold
= TX_STALL_TIME_8S
;
4215 Adapter
->tx_link_down_timeout
= 0;
4217 if ((hw
->mac
.type
== e1000_82571
) ||
4218 (hw
->mac
.type
== e1000_82572
)) {
4219 reg_tarc
= E1000_READ_REG(hw
, E1000_TARC(0));
4220 if (speed
== SPEED_1000
)
4221 reg_tarc
|= (1 << 21);
4223 reg_tarc
&= ~(1 << 21);
4224 E1000_WRITE_REG(hw
, E1000_TARC(0), reg_tarc
);
4227 Adapter
->smartspeed
= 0;
4229 if (Adapter
->link_state
!= LINK_STATE_DOWN
) {
4230 Adapter
->link_speed
= 0;
4231 Adapter
->link_duplex
= 0;
4232 Adapter
->link_state
= LINK_STATE_DOWN
;
4233 link_changed
= B_TRUE
;
4236 * SmartSpeed workaround for Tabor/TanaX. When the
4237 * driver loses link, disable auto master/slave resolution.
4240 if (hw
->phy
.type
== e1000_phy_igp
) {
4241 (void) e1000_read_phy_reg(hw
,
4242 PHY_1000T_CTRL
, &phydata
);
4243 phydata
|= CR_1000T_MS_ENABLE
;
4244 (void) e1000_write_phy_reg(hw
,
4245 PHY_1000T_CTRL
, phydata
);
4248 e1000g_smartspeed(Adapter
);
4251 if (Adapter
->e1000g_state
& E1000G_STARTED
) {
4252 if (Adapter
->tx_link_down_timeout
<
4253 MAX_TX_LINK_DOWN_TIMEOUT
) {
4254 Adapter
->tx_link_down_timeout
++;
4255 } else if (Adapter
->tx_link_down_timeout
==
4256 MAX_TX_LINK_DOWN_TIMEOUT
) {
4257 e1000g_tx_clean(Adapter
);
4258 Adapter
->tx_link_down_timeout
++;
4263 if (e1000g_check_acc_handle(Adapter
->osdep
.reg_handle
) != DDI_FM_OK
)
4264 ddi_fm_service_impact(Adapter
->dip
, DDI_SERVICE_DEGRADED
);
4266 return (link_changed
);
4270 * e1000g_reset_link - Using the link properties to setup the link
4273 e1000g_reset_link(struct e1000g
*Adapter
)
4275 struct e1000_mac_info
*mac
;
4276 struct e1000_phy_info
*phy
;
4277 struct e1000_hw
*hw
;
4280 mac
= &Adapter
->shared
.mac
;
4281 phy
= &Adapter
->shared
.phy
;
4282 hw
= &Adapter
->shared
;
4285 if (hw
->phy
.media_type
!= e1000_media_type_copper
)
4288 if (Adapter
->param_adv_autoneg
== 1) {
4289 mac
->autoneg
= B_TRUE
;
4290 phy
->autoneg_advertised
= 0;
4293 * 1000hdx is not supported for autonegotiation
4295 if (Adapter
->param_adv_1000fdx
== 1)
4296 phy
->autoneg_advertised
|= ADVERTISE_1000_FULL
;
4298 if (Adapter
->param_adv_100fdx
== 1)
4299 phy
->autoneg_advertised
|= ADVERTISE_100_FULL
;
4301 if (Adapter
->param_adv_100hdx
== 1)
4302 phy
->autoneg_advertised
|= ADVERTISE_100_HALF
;
4304 if (Adapter
->param_adv_10fdx
== 1)
4305 phy
->autoneg_advertised
|= ADVERTISE_10_FULL
;
4307 if (Adapter
->param_adv_10hdx
== 1)
4308 phy
->autoneg_advertised
|= ADVERTISE_10_HALF
;
4310 if (phy
->autoneg_advertised
== 0)
4313 mac
->autoneg
= B_FALSE
;
4316 * For Intel copper cards, 1000fdx and 1000hdx are not
4317 * supported for forced link
4319 if (Adapter
->param_adv_100fdx
== 1)
4320 mac
->forced_speed_duplex
= ADVERTISE_100_FULL
;
4321 else if (Adapter
->param_adv_100hdx
== 1)
4322 mac
->forced_speed_duplex
= ADVERTISE_100_HALF
;
4323 else if (Adapter
->param_adv_10fdx
== 1)
4324 mac
->forced_speed_duplex
= ADVERTISE_10_FULL
;
4325 else if (Adapter
->param_adv_10hdx
== 1)
4326 mac
->forced_speed_duplex
= ADVERTISE_10_HALF
;
4333 e1000g_log(Adapter
, CE_WARN
,
4334 "Invalid link settings. Setup link to "
4335 "support autonegotiation with all link capabilities.");
4336 mac
->autoneg
= B_TRUE
;
4337 phy
->autoneg_advertised
= AUTONEG_ADVERTISE_SPEED_DEFAULT
;
4341 return (e1000_setup_link(&Adapter
->shared
));
static void
e1000g_timer_tx_resched(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;

	rw_enter(&Adapter->chip_lock, RW_READER);

	if (tx_ring->resched_needed &&
	    ((ddi_get_lbolt() - tx_ring->resched_timestamp) >
	    drv_usectohz(1000000)) &&
	    (Adapter->e1000g_state & E1000G_STARTED) &&
	    (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) {
		tx_ring->resched_needed = B_FALSE;
		mac_tx_update(Adapter->mh);
		E1000G_STAT(tx_ring->stat_reschedule);
		E1000G_STAT(tx_ring->stat_timer_reschedule);
	}

	rw_exit(&Adapter->chip_lock);
}
static void
e1000g_local_timer(void *ws)
{
	struct e1000g *Adapter = (struct e1000g *)ws;
	struct e1000_hw *hw;
	e1000g_ether_addr_t ether_addr;
	boolean_t link_changed;

	hw = &Adapter->shared;

	if (Adapter->e1000g_state & E1000G_ERROR) {
		rw_enter(&Adapter->chip_lock, RW_WRITER);
		Adapter->e1000g_state &= ~E1000G_ERROR;
		rw_exit(&Adapter->chip_lock);

		Adapter->reset_count++;
		if (e1000g_global_reset(Adapter)) {
			ddi_fm_service_impact(Adapter->dip,
			    DDI_SERVICE_RESTORED);
			e1000g_timer_tx_resched(Adapter);
		} else
			ddi_fm_service_impact(Adapter->dip,
			    DDI_SERVICE_LOST);
		return;
	}

	if (e1000g_stall_check(Adapter)) {
		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
		    "Tx stall detected. Activate automatic recovery.\n");
		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL);
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
		Adapter->reset_count++;
		if (e1000g_reset_adapter(Adapter)) {
			ddi_fm_service_impact(Adapter->dip,
			    DDI_SERVICE_RESTORED);
			e1000g_timer_tx_resched(Adapter);
		}
		return;
	}

	link_changed = B_FALSE;
	rw_enter(&Adapter->chip_lock, RW_READER);
	if (Adapter->link_complete)
		link_changed = e1000g_link_check(Adapter);
	rw_exit(&Adapter->chip_lock);

	if (link_changed) {
		if (!Adapter->reset_flag &&
		    (Adapter->e1000g_state & E1000G_STARTED) &&
		    !(Adapter->e1000g_state & E1000G_SUSPENDED))
			mac_link_update(Adapter->mh, Adapter->link_state);
		if (Adapter->link_state == LINK_STATE_UP)
			Adapter->reset_flag = B_FALSE;
	}

	/*
	 * Workaround for esb2. Data stuck in fifo on a link
	 * down event. Reset the adapter to recover it.
	 */
	if (Adapter->esb2_workaround) {
		Adapter->esb2_workaround = B_FALSE;
		(void) e1000g_reset_adapter(Adapter);
		return;
	}

	/*
	 * With 82571 controllers, any locally administered address will
	 * be overwritten when there is a reset on the other port.
	 * Detect this circumstance and correct it.
	 */
	if ((hw->mac.type == e1000_82571) &&
	    (e1000_get_laa_state_82571(hw) == B_TRUE)) {
		ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
		ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);

		ether_addr.reg.low = ntohl(ether_addr.reg.low);
		ether_addr.reg.high = ntohl(ether_addr.reg.high);

		if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
		    (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
		    (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
		    (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
		    (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
		    (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
			(void) e1000_rar_set(hw, hw->mac.addr, 0);
		}
	}

	/*
	 * Long TTL workaround for 82541/82547
	 */
	(void) e1000_igp_ttl_workaround_82547(hw);

	/*
	 * Check for Adaptive IFS settings. If there are lots of collisions
	 * change the value in steps...
	 * These properties should only be set for 10/100
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) &&
	    ((Adapter->link_speed == SPEED_100) ||
	    (Adapter->link_speed == SPEED_10))) {
		e1000_update_adaptive(hw);
	}

	/*
	 * Set Timer Interrupts
	 */
	E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
	else
		e1000g_timer_tx_resched(Adapter);

	restart_watchdog_timer(Adapter);
}

/*
 * The function e1000g_link_timer() is called when the timer for link setup
 * has expired, which indicates the completion of the link setup. The link
 * state will not be updated until the link setup is completed. And the
 * link state will not be sent to the upper layer through mac_link_update()
 * in this function. It will be updated in the local timer routine or the
 * interrupt service routine after the interface is started (plumbed).
 */
static void
e1000g_link_timer(void *arg)
{
	struct e1000g *Adapter = (struct e1000g *)arg;

	mutex_enter(&Adapter->link_lock);
	Adapter->link_complete = B_TRUE;
	Adapter->link_tid = 0;
	mutex_exit(&Adapter->link_lock);
}

/*
 * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
 *
 * This function reads the forced speed and duplex for 10/100 Mbps speeds
 * and also for 1000 Mbps speeds from the e1000g.conf file
 */
static void
e1000g_force_speed_duplex(struct e1000g *Adapter)
{
	int forced;
	int propval;
	struct e1000_mac_info *mac = &Adapter->shared.mac;
	struct e1000_phy_info *phy = &Adapter->shared.phy;

	/*
	 * get value out of config file
	 */
	(void) e1000g_get_prop(Adapter, "ForceSpeedDuplex",
	    GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY, &forced);

	switch (forced) {
	case GDIAG_10_HALF:
		/*
		 * Disable Auto Negotiation
		 */
		mac->autoneg = B_FALSE;
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case GDIAG_10_FULL:
		/*
		 * Disable Auto Negotiation
		 */
		mac->autoneg = B_FALSE;
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case GDIAG_100_HALF:
		/*
		 * Disable Auto Negotiation
		 */
		mac->autoneg = B_FALSE;
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case GDIAG_100_FULL:
		/*
		 * Disable Auto Negotiation
		 */
		mac->autoneg = B_FALSE;
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case GDIAG_1000_FULL:
		/*
		 * The gigabit spec requires autonegotiation. Therefore,
		 * when the user wants to force the speed to 1000Mbps, we
		 * enable AutoNeg, but only allow the hardware to advertise
		 * 1000Mbps. This is different from 10/100 operation, where
		 * we are allowed to link without any negotiation.
		 */
		mac->autoneg = B_TRUE;
		phy->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	default:	/* obey the setting of AutoNegAdvertised */
		mac->autoneg = B_TRUE;
		(void) e1000g_get_prop(Adapter, "AutoNegAdvertised",
		    0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
		    AUTONEG_ADVERTISE_SPEED_DEFAULT, &propval);
		phy->autoneg_advertised = (uint16_t)propval;
		break;
	}
}

/*
 * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
 *
 * This function reads MaxFrameSize from e1000g.conf
 */
static void
e1000g_get_max_frame_size(struct e1000g *Adapter)
{
	int max_frame;

	/*
	 * get value out of config file
	 */
	(void) e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0,
	    &max_frame);

	switch (max_frame) {
	case 0:
		Adapter->default_mtu = ETHERMTU;
		break;
	case 1:
		Adapter->default_mtu = FRAME_SIZE_UPTO_4K -
		    sizeof (struct ether_vlan_header) - ETHERFCSL;
		break;
	case 2:
		Adapter->default_mtu = FRAME_SIZE_UPTO_8K -
		    sizeof (struct ether_vlan_header) - ETHERFCSL;
		break;
	case 3:
		Adapter->default_mtu = FRAME_SIZE_UPTO_16K -
		    sizeof (struct ether_vlan_header) - ETHERFCSL;
		break;
	default:
		Adapter->default_mtu = ETHERMTU;
		break;
	}

	/*
	 * If the user-configured MTU is larger than the device's maximum MTU,
	 * the MTU is set to the device's maximum value.
	 */
	if (Adapter->default_mtu > Adapter->max_mtu)
		Adapter->default_mtu = Adapter->max_mtu;

	Adapter->max_frame_size = e1000g_mtu2maxframe(Adapter->default_mtu);
}

/*
 * e1000g_pch_limits - Apply limits of the PCH silicon type
 *
 * At any frame size larger than the ethernet default,
 * prevent linking at 10/100 speeds.
 */
static void
e1000g_pch_limits(struct e1000g *Adapter)
{
	struct e1000_hw *hw = &Adapter->shared;

	/* only applies to PCH silicon type */
	if (hw->mac.type != e1000_pchlan && hw->mac.type != e1000_pch2lan)
		return;

	/* only applies to frames larger than ethernet default */
	if (Adapter->max_frame_size > DEFAULT_FRAME_SIZE) {
		hw->mac.autoneg = B_TRUE;
		hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;

		Adapter->param_adv_autoneg = 1;
		Adapter->param_adv_1000fdx = 1;

		Adapter->param_adv_100fdx = 0;
		Adapter->param_adv_100hdx = 0;
		Adapter->param_adv_10fdx = 0;
		Adapter->param_adv_10hdx = 0;

		e1000g_param_sync(Adapter);
	}
}

/*
 * e1000g_mtu2maxframe - convert given MTU to maximum frame size
 */
static uint32_t
e1000g_mtu2maxframe(uint32_t mtu)
{
	uint32_t maxframe;

	maxframe = mtu + sizeof (struct ether_vlan_header) + ETHERFCSL;

	return (maxframe);
}
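
/*
 * For example, e1000g_mtu2maxframe() maps the standard 1500-byte Ethernet
 * MTU to 1500 + 18 (VLAN-tagged Ethernet header) + 4 (FCS) = 1522 bytes.
 */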
static void
arm_watchdog_timer(struct e1000g *Adapter)
{
	Adapter->watchdog_tid =
	    timeout(e1000g_local_timer,
	    (void *)Adapter, 1 * drv_usectohz(1000000));
}
#pragma inline(arm_watchdog_timer)
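
/*
 * Watchdog timer control: the enable/disable/start/restart/stop helpers
 * below drive the one-second watchdog. "enabled" reflects the
 * administrative state and "started" whether a timeout is currently
 * pending; both are protected by watchdog_lock, and untimeout() is only
 * called after the lock has been dropped.
 */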
static void
enable_watchdog_timer(struct e1000g *Adapter)
{
	mutex_enter(&Adapter->watchdog_lock);

	if (!Adapter->watchdog_timer_enabled) {
		Adapter->watchdog_timer_enabled = B_TRUE;
		Adapter->watchdog_timer_started = B_TRUE;
		arm_watchdog_timer(Adapter);
	}

	mutex_exit(&Adapter->watchdog_lock);
}

static void
disable_watchdog_timer(struct e1000g *Adapter)
{
	timeout_id_t tid;

	mutex_enter(&Adapter->watchdog_lock);

	Adapter->watchdog_timer_enabled = B_FALSE;
	Adapter->watchdog_timer_started = B_FALSE;
	tid = Adapter->watchdog_tid;
	Adapter->watchdog_tid = 0;

	mutex_exit(&Adapter->watchdog_lock);

	if (tid != 0)
		(void) untimeout(tid);
}

static void
start_watchdog_timer(struct e1000g *Adapter)
{
	mutex_enter(&Adapter->watchdog_lock);

	if (Adapter->watchdog_timer_enabled) {
		if (!Adapter->watchdog_timer_started) {
			Adapter->watchdog_timer_started = B_TRUE;
			arm_watchdog_timer(Adapter);
		}
	}

	mutex_exit(&Adapter->watchdog_lock);
}

static void
restart_watchdog_timer(struct e1000g *Adapter)
{
	mutex_enter(&Adapter->watchdog_lock);

	if (Adapter->watchdog_timer_started)
		arm_watchdog_timer(Adapter);

	mutex_exit(&Adapter->watchdog_lock);
}

static void
stop_watchdog_timer(struct e1000g *Adapter)
{
	timeout_id_t tid;

	mutex_enter(&Adapter->watchdog_lock);

	Adapter->watchdog_timer_started = B_FALSE;
	tid = Adapter->watchdog_tid;
	Adapter->watchdog_tid = 0;

	mutex_exit(&Adapter->watchdog_lock);

	if (tid != 0)
		(void) untimeout(tid);
}

static void
stop_link_timer(struct e1000g *Adapter)
{
	timeout_id_t tid;

	/* Disable the link timer */
	mutex_enter(&Adapter->link_lock);

	tid = Adapter->link_tid;
	Adapter->link_tid = 0;

	mutex_exit(&Adapter->link_lock);

	if (tid != 0)
		(void) untimeout(tid);
}

static void
stop_82547_timer(e1000g_tx_ring_t *tx_ring)
{
	timeout_id_t tid;

	/* Disable the tx timer for 82547 chipset */
	mutex_enter(&tx_ring->tx_lock);

	tx_ring->timer_enable_82547 = B_FALSE;
	tid = tx_ring->timer_id_82547;
	tx_ring->timer_id_82547 = 0;

	mutex_exit(&tx_ring->tx_lock);

	if (tid != 0)
		(void) untimeout(tid);
}
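
/*
 * Interrupt mask helpers: interrupt causes are enabled by writing the
 * Interrupt Mask Set (IMS) register and disabled by writing the Interrupt
 * Mask Clear (IMC) register. The transmit descriptor write-back cause
 * (TXDW) is masked separately so that tx interrupts can be toggled at
 * runtime via tx_intr_enable.
 */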
static void
e1000g_clear_interrupt(struct e1000g *Adapter)
{
	E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
	    0xffffffff & ~E1000_IMS_RXSEQ);
}

static void
e1000g_mask_interrupt(struct e1000g *Adapter)
{
	E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
	    IMS_ENABLE_MASK & ~E1000_IMS_TXDW);

	if (Adapter->tx_intr_enable)
		e1000g_mask_tx_interrupt(Adapter);
}

/*
 * This routine is called by e1000g_quiesce(), therefore must not block.
 */
static void
e1000g_clear_all_interrupts(struct e1000g *Adapter)
{
	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
}

static void
e1000g_mask_tx_interrupt(struct e1000g *Adapter)
{
	E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW);
}

static void
e1000g_clear_tx_interrupt(struct e1000g *Adapter)
{
	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW);
}
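
/*
 * e1000g_smartspeed - 1000Base-T SmartSpeed workaround
 *
 * When a master/slave configuration fault keeps a gigabit link from coming
 * up, temporarily toggle manual master/slave configuration and restart
 * autonegotiation so the link can eventually be established; the
 * smartspeed counter paces the sequence and is reset after
 * E1000_SMARTSPEED_MAX iterations.
 */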
static void
e1000g_smartspeed(struct e1000g *Adapter)
{
	struct e1000_hw *hw = &Adapter->shared;
	uint16_t phy_status;
	uint16_t phy_ctrl;

	/*
	 * If we're not T-or-T, or we're not autoneg'ing, or we're not
	 * advertising 1000Full, we don't even use the workaround
	 */
	if ((hw->phy.type != e1000_phy_igp) ||
	    !hw->mac.autoneg ||
	    !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	/*
	 * True if this is the first call of this function or after every
	 * 30 seconds of not having link
	 */
	if (Adapter->smartspeed == 0) {
		/*
		 * If Master/Slave config fault is asserted twice, we
		 * assume back-to-back
		 */
		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;

		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		/*
		 * We're assuming back-2-back because our status register
		 * insists! there's a fault in the master/slave
		 * relationship that was "negotiated"
		 */
		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		/*
		 * Is the phy configured for manual configuration of
		 * master/slave?
		 */
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			/*
			 * Yes. Then disable manual configuration (enable
			 * auto configuration) of master/slave
			 */
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			(void) e1000_write_phy_reg(hw,
			    PHY_1000T_CTRL, phy_ctrl);
			/*
			 * Effectively starting the clock
			 */
			Adapter->smartspeed++;
			/*
			 * Restart autonegotiation
			 */
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
				(void) e1000_write_phy_reg(hw,
				    PHY_CONTROL, phy_ctrl);
			}
		}
		return;
		/*
		 * Has 6 seconds transpired still without link? Remember,
		 * you should reset the smartspeed counter once you obtain
		 * link
		 */
	} else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/*
		 * Yes. Remember, we did at the start determine that
		 * there's a master/slave configuration fault, so we're
		 * still assuming there's someone on the other end, but we
		 * just haven't yet been able to talk to it. We then
		 * re-enable auto configuration of master/slave to see if
		 * we're running 2/3 pair cables.
		 */
		/*
		 * If still no link, perhaps using 2/3 pair cable
		 */
		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		(void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		/*
		 * Restart autoneg with phy enabled for manual
		 * configuration of master/slave
		 */
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
			phy_ctrl |=
			    (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
			(void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
		}
		/*
		 * Hopefully, there are no more faults and we've obtained
		 * link
		 */
	}
	/*
	 * Restart process after E1000_SMARTSPEED_MAX iterations (30
	 * seconds)
	 */
	if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		Adapter->smartspeed = 0;
}
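
/*
 * is_valid_mac_addr - reject the all-zeros and all-ones (broadcast)
 * Ethernet addresses; anything else is considered valid here.
 */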
static boolean_t
is_valid_mac_addr(uint8_t *mac_addr)
{
	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
	const uint8_t addr_test2[6] =
	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
		return (B_FALSE);

	return (B_TRUE);
}

/*
 * e1000g_stall_check - check for tx stall
 *
 * This function checks if the adapter is stalled (in transmit).
 *
 * It is called each time the watchdog timeout is invoked.
 * If the transmit descriptor reclaim continuously fails,
 * the watchdog value will increment by 1. If the watchdog
 * value exceeds the threshold, the adapter is assumed to
 * have stalled and needs to be reset.
 */
static boolean_t
e1000g_stall_check(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;

	tx_ring = Adapter->tx_ring;

	if (Adapter->link_state != LINK_STATE_UP)
		return (B_FALSE);

	(void) e1000g_recycle(tx_ring);

	if (Adapter->stall_flag)
		return (B_TRUE);

	return (B_FALSE);
}
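
/*
 * e1000g_pp_ioctl - diagnostic peek/poke ioctl
 *
 * Validates an e1000g_peekpoke_t request and dispatches it to the
 * register or driver-memory peek/poke helpers below.
 */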
static enum ioc_reply
e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
{
	void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
	e1000g_peekpoke_t *ppd;
	uint64_t mem_va;
	uint64_t maxoff;
	boolean_t peek;

	switch (iocp->ioc_cmd) {

	case E1000G_IOC_REG_PEEK:
		peek = B_TRUE;
		break;

	case E1000G_IOC_REG_POKE:
		peek = B_FALSE;
		break;

	default:
		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
		    "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
		    iocp->ioc_cmd);
		return (IOC_INVAL);
	}

	/*
	 * Validate format of ioctl
	 */
	if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
		return (IOC_INVAL);
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr;

	/*
	 * Validate request parameters
	 */
	switch (ppd->pp_acc_space) {

	default:
		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
		    "e1000g_diag_ioctl: invalid access space 0x%X\n",
		    ppd->pp_acc_space);
		return (IOC_INVAL);

	case E1000G_PP_SPACE_REG:
		/*
		 * Memory-mapped I/O space
		 */
		ASSERT(ppd->pp_acc_size == 4);
		if (ppd->pp_acc_size != 4)
			return (IOC_INVAL);

		if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
			return (IOC_INVAL);

		mem_va = 0;
		maxoff = 0x10000;
		ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
		break;

	case E1000G_PP_SPACE_E1000G:
		/*
		 * E1000g data structure!
		 */
		mem_va = (uintptr_t)e1000gp;
		maxoff = sizeof (struct e1000g);
		ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
		break;
	}

	if (ppd->pp_acc_offset >= maxoff)
		return (IOC_INVAL);

	if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
		return (IOC_INVAL);

	ppd->pp_acc_offset += mem_va;
	(*ppfn)(e1000gp, ppd);
	return (peek ? IOC_REPLY : IOC_ACK);
}

static void
e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
{
	ddi_acc_handle_t handle;
	uint32_t *regaddr;

	handle = e1000gp->osdep.reg_handle;
	regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
	    (uintptr_t)ppd->pp_acc_offset);

	ppd->pp_acc_data = ddi_get32(handle, regaddr);
}

static void
e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
{
	ddi_acc_handle_t handle;
	uint32_t *regaddr;
	uint32_t value;

	handle = e1000gp->osdep.reg_handle;
	regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
	    (uintptr_t)ppd->pp_acc_offset);
	value = (uint32_t)ppd->pp_acc_data;

	ddi_put32(handle, regaddr, value);
}

static void
e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
{
	uint64_t value;
	void *vaddr;

	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;

	switch (ppd->pp_acc_size) {
	case 1:
		value = *(uint8_t *)vaddr;
		break;

	case 2:
		value = *(uint16_t *)vaddr;
		break;

	case 4:
		value = *(uint32_t *)vaddr;
		break;

	case 8:
		value = *(uint64_t *)vaddr;
		break;
	}

	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
	    "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
	    (void *)e1000gp, (void *)ppd, value, vaddr);

	ppd->pp_acc_data = value;
}

static void
e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
{
	uint64_t value;
	void *vaddr;

	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
	value = ppd->pp_acc_data;

	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
	    "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
	    (void *)e1000gp, (void *)ppd, value, vaddr);

	switch (ppd->pp_acc_size) {
	case 1:
		*(uint8_t *)vaddr = (uint8_t)value;
		break;

	case 2:
		*(uint16_t *)vaddr = (uint16_t)value;
		break;

	case 4:
		*(uint32_t *)vaddr = (uint32_t)value;
		break;

	case 8:
		*(uint64_t *)vaddr = (uint64_t)value;
		break;
	}
}
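
/*
 * Loopback support: the lb_property_t entries below describe the loopback
 * modes that can be reported to and selected by the LB_* ioctls, depending
 * on what the PHY/MAC combination supports.
 */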
static lb_property_t lb_normal =
	{ normal, "normal", E1000G_LB_NONE };
static lb_property_t lb_external1000 =
	{ external, "1000Mbps", E1000G_LB_EXTERNAL_1000 };
static lb_property_t lb_external100 =
	{ external, "100Mbps", E1000G_LB_EXTERNAL_100 };
static lb_property_t lb_external10 =
	{ external, "10Mbps", E1000G_LB_EXTERNAL_10 };
static lb_property_t lb_phy =
	{ internal, "PHY", E1000G_LB_INTERNAL_PHY };

static enum ioc_reply
e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	struct e1000_hw *hw;
	uint32_t *lbmp;
	uint32_t size;
	uint32_t value;

	hw = &Adapter->shared;

	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	if (!e1000g_check_loopback_support(hw)) {
		e1000g_log(NULL, CE_WARN,
		    "Loopback is not supported on e1000g%d", Adapter->instance);
		return (IOC_INVAL);
	}

	switch (iocp->ioc_cmd) {
	default:
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		size = sizeof (lb_info_sz_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		rw_enter(&Adapter->chip_lock, RW_WRITER);
		e1000g_get_phy_state(Adapter);

		/*
		 * Workaround for hardware faults. In order to get a stable
		 * state of phy, we will wait for a specific interval and
		 * try again. The time delay is an experiential value based
		 * on testing.
		 */
		e1000g_get_phy_state(Adapter);
		rw_exit(&Adapter->chip_lock);

		value = sizeof (lb_normal);
		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
		    (hw->phy.media_type == e1000_media_type_fiber) ||
		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
			value += sizeof (lb_phy);
			switch (hw->mac.type) {
			case e1000_80003es2lan:
				value += sizeof (lb_external1000);
				break;
			}
		}
		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
			value += sizeof (lb_external100);
		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
			value += sizeof (lb_external10);

		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbsp = value;
		break;

	case LB_GET_INFO:
		value = sizeof (lb_normal);
		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
		    (hw->phy.media_type == e1000_media_type_fiber) ||
		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
			value += sizeof (lb_phy);
			switch (hw->mac.type) {
			case e1000_80003es2lan:
				value += sizeof (lb_external1000);
				break;
			}
		}
		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
			value += sizeof (lb_external100);
		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
			value += sizeof (lb_external10);

		size = value;
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		value = 0;
		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
		lbpp[value++] = lb_normal;
		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
		    (hw->phy.media_type == e1000_media_type_fiber) ||
		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
			lbpp[value++] = lb_phy;
			switch (hw->mac.type) {
			case e1000_80003es2lan:
				lbpp[value++] = lb_external1000;
				break;
			}
		}
		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
			lbpp[value++] = lb_external100;
		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
			lbpp[value++] = lb_external10;
		break;

	case LB_GET_MODE:
		size = sizeof (uint32_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbmp = Adapter->loopback_mode;
		break;

	case LB_SET_MODE:
		size = 0;
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		if (!e1000g_set_loopback_mode(Adapter, *lbmp))
			return (IOC_INVAL);
		break;
	}

	iocp->ioc_count = size;
	iocp->ioc_error = 0;

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		return (IOC_INVAL);
	}

	return (IOC_REPLY);
}
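
/*
 * e1000g_check_loopback_support - report whether the MAC type is one on
 * which the loopback modes above can be used.
 */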
static boolean_t
e1000g_check_loopback_support(struct e1000_hw *hw)
{
	switch (hw->mac.type) {
	case e1000_82545_rev_3:
	case e1000_82546_rev_3:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
	case e1000_80003es2lan:
	case e1000_ich10lan:
		return (B_TRUE);
	}
	return (B_FALSE);
}

static boolean_t
e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
{
	struct e1000_hw *hw;
	int i, times;
	boolean_t link_up;

	if (mode == Adapter->loopback_mode)
		return (B_TRUE);

	hw = &Adapter->shared;
	times = 0;

	Adapter->loopback_mode = mode;

	if (mode == E1000G_LB_NONE) {
		/* Reset the chip */
		hw->phy.autoneg_wait_to_complete = B_TRUE;
		(void) e1000g_reset_adapter(Adapter);
		hw->phy.autoneg_wait_to_complete = B_FALSE;
		return (B_TRUE);
	}

again:
	rw_enter(&Adapter->chip_lock, RW_WRITER);

	switch (mode) {
	default:
		rw_exit(&Adapter->chip_lock);
		return (B_FALSE);

	case E1000G_LB_EXTERNAL_1000:
		e1000g_set_external_loopback_1000(Adapter);
		break;

	case E1000G_LB_EXTERNAL_100:
		e1000g_set_external_loopback_100(Adapter);
		break;

	case E1000G_LB_EXTERNAL_10:
		e1000g_set_external_loopback_10(Adapter);
		break;

	case E1000G_LB_INTERNAL_PHY:
		e1000g_set_internal_loopback(Adapter);
		break;
	}

	times++;

	rw_exit(&Adapter->chip_lock);

	/* Wait for link up */
	for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
		msec_delay(100);

	rw_enter(&Adapter->chip_lock, RW_WRITER);
	link_up = e1000g_link_up(Adapter);
	rw_exit(&Adapter->chip_lock);

	if (!link_up) {
		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
		    "Failed to get the link up");
		if (times < 2) {
			/* Reset the link */
			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Reset the link ...");
			(void) e1000g_reset_adapter(Adapter);
			goto again;
		}

		/*
		 * Reset driver to loopback none when set loopback failed
		 * for the second time.
		 */
		Adapter->loopback_mode = E1000G_LB_NONE;

		/* Reset the chip */
		hw->phy.autoneg_wait_to_complete = B_TRUE;
		(void) e1000g_reset_adapter(Adapter);
		hw->phy.autoneg_wait_to_complete = B_FALSE;

		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
		    "Set loopback mode failed, reset to loopback none");

		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * The following loopback settings are from Intel's technical
 * document - "How To Loopback". All the register settings and
 * time delay values are directly inherited from the document
 * without more explanations available.
 */
static void
e1000g_set_internal_loopback(struct e1000g *Adapter)
{
	struct e1000_hw *hw;
	uint32_t ctrl;
	uint32_t status;
	uint16_t phy_ctrl;
	uint16_t phy_reg;
	uint32_t txcw;

	hw = &Adapter->shared;

	/* Disable Smart Power Down */
	phy_spd_state(hw, B_FALSE);

	(void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
	phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
	phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;

	switch (hw->mac.type) {
	case e1000_82540:
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_82546_rev_3:
	case e1000_82573:
		/* Auto-MDI/MDIX off */
		(void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
		/* Reset PHY to update Auto-MDI/MDIX */
		(void) e1000_write_phy_reg(hw, PHY_CONTROL,
		    phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
		/* Reset PHY to auto-neg off and force 1000 */
		(void) e1000_write_phy_reg(hw, PHY_CONTROL,
		    phy_ctrl | MII_CR_RESET);
		/*
		 * Disable PHY receiver for 82540/545/546 and 82573 Family.
		 * See comments above e1000g_set_internal_loopback() for the
		 * details.
		 */
		(void) e1000_write_phy_reg(hw, 29, 0x001F);
		(void) e1000_write_phy_reg(hw, 30, 0x8FFC);
		(void) e1000_write_phy_reg(hw, 29, 0x001A);
		(void) e1000_write_phy_reg(hw, 30, 0x8FF0);
		break;
	case e1000_80003es2lan:
		(void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
		    0x1CC);
		/* Sets PCS loopback at 1Gbs */
		(void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
		    0x1046);
		break;
	}

	/*
	 * The following registers should be set for e1000_phy_bm phy type.
	 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy.
	 * For others, we do not need to set these registers.
	 */
	if (hw->phy.type == e1000_phy_bm) {
		/* Set Default MAC Interface speed to 1GB */
		(void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg);
		phy_reg &= ~0x0007;
		phy_reg |= 0x006;
		(void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg);
		/* Assert SW reset for above settings to take effect */
		(void) e1000_phy_commit(hw);
		/* Force Full Duplex */
		(void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
		(void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
		    phy_reg | 0x000C);
		/* Set Link Up (in force link) */
		(void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg);
		(void) e1000_write_phy_reg(hw, PHY_REG(776, 16),
		    phy_reg | 0x0040);
		(void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
		(void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
		    phy_reg | 0x0040);
		/* Set Early Link Enable */
		(void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg);
		(void) e1000_write_phy_reg(hw, PHY_REG(769, 20),
		    phy_reg | 0x0400);
	}

	/* Set loopback */
	(void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);

	/* Now set up the MAC to the same speed/duplex as the PHY. */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
	ctrl |= (E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
	    E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
	    E1000_CTRL_FD);		/* Force Duplex to FULL */

	switch (hw->mac.type) {
	case e1000_82540:
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_82546_rev_3:
		/*
		 * For some serdes we'll need to commit the writes now
		 * so that the status is updated on link
		 */
		if (hw->phy.media_type == e1000_media_type_internal_serdes) {
			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
			ctrl = E1000_READ_REG(hw, E1000_CTRL);
		}

		if (hw->phy.media_type == e1000_media_type_copper) {
			/* Invert Loss of Signal */
			ctrl |= E1000_CTRL_ILOS;
		} else {
			/* Set ILOS on fiber nic if half duplex is detected */
			status = E1000_READ_REG(hw, E1000_STATUS);
			if ((status & E1000_STATUS_FD) == 0)
				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
		}
		break;

	case e1000_82571:
	case e1000_82572:
		/*
		 * The fiber/SerDes versions of this adapter do not contain an
		 * accessible PHY. Therefore, loopback beyond MAC must be done
		 * using SerDes analog loopback.
		 */
		if (hw->phy.media_type != e1000_media_type_copper) {
			/* Disable autoneg by setting bit 31 of TXCW to zero */
			txcw = E1000_READ_REG(hw, E1000_TXCW);
			txcw &= ~((uint32_t)1 << 31);
			E1000_WRITE_REG(hw, E1000_TXCW, txcw);

			/*
			 * Write 0x410 to Serdes Control register
			 * to enable Serdes analog loopback
			 */
			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
		}

		status = E1000_READ_REG(hw, E1000_STATUS);
		/* Set ILOS on fiber nic if half duplex is detected */
		if ((hw->phy.media_type == e1000_media_type_fiber) &&
		    ((status & E1000_STATUS_FD) == 0 ||
		    (status & E1000_STATUS_LU) == 0))
			ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
		else if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ctrl |= E1000_CTRL_SLU;
		break;

	case e1000_82573:
		ctrl |= E1000_CTRL_ILOS;
		break;

	case e1000_ich9lan:
	case e1000_ich10lan:
		ctrl |= E1000_CTRL_SLU;
		break;
	}

	if (hw->phy.type == e1000_phy_bm)
		ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS;

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
}

static void
e1000g_set_external_loopback_1000(struct e1000g *Adapter)
{
	struct e1000_hw *hw;
	uint32_t rctl;
	uint32_t ctrl_ext;
	uint32_t ctrl;
	uint32_t status;
	uint32_t txcw;
	uint16_t phydata;

	hw = &Adapter->shared;

	/* Disable Smart Power Down */
	phy_spd_state(hw, B_FALSE);

	switch (hw->mac.type) {
	case e1000_82571:
	case e1000_82572:
		switch (hw->phy.media_type) {
		case e1000_media_type_copper:
			/* Force link up (Must be done before the PHY writes) */
			ctrl = E1000_READ_REG(hw, E1000_CTRL);
			ctrl |= E1000_CTRL_SLU;	/* Force Link Up */
			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

			rctl = E1000_READ_REG(hw, E1000_RCTL);
			rctl |= (E1000_RCTL_EN |
			    E1000_RCTL_SBP |
			    E1000_RCTL_UPE |
			    E1000_RCTL_MPE |
			    E1000_RCTL_LPE |
			    E1000_RCTL_BAM);		/* 0x803E */
			E1000_WRITE_REG(hw, E1000_RCTL, rctl);

			ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
			ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
			    E1000_CTRL_EXT_SDP6_DATA |
			    E1000_CTRL_EXT_SDP3_DATA |
			    E1000_CTRL_EXT_SDP4_DIR |
			    E1000_CTRL_EXT_SDP6_DIR |
			    E1000_CTRL_EXT_SDP3_DIR);	/* 0x0DD0 */
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

			/*
			 * This sequence tunes the PHY's SDP and no customer
			 * settable values. For background, see comments above
			 * e1000g_set_internal_loopback().
			 */
			(void) e1000_write_phy_reg(hw, 0x0, 0x140);
			(void) e1000_write_phy_reg(hw, 0x9, 0x1A00);
			(void) e1000_write_phy_reg(hw, 0x12, 0xC10);
			(void) e1000_write_phy_reg(hw, 0x12, 0x1C10);
			(void) e1000_write_phy_reg(hw, 0x1F37, 0x76);
			(void) e1000_write_phy_reg(hw, 0x1F33, 0x1);
			(void) e1000_write_phy_reg(hw, 0x1F33, 0x0);

			(void) e1000_write_phy_reg(hw, 0x1F35, 0x65);
			(void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
			(void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
			(void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
			(void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
			break;

		case e1000_media_type_fiber:
		case e1000_media_type_internal_serdes:
			status = E1000_READ_REG(hw, E1000_STATUS);
			if (((status & E1000_STATUS_LU) == 0) ||
			    (hw->phy.media_type ==
			    e1000_media_type_internal_serdes)) {
				ctrl = E1000_READ_REG(hw, E1000_CTRL);
				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
				E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
			}

			/* Disable autoneg by setting bit 31 of TXCW to zero */
			txcw = E1000_READ_REG(hw, E1000_TXCW);
			txcw &= ~((uint32_t)1 << 31);
			E1000_WRITE_REG(hw, E1000_TXCW, txcw);

			/*
			 * Write 0x410 to Serdes Control register
			 * to enable Serdes analog loopback
			 */
			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
			break;
		}
		break;

	case e1000_80003es2lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		(void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata);
		(void) e1000_write_phy_reg(hw, GG82563_REG(6, 16),
		    phydata | (1 << 5));
		Adapter->param_adv_autoneg = 1;
		Adapter->param_adv_1000fdx = 1;
		(void) e1000g_reset_link(Adapter);
		break;
	}
}

static void
e1000g_set_external_loopback_100(struct e1000g *Adapter)
{
	struct e1000_hw *hw;
	uint32_t ctrl;
	uint16_t phy_ctrl;

	hw = &Adapter->shared;

	/* Disable Smart Power Down */
	phy_spd_state(hw, B_FALSE);

	phy_ctrl = (MII_CR_FULL_DUPLEX |
	    MII_CR_SPEED_100);

	/* Force 100/FD, reset PHY */
	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
	    phy_ctrl | MII_CR_RESET);	/* 0xA100 */

	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
	    phy_ctrl);	/* 0x2100 */

	/* Now setup the MAC to the same speed/duplex as the PHY. */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
	    E1000_CTRL_SPD_100 |	/* Force Speed to 100 */
	    E1000_CTRL_FD);		/* Force Duplex to FULL */

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
}

static void
e1000g_set_external_loopback_10(struct e1000g *Adapter)
{
	struct e1000_hw *hw;
	uint32_t ctrl;
	uint16_t phy_ctrl;

	hw = &Adapter->shared;

	/* Disable Smart Power Down */
	phy_spd_state(hw, B_FALSE);

	phy_ctrl = (MII_CR_FULL_DUPLEX |
	    MII_CR_SPEED_10);

	/* Force 10/FD, reset PHY */
	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
	    phy_ctrl | MII_CR_RESET);	/* 0x8100 */

	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
	    phy_ctrl);	/* 0x0100 */

	/* Now setup the MAC to the same speed/duplex as the PHY. */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
	    E1000_CTRL_SPD_10 |		/* Force Speed to 10 */
	    E1000_CTRL_FD);		/* Force Duplex to FULL */

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
}
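
/*
 * e1000g_add_intrs - register interrupts with the DDI interrupt framework
 *
 * Queries the supported interrupt types, prefers MSI on PCI Express
 * adapters (MSI is disabled on pre-82571 parts per Intel TA-160), and
 * falls back to legacy fixed interrupts when MSI registration fails.
 */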
static int
e1000g_add_intrs(struct e1000g *Adapter)
{
	dev_info_t *devinfo;
	int intr_types;
	int rc;

	devinfo = Adapter->dip;

	/* Get supported interrupt types */
	rc = ddi_intr_get_supported_types(devinfo, &intr_types);

	if (rc != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Get supported interrupt types failed: %d\n", rc);
		return (DDI_FAILURE);
	}

	/*
	 * Based on Intel Technical Advisory document (TA-160), there are some
	 * cases where some older Intel PCI-X NICs may "advertise" to the OS
	 * that they support MSI, but in fact have problems.
	 * So we should only enable MSI for PCI-E NICs and disable MSI for old
	 * and PCI-X NICs.
	 */
	if (Adapter->shared.mac.type < e1000_82571)
		Adapter->msi_enable = B_FALSE;

	if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) {
		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);

		if (rc != DDI_SUCCESS) {
			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
			    "Add MSI failed, trying Legacy interrupts\n");
		} else
			Adapter->intr_type = DDI_INTR_TYPE_MSI;
	}

	if ((Adapter->intr_type == 0) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);

		if (rc != DDI_SUCCESS) {
			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
			    "Add Legacy interrupts failed\n");
			return (DDI_FAILURE);
		}

		Adapter->intr_type = DDI_INTR_TYPE_FIXED;
	}

	if (Adapter->intr_type == 0) {
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "No interrupts registered\n");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * e1000g_intr_add() handles MSI/Legacy interrupts
 */
static int
e1000g_intr_add(struct e1000g *Adapter, int intr_type)
{
	dev_info_t *devinfo;
	int count, avail, actual;
	int x, y, rc, inum = 0;
	int flag;
	ddi_intr_handler_t *intr_handler;

	devinfo = Adapter->dip;

	/* get number of interrupts */
	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if ((rc != DDI_SUCCESS) || (count == 0)) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Get interrupt number failed. Return: %d, count: %d\n",
		    rc, count);
		return (DDI_FAILURE);
	}

	/* get number of available interrupts */
	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
	if ((rc != DDI_SUCCESS) || (avail == 0)) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Get interrupt available number failed. "
		    "Return: %d, available: %d\n", rc, avail);
		return (DDI_FAILURE);
	}

	if (avail < count) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Interrupts count: %d, available: %d\n",
		    count, avail);
	}

	/* Allocate an array of interrupt handles */
	Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
	Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);

	/* Set NORMAL behavior for both MSI and FIXED interrupt */
	flag = DDI_INTR_ALLOC_NORMAL;

	/* call ddi_intr_alloc() */
	rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
	    count, &actual, flag);

	if ((rc != DDI_SUCCESS) || (actual == 0)) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Allocate interrupts failed: %d\n", rc);

		kmem_free(Adapter->htable, Adapter->intr_size);
		return (DDI_FAILURE);
	}

	if (actual < count) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Interrupts requested: %d, received: %d\n",
		    count, actual);
	}

	Adapter->intr_cnt = actual;

	/* Get priority for first msi, assume remaining are all the same */
	rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);

	if (rc != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Get interrupt priority failed: %d\n", rc);

		/* Free already allocated intr */
		for (y = 0; y < actual; y++)
			(void) ddi_intr_free(Adapter->htable[y]);

		kmem_free(Adapter->htable, Adapter->intr_size);
		return (DDI_FAILURE);
	}

	/*
	 * In Legacy Interrupt mode, for PCI-Express adapters, we should
	 * use the interrupt service routine e1000g_intr_pciexpress()
	 * to avoid interrupt stealing when sharing interrupt with other
	 * devices.
	 */
	if (Adapter->shared.mac.type < e1000_82571)
		intr_handler = (ddi_intr_handler_t *)e1000g_intr;
	else
		intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress;

	/* Call ddi_intr_add_handler() */
	for (x = 0; x < actual; x++) {
		rc = ddi_intr_add_handler(Adapter->htable[x],
		    intr_handler, (caddr_t)Adapter, NULL);

		if (rc != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Add interrupt handler failed: %d\n", rc);

			/* Remove already added handler */
			for (y = 0; y < x; y++)
				(void) ddi_intr_remove_handler(
				    Adapter->htable[y]);

			/* Free already allocated intr */
			for (y = 0; y < actual; y++)
				(void) ddi_intr_free(Adapter->htable[y]);

			kmem_free(Adapter->htable, Adapter->intr_size);
			return (DDI_FAILURE);
		}
	}

	rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);

	if (rc != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Get interrupt cap failed: %d\n", rc);

		/* Free already allocated intr */
		for (y = 0; y < actual; y++) {
			(void) ddi_intr_remove_handler(Adapter->htable[y]);
			(void) ddi_intr_free(Adapter->htable[y]);
		}

		kmem_free(Adapter->htable, Adapter->intr_size);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static int
e1000g_rem_intrs(struct e1000g *Adapter)
{
	int x;
	int rc;

	for (x = 0; x < Adapter->intr_cnt; x++) {
		rc = ddi_intr_remove_handler(Adapter->htable[x]);
		if (rc != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Remove intr handler failed: %d\n", rc);
			return (DDI_FAILURE);
		}

		rc = ddi_intr_free(Adapter->htable[x]);
		if (rc != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Free intr failed: %d\n", rc);
			return (DDI_FAILURE);
		}
	}

	kmem_free(Adapter->htable, Adapter->intr_size);

	return (DDI_SUCCESS);
}

static int
e1000g_enable_intrs(struct e1000g *Adapter)
{
	int x;
	int rc;

	/* Enable interrupts */
	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI */
		rc = ddi_intr_block_enable(Adapter->htable,
		    Adapter->intr_cnt);
		if (rc != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Enable block intr failed: %d\n", rc);
			return (DDI_FAILURE);
		}
	} else {
		/* Call ddi_intr_enable() for Legacy/MSI non block enable */
		for (x = 0; x < Adapter->intr_cnt; x++) {
			rc = ddi_intr_enable(Adapter->htable[x]);
			if (rc != DDI_SUCCESS) {
				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
				    "Enable intr failed: %d\n", rc);
				return (DDI_FAILURE);
			}
		}
	}

	return (DDI_SUCCESS);
}

static int
e1000g_disable_intrs(struct e1000g *Adapter)
{
	int x;
	int rc;

	/* Disable all interrupts */
	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
		rc = ddi_intr_block_disable(Adapter->htable,
		    Adapter->intr_cnt);
		if (rc != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Disable block intr failed: %d\n", rc);
			return (DDI_FAILURE);
		}
	} else {
		for (x = 0; x < Adapter->intr_cnt; x++) {
			rc = ddi_intr_disable(Adapter->htable[x]);
			if (rc != DDI_SUCCESS) {
				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
				    "Disable intr failed: %d\n", rc);
				return (DDI_FAILURE);
			}
		}
	}

	return (DDI_SUCCESS);
}

/*
 * e1000g_get_phy_state - get the state of PHY registers, save in the adapter
 */
static void
e1000g_get_phy_state(struct e1000g *Adapter)
{
	struct e1000_hw *hw = &Adapter->shared;

	if (hw->phy.media_type == e1000_media_type_copper) {
		(void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
		(void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
		(void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
		    &Adapter->phy_an_adv);
		(void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP,
		    &Adapter->phy_an_exp);
		(void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
		    &Adapter->phy_ext_status);
		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL,
		    &Adapter->phy_1000t_ctrl);
		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
		    &Adapter->phy_1000t_status);
		(void) e1000_read_phy_reg(hw, PHY_LP_ABILITY,
		    &Adapter->phy_lp_able);

		Adapter->param_autoneg_cap =
		    (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
		Adapter->param_pause_cap =
		    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
		Adapter->param_asym_pause_cap =
		    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
		Adapter->param_1000fdx_cap =
		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
		Adapter->param_1000hdx_cap =
		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
		    (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
		Adapter->param_100t4_cap =
		    (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
		Adapter->param_100fdx_cap =
		    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
		Adapter->param_100hdx_cap =
		    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
		    (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
		Adapter->param_10fdx_cap =
		    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
		Adapter->param_10hdx_cap =
		    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;

		Adapter->param_adv_autoneg = hw->mac.autoneg;
		Adapter->param_adv_pause =
		    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
		Adapter->param_adv_asym_pause =
		    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
		Adapter->param_adv_1000hdx =
		    (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
		Adapter->param_adv_100t4 =
		    (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
		if (Adapter->param_adv_autoneg == 1) {
			Adapter->param_adv_1000fdx =
			    (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS)
			    ? 1 : 0;
			Adapter->param_adv_100fdx =
			    (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS)
			    ? 1 : 0;
			Adapter->param_adv_100hdx =
			    (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS)
			    ? 1 : 0;
			Adapter->param_adv_10fdx =
			    (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
			Adapter->param_adv_10hdx =
			    (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
		}

		Adapter->param_lp_autoneg =
		    (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
		Adapter->param_lp_pause =
		    (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
		Adapter->param_lp_asym_pause =
		    (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
		Adapter->param_lp_1000fdx =
		    (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
		Adapter->param_lp_1000hdx =
		    (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
		Adapter->param_lp_100t4 =
		    (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
		Adapter->param_lp_100fdx =
		    (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
		Adapter->param_lp_100hdx =
		    (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
		Adapter->param_lp_10fdx =
		    (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
		Adapter->param_lp_10hdx =
		    (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
	} else {
		/*
		 * 1Gig Fiber adapter only offers 1Gig Full Duplex. Meaning,
		 * it can only work with 1Gig Full Duplex Link Partner.
		 */
		Adapter->param_autoneg_cap = 0;
		Adapter->param_pause_cap = 1;
		Adapter->param_asym_pause_cap = 1;
		Adapter->param_1000fdx_cap = 1;
		Adapter->param_1000hdx_cap = 0;
		Adapter->param_100t4_cap = 0;
		Adapter->param_100fdx_cap = 0;
		Adapter->param_100hdx_cap = 0;
		Adapter->param_10fdx_cap = 0;
		Adapter->param_10hdx_cap = 0;

		Adapter->param_adv_autoneg = 0;
		Adapter->param_adv_pause = 1;
		Adapter->param_adv_asym_pause = 1;
		Adapter->param_adv_1000fdx = 1;
		Adapter->param_adv_1000hdx = 0;
		Adapter->param_adv_100t4 = 0;
		Adapter->param_adv_100fdx = 0;
		Adapter->param_adv_100hdx = 0;
		Adapter->param_adv_10fdx = 0;
		Adapter->param_adv_10hdx = 0;

		Adapter->param_lp_autoneg = 0;
		Adapter->param_lp_pause = 0;
		Adapter->param_lp_asym_pause = 0;
		Adapter->param_lp_1000fdx = 0;
		Adapter->param_lp_1000hdx = 0;
		Adapter->param_lp_100t4 = 0;
		Adapter->param_lp_100fdx = 0;
		Adapter->param_lp_100hdx = 0;
		Adapter->param_lp_10fdx = 0;
		Adapter->param_lp_10hdx = 0;
	}
}
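
/*
 * FMA support: the two helpers below fetch (and for access handles, clear)
 * the fault status recorded against a DDI access or DMA handle so callers
 * can compare it with DDI_FM_OK.
 */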
int
e1000g_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	return (de.fme_status);
}

int
e1000g_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
	return (de.fme_status);
}

/*
 * The IO fault service error handling callback function
 */
static int
e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

static void
e1000g_fm_init(struct e1000g *Adapter)
{
	ddi_iblock_cookie_t iblk;
	int fma_dma_flag;

	/* Only register with IO Fault Services if we have some capability */
	if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
		e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
		fma_dma_flag = 1;
	} else {
		fma_dma_flag = 0;
	}

	(void) e1000g_set_fma_flags(fma_dma_flag);

	if (Adapter->fm_capabilities) {

		/* Register capabilities with IO Fault Services */
		ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
			pci_ereport_setup(Adapter->dip);

		/*
		 * Register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
			ddi_fm_handler_register(Adapter->dip,
			    e1000g_fm_error_cb, (void*) Adapter);
	}
}

static void
e1000g_fm_fini(struct e1000g *Adapter)
{
	/* Only unregister FMA capabilities if we registered some */
	if (Adapter->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
			pci_ereport_teardown(Adapter->dip);

		/*
		 * Un-register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
			ddi_fm_handler_unregister(Adapter->dip);

		/* Unregister from IO Fault Services */
		mutex_enter(&e1000g_rx_detach_lock);
		ddi_fm_fini(Adapter->dip);
		if (Adapter->priv_dip != NULL) {
			DEVI(Adapter->priv_dip)->devi_fmhdl = NULL;
		}
		mutex_exit(&e1000g_rx_detach_lock);
	}
}

void
e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
		ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not be
 * blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
e1000g_quiesce(dev_info_t *devinfo)
{
	struct e1000g *Adapter;

	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);

	if (Adapter == NULL)
		return (DDI_FAILURE);

	e1000g_clear_all_interrupts(Adapter);

	(void) e1000_reset_hw(&Adapter->shared);

	/* Setup our HW Tx Head & Tail descriptor pointers */
	E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
	E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);

	/* Setup our HW Rx Head & Tail descriptor pointers */
	E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
	E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);

	return (DDI_SUCCESS);
}

/*
 * synchronize the adv* and en* parameters.
 *
 * See comments in <sys/dld.h> for details of the *_en_*
 * parameters. The usage of ndd for setting adv parameters will
 * synchronize all the en parameters with the e1000g parameters,
 * implicitly disabling any settings made via dladm.
 */
static void
e1000g_param_sync(struct e1000g *Adapter)
{
	Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
	Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
	Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
	Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
	Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
	Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
}

/*
 * e1000g_get_driver_control - tell manageability firmware that the driver
 * has control.
 */
static void
e1000g_get_driver_control(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	uint32_t swsm;

	/* tell manageability firmware the driver has taken over */
	switch (hw->mac.type) {
	case e1000_82573:
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
		break;
	case e1000_80003es2lan:
	case e1000_ich10lan:
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		/* no manageability firmware: do nothing */
		break;
	}
}

/*
 * e1000g_release_driver_control - tell manageability firmware that the driver
 * has released control.
 */
static void
e1000g_release_driver_control(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	uint32_t swsm;

	/* tell manageability firmware the driver has released control */
	switch (hw->mac.type) {
	case e1000_82573:
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
		break;
	case e1000_80003es2lan:
	case e1000_ich10lan:
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		/* no manageability firmware: do nothing */
		break;
	}
}

/*
 * Restore e1000g promiscuous mode.
 */
static void
e1000g_restore_promisc(struct e1000g *Adapter)
{
	if (Adapter->e1000g_promisc) {
		uint32_t rctl;

		rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
		E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);