1 /*
2 * Copyright (c) 2011 Jason King.
3 * Copyright (c) 2000 Berkeley Software Design, Inc.
4 * Copyright (c) 1997, 1998, 1999, 2000
5 * Bill Paul <wpaul@osd.bsdi.com>. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 */
35 #include <sys/varargs.h>
36 #include <sys/types.h>
37 #include <sys/modctl.h>
38 #include <sys/devops.h>
39 #include <sys/stream.h>
40 #include <sys/strsun.h>
41 #include <sys/cmn_err.h>
42 #include <sys/ethernet.h>
43 #include <sys/kmem.h>
44 #include <sys/crc32.h>
45 #include <sys/mii.h>
46 #include <sys/miiregs.h>
47 #include <sys/mac.h>
48 #include <sys/mac_ether.h>
49 #include <sys/ddi.h>
50 #include <sys/sunddi.h>
51 #include <sys/vlan.h>
52 #include <sys/pci.h>
53 #include <sys/conf.h>
55 #include "pcn.h"
56 #include "pcnimpl.h"
58 #define ETHERVLANMTU (ETHERMAX + 4)
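/*
 * Raw register accessors; pcn_regs and pcn_regshandle are the register
 * mapping and access handle set up with ddi_regs_map_setup() in
 * pcn_attach().
 */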
60 #define CSR_WRITE_4(pcnp, reg, val) \
61 ddi_put32(pcnp->pcn_regshandle, (uint32_t *)(pcnp->pcn_regs + reg), val)
63 #define CSR_WRITE_2(pcnp, reg, val) \
64 ddi_put16(pcnp->pcn_regshandle, (uint16_t *)(pcnp->pcn_regs + reg), val)
66 #define CSR_READ_4(pcnp, reg) \
67 ddi_get32(pcnp->pcn_regshandle, (uint32_t *)(pcnp->pcn_regs + reg))
69 #define CSR_READ_2(pcnp, reg) \
70 ddi_get16(pcnp->pcn_regshandle, (uint16_t *)(pcnp->pcn_regs + reg))
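/*
 * Read-modify-write helpers built on the indirect CSR/BCR accessors
 * defined later in this file; each underlying access is serialized
 * by pcn_reglock.
 */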
72 #define PCN_CSR_SETBIT(pcnp, reg, x) \
73 pcn_csr_write(pcnp, reg, pcn_csr_read(pcnp, reg) | (x))
75 #define PCN_CSR_CLRBIT(pcnp, reg, x) \
76 pcn_csr_write(pcnp, reg, pcn_csr_read(pcnp, reg) & ~(x))
78 #define PCN_BCR_SETBIT(pcnp, reg, x) \
79 pcn_bcr_write(pcnp, reg, pcn_bcr_read(pcnp, reg) | (x))
81 #define PCN_BCR_CLRBIT(pcnp, reg, x) \
82 pcn_bcr_write(pcnp, reg, pcn_bcr_read(pcnp, reg) & ~(x))
84 static int pcn_attach(dev_info_t *, ddi_attach_cmd_t);
85 static int pcn_detach(dev_info_t *, ddi_detach_cmd_t);
86 static int pcn_ddi_resume(dev_info_t *);
87 static int pcn_quiesce(dev_info_t *);
89 static void pcn_teardown(pcn_t *);
91 static int pcn_m_unicast(void *, const uint8_t *);
92 static int pcn_m_multicast(void *, boolean_t, const uint8_t *);
93 static int pcn_m_promisc(void *, boolean_t);
94 static mblk_t *pcn_m_tx(void *, mblk_t *);
95 static void pcn_m_ioctl(void *, queue_t *, mblk_t *);
96 static int pcn_m_stat(void *, uint_t, uint64_t *);
97 static int pcn_m_start(void *);
98 static void pcn_m_stop(void *);
99 static int pcn_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
100 void *);
101 static int pcn_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
102 const void *);
103 static void pcn_m_propinfo(void *, const char *, mac_prop_id_t,
104 mac_prop_info_handle_t);
105 static int pcn_watchdog(pcn_t *);
107 static unsigned pcn_intr(caddr_t);
109 static uint16_t pcn_mii_read(void *, uint8_t, uint8_t);
110 static void pcn_mii_write(void *, uint8_t, uint8_t, uint16_t);
111 static void pcn_mii_notify(void *, link_state_t);
113 static uint32_t pcn_csr_read(pcn_t *, uint32_t);
114 static uint16_t pcn_csr_read16(pcn_t *, uint32_t);
115 static void pcn_csr_write(pcn_t *, uint32_t, uint32_t);
117 static uint32_t pcn_bcr_read(pcn_t *, uint32_t);
118 static uint16_t pcn_bcr_read16(pcn_t *, uint32_t);
119 static void pcn_bcr_write(pcn_t *, uint32_t, uint32_t);
121 static boolean_t pcn_send(pcn_t *, mblk_t *);
123 static pcn_buf_t *pcn_allocbuf(pcn_t *);
124 static void pcn_destroybuf(pcn_buf_t *);
125 static int pcn_allocrxring(pcn_t *);
126 static int pcn_alloctxring(pcn_t *);
127 static void pcn_freetxring(pcn_t *);
128 static void pcn_freerxring(pcn_t *);
129 static void pcn_resetrings(pcn_t *);
130 static int pcn_initialize(pcn_t *, boolean_t);
131 static mblk_t *pcn_receive(pcn_t *);
132 static void pcn_resetall(pcn_t *);
133 static void pcn_startall(pcn_t *);
134 static void pcn_stopall(pcn_t *);
135 static void pcn_reclaim(pcn_t *);
136 static void pcn_getfactaddr(pcn_t *);
137 static int pcn_set_chipid(pcn_t *, uint32_t);
138 static const pcn_type_t *pcn_match(uint16_t, uint16_t);
139 static void pcn_start_timer(pcn_t *);
140 static void pcn_stop_timer(pcn_t *);
142 static void pcn_error(dev_info_t *, char *, ...);
144 void *pcn_ssp = NULL;
146 static uchar_t pcn_broadcast[ETHERADDRL] = {
147 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
148 };
150 static const pcn_type_t pcn_devs[] = {
151 { PCN_VENDORID, PCN_DEVICEID_PCNET, "AMD PCnet/PCI 10/100BaseTX" },
152 { PCN_VENDORID, PCN_DEVICEID_HOME, "AMD PCnet/Home HomePNA" },
153 { 0, 0, NULL }
154 };
156 static mii_ops_t pcn_mii_ops = {
157 MII_OPS_VERSION,
158 pcn_mii_read,
159 pcn_mii_write,
160 pcn_mii_notify,
161 NULL
162 };
164 static mac_callbacks_t pcn_m_callbacks = {
165 MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
166 pcn_m_stat,
167 pcn_m_start,
168 pcn_m_stop,
169 pcn_m_promisc,
170 pcn_m_multicast,
171 pcn_m_unicast,
172 pcn_m_tx,
173 NULL,
174 pcn_m_ioctl,
175 NULL, /* mc_getcapab */
176 NULL, /* mc_open */
177 NULL, /* mc_close */
178 pcn_m_setprop,
179 pcn_m_getprop,
180 pcn_m_propinfo
181 };
183 DDI_DEFINE_STREAM_OPS(pcn_devops, nulldev, nulldev, pcn_attach, pcn_detach,
184 nodev, NULL, D_MP, NULL, pcn_quiesce);
186 static struct modldrv pcn_modldrv = {
187 &mod_driverops,
188 "AMD PCnet",
189 &pcn_devops
190 };
192 static struct modlinkage pcn_modlinkage = {
193 MODREV_1,
194 { &pcn_modldrv, NULL }
195 };
197 static ddi_device_acc_attr_t pcn_devattr = {
198 DDI_DEVICE_ATTR_V0,
199 DDI_STRUCTURE_LE_ACC,
200 DDI_STRICTORDER_ACC
201 };
203 static ddi_device_acc_attr_t pcn_bufattr = {
204 DDI_DEVICE_ATTR_V0,
205 DDI_NEVERSWAP_ACC,
206 DDI_STRICTORDER_ACC
207 };
209 static ddi_dma_attr_t pcn_dma_attr = {
210 DMA_ATTR_V0, /* dm_attr_version */
211 0, /* dma_attr_addr_lo */
212 0xFFFFFFFFU, /* dma_attr_addr_hi */
213 0x7FFFFFFFU, /* dma_attr_count_max */
214 4, /* dma_attr_align */
215 0x3F, /* dma_attr_burstsizes */
216 1, /* dma_attr_minxfer */
217 0xFFFFFFFFU, /* dma_attr_maxxfer */
218 0xFFFFFFFFU, /* dma_attr_seg */
219 1, /* dma_attr_sgllen */
220 1, /* dma_attr_granular */
221 0 /* dma_attr_flags */
222 };
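/*
 * Descriptor rings get a stricter attribute: per the PCnet datasheet,
 * descriptors in 32-bit (SWSTYLE 3) mode must sit on 16-byte boundaries,
 * hence dma_attr_align of 16 below.
 */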
224 static ddi_dma_attr_t pcn_dmadesc_attr = {
225 DMA_ATTR_V0, /* dm_attr_version */
226 0, /* dma_attr_addr_lo */
227 0xFFFFFFFFU, /* dma_attr_addr_hi */
228 0x7FFFFFFFU, /* dma_attr_count_max */
229 16, /* dma_attr_align */
230 0x3F, /* dma_attr_burstsizes */
231 1, /* dma_attr_minxfer */
232 0xFFFFFFFFU, /* dma_attr_maxxfer */
233 0xFFFFFFFFU, /* dma_attr_seg */
234 1, /* dma_attr_sgllen */
235 1, /* dma_attr_granular */
236 0 /* dma_attr_flags */
237 };
239 /*
240 * DDI entry points
241 */
242 int
243 _init(void)
245 int rc;
247 if ((rc = ddi_soft_state_init(&pcn_ssp, sizeof (pcn_t), 1)) != 0)
248 return (rc);
250 mac_init_ops(&pcn_devops, "pcn");
251 if ((rc = mod_install(&pcn_modlinkage)) != DDI_SUCCESS) {
252 mac_fini_ops(&pcn_devops);
253 ddi_soft_state_fini(&pcn_ssp);
255 return (rc);
259 _fini(void)
261 int rc;
263 if ((rc = mod_remove(&pcn_modlinkage)) == DDI_SUCCESS) {
264 mac_fini_ops(&pcn_devops);
265 ddi_soft_state_fini(&pcn_ssp);
267 return (rc);
271 _info(struct modinfo *modinfop)
273 return (mod_info(&pcn_modlinkage, modinfop));
277 pcn_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
279 pcn_t *pcnp;
280 mac_register_t *macp;
281 const pcn_type_t *pcn_type;
282 int instance = ddi_get_instance(dip);
283 int rc;
284 ddi_acc_handle_t pci;
285 uint16_t venid;
286 uint16_t devid;
287 uint16_t svid;
288 uint16_t ssid;
290 switch (cmd) {
291 case DDI_RESUME:
292 return (pcn_ddi_resume(dip));
294 case DDI_ATTACH:
295 break;
297 default:
298 return (DDI_FAILURE);
301 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
302 pcn_error(dip, "slot does not support PCI bus-master");
303 return (DDI_FAILURE);
306 if (ddi_intr_hilevel(dip, 0) != 0) {
307 pcn_error(dip, "hilevel interrupts not supported");
308 return (DDI_FAILURE);
311 if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
312 pcn_error(dip, "unable to setup PCI config handle");
313 return (DDI_FAILURE);
316 venid = pci_config_get16(pci, PCI_CONF_VENID);
317 devid = pci_config_get16(pci, PCI_CONF_DEVID);
318 svid = pci_config_get16(pci, PCI_CONF_SUBVENID);
319 ssid = pci_config_get16(pci, PCI_CONF_SUBSYSID);
321 if ((pcn_type = pcn_match(venid, devid)) == NULL) {
322 pci_config_teardown(&pci);
323 pcn_error(dip, "Unable to identify PCI card");
324 return (DDI_FAILURE);
327 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
328 pcn_type->pcn_name) != DDI_PROP_SUCCESS) {
329 pci_config_teardown(&pci);
330 pcn_error(dip, "Unable to create model property");
331 return (DDI_FAILURE);
334 if (ddi_soft_state_zalloc(pcn_ssp, instance) != DDI_SUCCESS) {
335 pcn_error(dip, "Unable to allocate soft state");
336 pci_config_teardown(&pci);
337 return (DDI_FAILURE);
340 pcnp = ddi_get_soft_state(pcn_ssp, instance);
341 pcnp->pcn_dip = dip;
342 pcnp->pcn_instance = instance;
343 pcnp->pcn_extphyaddr = -1;
345 if (ddi_get_iblock_cookie(dip, 0, &pcnp->pcn_icookie) != DDI_SUCCESS) {
346 pcn_error(pcnp->pcn_dip, "ddi_get_iblock_cookie failed");
347 ddi_soft_state_free(pcn_ssp, instance);
348 pci_config_teardown(&pci);
349 return (DDI_FAILURE);
353 mutex_init(&pcnp->pcn_xmtlock, NULL, MUTEX_DRIVER, pcnp->pcn_icookie);
354 mutex_init(&pcnp->pcn_intrlock, NULL, MUTEX_DRIVER, pcnp->pcn_icookie);
355 mutex_init(&pcnp->pcn_reglock, NULL, MUTEX_DRIVER, pcnp->pcn_icookie);
357 /*
358 * Enable bus master, IO space, and memory space accesses
359 */
360 pci_config_put16(pci, PCI_CONF_COMM,
361 pci_config_get16(pci, PCI_CONF_COMM) | PCI_COMM_ME | PCI_COMM_MAE);
363 pci_config_teardown(&pci);
365 if (ddi_regs_map_setup(dip, 1, (caddr_t *)&pcnp->pcn_regs, 0, 0,
366 &pcn_devattr, &pcnp->pcn_regshandle)) {
367 pcn_error(dip, "ddi_regs_map_setup failed");
368 goto fail;
371 if (pcn_set_chipid(pcnp, (uint32_t)ssid << 16 | (uint32_t)svid) !=
372 DDI_SUCCESS) {
373 goto fail;
376 if ((pcnp->pcn_mii = mii_alloc(pcnp, dip, &pcn_mii_ops)) == NULL)
377 goto fail;
379 /* XXX: need to set based on device */
380 mii_set_pauseable(pcnp->pcn_mii, B_FALSE, B_FALSE);
382 if ((pcn_allocrxring(pcnp) != DDI_SUCCESS) ||
383 (pcn_alloctxring(pcnp) != DDI_SUCCESS)) {
384 pcn_error(dip, "unable to allocate DMA resources");
385 goto fail;
388 pcnp->pcn_promisc = B_FALSE;
390 mutex_enter(&pcnp->pcn_intrlock);
391 mutex_enter(&pcnp->pcn_xmtlock);
392 rc = pcn_initialize(pcnp, B_TRUE);
393 mutex_exit(&pcnp->pcn_xmtlock);
394 mutex_exit(&pcnp->pcn_intrlock);
395 if (rc != DDI_SUCCESS)
396 goto fail;
398 if (ddi_add_intr(dip, 0, NULL, NULL, pcn_intr, (caddr_t)pcnp) !=
399 DDI_SUCCESS) {
400 pcn_error(dip, "unable to add interrupt");
401 goto fail;
404 pcnp->pcn_flags |= PCN_INTR_ENABLED;
406 if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
407 pcn_error(pcnp->pcn_dip, "mac_alloc failed");
408 goto fail;
411 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
412 macp->m_driver = pcnp;
413 macp->m_dip = dip;
414 macp->m_src_addr = pcnp->pcn_addr;
415 macp->m_callbacks = &pcn_m_callbacks;
416 macp->m_min_sdu = 0;
417 macp->m_max_sdu = ETHERMTU;
418 macp->m_margin = VLAN_TAGSZ;
420 if (mac_register(macp, &pcnp->pcn_mh) == DDI_SUCCESS) {
421 mac_free(macp);
422 return (DDI_SUCCESS);
425 mac_free(macp);
427 /* mac_register failed; fall through to fail: and tear down */
429 fail:
430 pcn_teardown(pcnp);
431 return (DDI_FAILURE);
435 pcn_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
437 pcn_t *pcnp;
439 pcnp = ddi_get_soft_state(pcn_ssp, ddi_get_instance(dip));
441 if (pcnp == NULL) {
442 pcn_error(dip, "no soft state in detach!");
443 return (DDI_FAILURE);
446 switch (cmd) {
447 case DDI_DETACH:
448 if (mac_unregister(pcnp->pcn_mh) != 0)
449 return (DDI_FAILURE);
451 mutex_enter(&pcnp->pcn_intrlock);
452 mutex_enter(&pcnp->pcn_xmtlock);
453 pcnp->pcn_flags &= ~PCN_RUNNING;
454 pcn_stopall(pcnp);
455 mutex_exit(&pcnp->pcn_xmtlock);
456 mutex_exit(&pcnp->pcn_intrlock);
458 pcn_teardown(pcnp);
459 return (DDI_SUCCESS);
461 case DDI_SUSPEND:
462 mii_suspend(pcnp->pcn_mii);
464 mutex_enter(&pcnp->pcn_intrlock);
465 mutex_enter(&pcnp->pcn_xmtlock);
466 pcnp->pcn_flags |= PCN_SUSPENDED;
467 pcn_stopall(pcnp);
468 mutex_exit(&pcnp->pcn_xmtlock);
469 mutex_exit(&pcnp->pcn_intrlock);
470 return (DDI_SUCCESS);
472 default:
473 return (DDI_FAILURE);
478 pcn_ddi_resume(dev_info_t *dip)
480 pcn_t *pcnp;
482 if ((pcnp = ddi_get_soft_state(pcn_ssp, ddi_get_instance(dip))) == NULL)
483 return (DDI_FAILURE);
485 mutex_enter(&pcnp->pcn_intrlock);
486 mutex_enter(&pcnp->pcn_xmtlock);
488 pcnp->pcn_flags &= ~PCN_SUSPENDED;
490 if (pcn_initialize(pcnp, B_FALSE) != DDI_SUCCESS) {
491 pcn_error(pcnp->pcn_dip, "unable to resume chip");
492 pcnp->pcn_flags |= PCN_SUSPENDED;
493 mutex_exit(&pcnp->pcn_intrlock);
494 mutex_exit(&pcnp->pcn_xmtlock);
495 return (DDI_SUCCESS);
498 if (IS_RUNNING(pcnp))
499 pcn_startall(pcnp);
501 mutex_exit(&pcnp->pcn_xmtlock);
502 mutex_exit(&pcnp->pcn_intrlock);
504 mii_resume(pcnp->pcn_mii);
506 return (DDI_SUCCESS);
510 pcn_quiesce(dev_info_t *dip)
512 pcn_t *pcnp;
514 if ((pcnp = ddi_get_soft_state(pcn_ssp, ddi_get_instance(dip))) == NULL)
515 return (DDI_FAILURE);
517 /* don't want to take the chance of blocking */
518 CSR_WRITE_4(pcnp, PCN_IO32_RAP, PCN_CSR_EXTCTL1);
519 CSR_WRITE_4(pcnp, PCN_IO32_RDP, CSR_READ_4(pcnp, PCN_IO32_RDP) &
520 ~(PCN_EXTCTL1_SINTEN));
522 CSR_WRITE_4(pcnp, PCN_IO32_RAP, PCN_CSR_CSR);
523 CSR_WRITE_4(pcnp, PCN_IO32_RDP,
524 (CSR_READ_4(pcnp, PCN_IO32_RDP) & ~(PCN_CSR_INTEN)) |
525 PCN_CSR_STOP);
527 return (DDI_SUCCESS);
530 static void
531 pcn_teardown(pcn_t *pcnp)
533 ASSERT(!(pcnp->pcn_flags & PCN_RUNNING));
535 if (pcnp->pcn_mii != NULL) {
536 mii_free(pcnp->pcn_mii);
537 pcnp->pcn_mii = NULL;
540 if (pcnp->pcn_flags & PCN_INTR_ENABLED)
541 ddi_remove_intr(pcnp->pcn_dip, 0, pcnp->pcn_icookie);
543 /* These will exit gracefully if not yet allocated */
544 pcn_freerxring(pcnp);
545 pcn_freetxring(pcnp);
547 if (pcnp->pcn_regshandle != NULL)
548 ddi_regs_map_free(&pcnp->pcn_regshandle);
551 mutex_destroy(&pcnp->pcn_xmtlock);
552 mutex_destroy(&pcnp->pcn_intrlock);
553 mutex_destroy(&pcnp->pcn_reglock);
555 ddi_soft_state_free(pcn_ssp, ddi_get_instance(pcnp->pcn_dip));
558 /*
559 * Drains any FIFOs in the card, then pauses it
560 */
561 static void
562 pcn_suspend(pcn_t *pcnp)
564 uint32_t val;
565 int i;
567 PCN_CSR_SETBIT(pcnp, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
568 for (i = 0; i < 5000; i++) {
569 if ((val = pcn_csr_read(pcnp, PCN_CSR_EXTCTL1)) &
570 PCN_EXTCTL1_SPND)
571 return;
572 drv_usecwait(1000);
575 pcn_error(pcnp->pcn_dip, "Unable to suspend, EXTCTL1 was 0x%b", val,
576 PCN_EXTCTL1_STR);
579 static void
580 pcn_resume(pcn_t *pcnp)
582 PCN_CSR_CLRBIT(pcnp, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
585 static int
586 pcn_m_multicast(void *arg, boolean_t add, const uint8_t *macaddr)
588 pcn_t *pcnp = (pcn_t *)arg;
589 int index;
590 uint32_t crc;
591 uint16_t bit;
592 uint16_t newval, oldval;
594 /*
595 * PCNet uses the upper 6 bits of the CRC of the macaddr
596 * to index into a 64-bit mask
597 */
598 CRC32(crc, macaddr, ETHERADDRL, -1U, crc32_table);
599 crc >>= 26;
600 index = crc / 16;
601 bit = (1U << (crc % 16));
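/* the 64-bit filter is stored as four 16-bit words, PCN_CSR_MAR0 .. PCN_CSR_MAR0 + 3 */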
603 mutex_enter(&pcnp->pcn_intrlock);
604 mutex_enter(&pcnp->pcn_xmtlock);
605 newval = oldval = pcnp->pcn_mctab[index];
607 if (add) {
608 pcnp->pcn_mccount[crc]++;
609 if (pcnp->pcn_mccount[crc] == 1)
610 newval |= bit;
611 } else {
612 pcnp->pcn_mccount[crc]--;
613 if (pcnp->pcn_mccount[crc] == 0)
614 newval &= ~bit;
616 if (newval != oldval) {
617 pcnp->pcn_mctab[index] = newval;
618 pcn_suspend(pcnp);
619 pcn_csr_write(pcnp, PCN_CSR_MAR0 + index, newval);
620 pcn_resume(pcnp);
623 mutex_exit(&pcnp->pcn_xmtlock);
624 mutex_exit(&pcnp->pcn_intrlock);
626 return (0);
629 static int
630 pcn_m_promisc(void *arg, boolean_t on)
632 pcn_t *pcnp = (pcn_t *)arg;
634 mutex_enter(&pcnp->pcn_intrlock);
635 mutex_enter(&pcnp->pcn_xmtlock);
637 pcnp->pcn_promisc = on;
639 if (IS_RUNNING(pcnp))
640 pcn_suspend(pcnp);
642 /* set promiscuous mode */
643 if (pcnp->pcn_promisc)
644 PCN_CSR_SETBIT(pcnp, PCN_CSR_MODE, PCN_MODE_PROMISC);
645 else
646 PCN_CSR_CLRBIT(pcnp, PCN_CSR_MODE, PCN_MODE_PROMISC);
648 if (IS_RUNNING(pcnp))
649 pcn_resume(pcnp);
651 mutex_exit(&pcnp->pcn_xmtlock);
652 mutex_exit(&pcnp->pcn_intrlock);
654 return (0);
657 static int
658 pcn_m_unicast(void *arg, const uint8_t *macaddr)
660 pcn_t *pcnp = (pcn_t *)arg;
661 int i;
662 uint16_t addr[3];
664 bcopy(macaddr, addr, sizeof (addr));
666 mutex_enter(&pcnp->pcn_intrlock);
667 mutex_enter(&pcnp->pcn_xmtlock);
669 if (IS_RUNNING(pcnp))
670 pcn_suspend(pcnp);
672 for (i = 0; i < 3; i++)
673 pcn_csr_write(pcnp, PCN_CSR_PAR0 + i, addr[i]);
675 bcopy(macaddr, pcnp->pcn_addr, ETHERADDRL);
677 if (IS_RUNNING(pcnp))
678 pcn_resume(pcnp);
680 mutex_exit(&pcnp->pcn_xmtlock);
681 mutex_exit(&pcnp->pcn_intrlock);
683 return (0);
686 static mblk_t *
687 pcn_m_tx(void *arg, mblk_t *mp)
689 pcn_t *pcnp = (pcn_t *)arg;
690 mblk_t *nmp;
692 mutex_enter(&pcnp->pcn_xmtlock);
694 if (pcnp->pcn_flags & PCN_SUSPENDED) {
695 while ((nmp = mp) != NULL) {
696 pcnp->pcn_carrier_errors++;
697 mp = mp->b_next;
698 freemsg(nmp);
700 mutex_exit(&pcnp->pcn_xmtlock);
701 return (NULL);
704 while (mp != NULL) {
705 nmp = mp->b_next;
706 mp->b_next = NULL;
708 if (!pcn_send(pcnp, mp)) {
709 mp->b_next = nmp;
710 break;
712 mp = nmp;
714 mutex_exit(&pcnp->pcn_xmtlock);
716 return (mp);
719 static boolean_t
720 pcn_send(pcn_t *pcnp, mblk_t *mp)
722 size_t len;
723 pcn_buf_t *txb;
724 pcn_tx_desc_t *tmd;
725 int txsend;
727 ASSERT(mutex_owned(&pcnp->pcn_xmtlock));
728 ASSERT(mp != NULL);
730 len = msgsize(mp);
731 if (len > ETHERVLANMTU) {
732 pcnp->pcn_macxmt_errors++;
733 freemsg(mp);
734 return (B_TRUE);
737 if (pcnp->pcn_txavail < PCN_TXRECLAIM)
738 pcn_reclaim(pcnp);
740 if (pcnp->pcn_txavail == 0) {
741 pcnp->pcn_wantw = B_TRUE;
743 /* enable tx interrupt */
744 PCN_CSR_SETBIT(pcnp, PCN_CSR_EXTCTL1, PCN_EXTCTL1_LTINTEN);
745 return (B_FALSE);
748 txsend = pcnp->pcn_txsend;
750 /*
751 * We copy the packet to a single buffer. NetBSD sources suggest
752 * that if multiple segments are ever used, VMware has a bug that will
753 * only allow 8 segments to be used, while the physical chips allow 16
754 */
755 txb = pcnp->pcn_txbufs[txsend];
756 mcopymsg(mp, txb->pb_buf); /* frees mp! */
758 pcnp->pcn_opackets++;
759 pcnp->pcn_obytes += len;
760 if (txb->pb_buf[0] & 0x1) {
761 if (bcmp(txb->pb_buf, pcn_broadcast, ETHERADDRL) != 0)
762 pcnp->pcn_multixmt++;
763 else
764 pcnp->pcn_brdcstxmt++;
767 tmd = &pcnp->pcn_txdescp[txsend];
769 SYNCBUF(txb, len, DDI_DMA_SYNC_FORDEV);
770 tmd->pcn_txstat = 0;
771 tmd->pcn_tbaddr = txb->pb_paddr;
773 /* PCNet wants the 2's complement of the length of the buffer */
774 tmd->pcn_txctl = (~(len) + 1) & PCN_TXCTL_BUFSZ;
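/* e.g. assuming a 12-bit BCNT field, a 60-byte frame is encoded as 0xfc4 */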
775 tmd->pcn_txctl |= PCN_TXCTL_MBO;
776 tmd->pcn_txctl |= PCN_TXCTL_STP | PCN_TXCTL_ENP | PCN_TXCTL_ADD_FCS |
777 PCN_TXCTL_OWN | PCN_TXCTL_MORE_LTINT;
779 SYNCTXDESC(pcnp, txsend, DDI_DMA_SYNC_FORDEV);
781 pcnp->pcn_txavail--;
782 pcnp->pcn_txsend = (txsend + 1) % PCN_TXRING;
783 pcnp->pcn_txstall_time = gethrtime() + (5 * 1000000000ULL);
785 pcn_csr_write(pcnp, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN);
787 return (B_TRUE);
790 static void
791 pcn_reclaim(pcn_t *pcnp)
793 pcn_tx_desc_t *tmdp;
795 while (pcnp->pcn_txavail != PCN_TXRING) {
796 int index = pcnp->pcn_txreclaim;
798 tmdp = &pcnp->pcn_txdescp[index];
800 /* sync before reading */
801 SYNCTXDESC(pcnp, index, DDI_DMA_SYNC_FORKERNEL);
803 /* check if chip is still working on it */
804 if (tmdp->pcn_txctl & PCN_TXCTL_OWN)
805 break;
807 pcnp->pcn_txavail++;
808 pcnp->pcn_txreclaim = (index + 1) % PCN_TXRING;
811 if (pcnp->pcn_txavail >= PCN_TXRESCHED) {
812 if (pcnp->pcn_wantw) {
813 pcnp->pcn_wantw = B_FALSE;
815 /* Disable TX interrupt */
816 PCN_CSR_CLRBIT(pcnp, PCN_CSR_EXTCTL1,
817 PCN_EXTCTL1_LTINTEN);
819 mac_tx_update(pcnp->pcn_mh);
824 static unsigned
825 pcn_intr(caddr_t arg1)
827 pcn_t *pcnp = (void *)arg1;
828 mblk_t *mp = NULL;
829 uint32_t status, status2;
830 boolean_t do_reset = B_FALSE;
832 mutex_enter(&pcnp->pcn_intrlock);
834 if (IS_SUSPENDED(pcnp)) {
835 mutex_exit(&pcnp->pcn_intrlock);
836 return (DDI_INTR_UNCLAIMED);
839 while ((status = pcn_csr_read(pcnp, PCN_CSR_CSR)) & PCN_CSR_INTR) {
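/* write the latched status bits back to CSR0 to acknowledge them */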
840 pcn_csr_write(pcnp, PCN_CSR_CSR, status);
842 status2 = pcn_csr_read(pcnp, PCN_CSR_EXTCTL2);
844 if (status & PCN_CSR_TINT) {
845 mutex_enter(&pcnp->pcn_xmtlock);
846 pcn_reclaim(pcnp);
847 mutex_exit(&pcnp->pcn_xmtlock);
850 if (status & PCN_CSR_RINT)
851 mp = pcn_receive(pcnp);
853 if (status & PCN_CSR_ERR) {
854 do_reset = B_TRUE;
855 break;
858 /* timer interrupt */
859 if (status2 & PCN_EXTCTL2_STINT) {
860 /* ack it */
861 PCN_CSR_SETBIT(pcnp, PCN_CSR_EXTCTL2,
862 PCN_EXTCTL2_STINT);
864 if (pcn_watchdog(pcnp) != DDI_SUCCESS) {
865 do_reset = B_TRUE;
866 break;
871 if (do_reset) {
872 mutex_enter(&pcnp->pcn_xmtlock);
873 pcn_resetall(pcnp);
874 mutex_exit(&pcnp->pcn_xmtlock);
875 mutex_exit(&pcnp->pcn_intrlock);
877 mii_reset(pcnp->pcn_mii);
878 } else {
879 mutex_exit(&pcnp->pcn_intrlock);
882 if (mp)
883 mac_rx(pcnp->pcn_mh, NULL, mp);
885 return (DDI_INTR_CLAIMED);
888 static mblk_t *
889 pcn_receive(pcn_t *pcnp)
891 uint32_t len;
892 pcn_buf_t *rxb;
893 pcn_rx_desc_t *rmd;
894 mblk_t *mpchain, **mpp, *mp;
895 int head, cnt;
897 mpchain = NULL;
898 mpp = &mpchain;
899 head = pcnp->pcn_rxhead;
901 for (cnt = 0; cnt < PCN_RXRING; cnt++) {
902 rmd = &pcnp->pcn_rxdescp[head];
903 rxb = pcnp->pcn_rxbufs[head];
905 SYNCRXDESC(pcnp, head, DDI_DMA_SYNC_FORKERNEL);
906 if (rmd->pcn_rxstat & PCN_RXSTAT_OWN)
907 break;
909 len = rmd->pcn_rxlen - ETHERFCSL;
911 if (rmd->pcn_rxstat & PCN_RXSTAT_ERR) {
912 pcnp->pcn_errrcv++;
914 if (rmd->pcn_rxstat & PCN_RXSTAT_FRAM)
915 pcnp->pcn_align_errors++;
916 if (rmd->pcn_rxstat & PCN_RXSTAT_OFLOW)
917 pcnp->pcn_overflow++;
918 if (rmd->pcn_rxstat & PCN_RXSTAT_CRC)
919 pcnp->pcn_fcs_errors++;
920 } else if (len > ETHERVLANMTU) {
921 pcnp->pcn_errrcv++;
922 pcnp->pcn_toolong_errors++;
923 } else {
924 mp = allocb(len + PCN_HEADROOM, 0);
925 if (mp == NULL) {
926 pcnp->pcn_errrcv++;
927 pcnp->pcn_norcvbuf++;
928 goto skip;
931 SYNCBUF(rxb, len, DDI_DMA_SYNC_FORKERNEL);
932 mp->b_rptr += PCN_HEADROOM;
933 mp->b_wptr = mp->b_rptr + len;
934 bcopy((char *)rxb->pb_buf, mp->b_rptr, len);
936 pcnp->pcn_ipackets++;
937 pcnp->pcn_rbytes += len;
939 if (rmd->pcn_rxstat & (PCN_RXSTAT_LAFM | PCN_RXSTAT_BAM)) {
940 if (rmd->pcn_rxstat & PCN_RXSTAT_BAM)
941 pcnp->pcn_brdcstrcv++;
942 else
943 pcnp->pcn_multircv++;
945 *mpp = mp;
946 mpp = &mp->b_next;
949 skip:
950 rmd->pcn_rxstat = PCN_RXSTAT_OWN;
951 SYNCRXDESC(pcnp, head, DDI_DMA_SYNC_FORDEV);
953 head = (head + 1) % PCN_RXRING;
956 pcnp->pcn_rxhead = head;
957 return (mpchain);
960 static void
961 pcn_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
963 pcn_t *pcnp = (pcn_t *)arg;
965 if (mii_m_loop_ioctl(pcnp->pcn_mii, wq, mp))
966 return;
968 miocnak(wq, mp, 0, EINVAL);
971 static int
972 pcn_m_start(void *arg)
974 pcn_t *pcnp = (pcn_t *)arg;
976 mutex_enter(&pcnp->pcn_intrlock);
977 mutex_enter(&pcnp->pcn_xmtlock);
979 pcn_startall(pcnp);
980 pcnp->pcn_flags |= PCN_RUNNING;
982 mutex_exit(&pcnp->pcn_xmtlock);
983 mutex_exit(&pcnp->pcn_intrlock);
985 mii_start(pcnp->pcn_mii);
987 return (0);
990 static void
991 pcn_m_stop(void *arg)
993 pcn_t *pcnp = (pcn_t *)arg;
995 mii_stop(pcnp->pcn_mii);
997 mutex_enter(&pcnp->pcn_intrlock);
998 mutex_enter(&pcnp->pcn_xmtlock);
1000 pcn_stopall(pcnp);
1001 pcnp->pcn_flags &= ~PCN_RUNNING;
1003 mutex_exit(&pcnp->pcn_xmtlock);
1004 mutex_exit(&pcnp->pcn_intrlock);
1007 static int
1008 pcn_initialize(pcn_t *pcnp, boolean_t getfact)
1010 int i;
1011 uint16_t addr[3];
1013 bcopy(pcnp->pcn_addr, addr, sizeof (addr));
1015 /*
1016 * Issue a reset by reading from the RESET register.
1017 * Note that we don't know if the chip is operating in
1018 * 16-bit or 32-bit mode at this point, so we attempt
1019 * to reset the chip both ways. If one fails, the other
1020 * will succeed.
1021 */
1022 (void) CSR_READ_2(pcnp, PCN_IO16_RESET);
1023 (void) CSR_READ_4(pcnp, PCN_IO32_RESET);
1025 drv_usecwait(1000);
1027 /* Select 32-bit (DWIO) mode */
1028 CSR_WRITE_4(pcnp, PCN_IO32_RDP, 0);
1030 /* The timer is not affected by a reset, so explicitly disable */
1031 pcn_stop_timer(pcnp);
1033 /* Enable fast suspend */
1034 pcn_csr_write(pcnp, PCN_CSR_EXTCTL2, PCN_EXTCTL2_FASTSPNDE);
1036 /* Select Style 3 descriptors */
1037 pcn_bcr_write(pcnp, PCN_BCR_SSTYLE, PCN_SWSTYLE_PCNETPCI);
1039 /* Set MAC address */
1040 if (getfact)
1041 pcn_getfactaddr(pcnp);
1043 pcn_csr_write(pcnp, PCN_CSR_PAR0, addr[0]);
1044 pcn_csr_write(pcnp, PCN_CSR_PAR1, addr[1]);
1045 pcn_csr_write(pcnp, PCN_CSR_PAR2, addr[2]);
1047 /* Clear PCN_MISC_ASEL so we can set the port via PCN_CSR_MODE. */
1048 PCN_BCR_CLRBIT(pcnp, PCN_BCR_MISCCFG, PCN_MISC_ASEL);
1050 /*
1051 * XXX: need to find a way to determine when 10bt media is
1052 * selected for non Am79C978, and set to PCN_PORT_10BASET
1053 * instead of PCN_PORT_MII
1054 */
1055 pcn_csr_write(pcnp, PCN_CSR_MODE, PCN_PORT_MII);
1057 /* Reenable auto negotiation for external phy */
1058 PCN_BCR_SETBIT(pcnp, PCN_BCR_MIICTL, PCN_MIICTL_XPHYANE);
1060 if (pcnp->pcn_promisc)
1061 PCN_CSR_SETBIT(pcnp, PCN_CSR_MODE, PCN_MODE_PROMISC);
1063 /* Initialize mcast addr filter */
1064 for (i = 0; i < 4; i++)
1065 pcn_csr_write(pcnp, PCN_CSR_MAR0 + i, pcnp->pcn_mctab[i]);
1067 pcn_resetrings(pcnp);
1069 /* We're not using the initialization block. */
1070 pcn_csr_write(pcnp, PCN_CSR_IAB1, 0);
1072 /*
1073 * Enable burst read and write. Also set the no underflow
1074 * bit. This will avoid transmit underruns in certain
1075 * conditions while still providing decent performance.
1076 */
1077 PCN_BCR_SETBIT(pcnp, PCN_BCR_BUSCTL, PCN_BUSCTL_NOUFLOW |
1078 PCN_BUSCTL_BREAD | PCN_BUSCTL_BWRITE);
1080 /* Enable graceful recovery from underflow. */
1081 PCN_CSR_SETBIT(pcnp, PCN_CSR_IMR, PCN_IMR_DXSUFLO);
1083 /* Enable auto-padding of short TX frames. */
1084 PCN_CSR_SETBIT(pcnp, PCN_CSR_TFEAT, PCN_TFEAT_PAD_TX);
1086 if (pcnp->pcn_type == Am79C978)
1087 pcn_bcr_write(pcnp, PCN_BCR_PHYSEL,
1088 PCN_PHYSEL_PCNET|PCN_PHY_HOMEPNA);
1090 return (DDI_SUCCESS);
1093 static void
1094 pcn_resetall(pcn_t *pcnp)
1096 pcn_stopall(pcnp);
1097 pcn_startall(pcnp);
1100 static void
1101 pcn_startall(pcn_t *pcnp)
1103 ASSERT(mutex_owned(&pcnp->pcn_intrlock));
1104 ASSERT(mutex_owned(&pcnp->pcn_xmtlock));
1106 (void) pcn_initialize(pcnp, B_FALSE);
1108 /* Start chip and enable interrupts */
1109 PCN_CSR_SETBIT(pcnp, PCN_CSR_CSR, PCN_CSR_START|PCN_CSR_INTEN);
1111 pcn_start_timer(pcnp);
1113 if (IS_RUNNING(pcnp))
1114 mac_tx_update(pcnp->pcn_mh);
1117 static void
1118 pcn_stopall(pcn_t *pcnp)
1120 ASSERT(mutex_owned(&pcnp->pcn_intrlock));
1121 ASSERT(mutex_owned(&pcnp->pcn_xmtlock));
1123 pcn_stop_timer(pcnp);
1124 PCN_CSR_SETBIT(pcnp, PCN_CSR_CSR, PCN_CSR_STOP);
1127 /*
1128 * The soft timer is not affected by a soft reset (according to the datasheet)
1129 * so it must always be explicitly enabled and disabled.
1130 */
1131 static void
1132 pcn_start_timer(pcn_t *pcnp)
1134 PCN_CSR_SETBIT(pcnp, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SINTEN);
1136 /*
1137 * The frequency at which this fires varies based on the particular
1138 * model; the value is largely arbitrary. It just needs to
1139 * fire often enough to detect a stall.
1140 */
1141 pcn_bcr_write(pcnp, PCN_BCR_TIMER, 0xa000);
1145 static void
1146 pcn_stop_timer(pcn_t *pcnp)
1148 PCN_CSR_CLRBIT(pcnp, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SINTEN);
1151 static int
1152 pcn_m_stat(void *arg, uint_t stat, uint64_t *val)
1154 pcn_t *pcnp = (pcn_t *)arg;
1156 if (mii_m_getstat(pcnp->pcn_mii, stat, val) == 0)
1157 return (0);
1159 switch (stat) {
1160 case MAC_STAT_MULTIRCV:
1161 *val = pcnp->pcn_multircv;
1162 break;
1164 case MAC_STAT_BRDCSTRCV:
1165 *val = pcnp->pcn_brdcstrcv;
1166 break;
1168 case MAC_STAT_MULTIXMT:
1169 *val = pcnp->pcn_multixmt;
1170 break;
1172 case MAC_STAT_BRDCSTXMT:
1173 *val = pcnp->pcn_brdcstxmt;
1174 break;
1176 case MAC_STAT_IPACKETS:
1177 *val = pcnp->pcn_ipackets;
1178 break;
1180 case MAC_STAT_RBYTES:
1181 *val = pcnp->pcn_rbytes;
1182 break;
1184 case MAC_STAT_OPACKETS:
1185 *val = pcnp->pcn_opackets;
1186 break;
1188 case MAC_STAT_OBYTES:
1189 *val = pcnp->pcn_obytes;
1190 break;
1192 case MAC_STAT_NORCVBUF:
1193 *val = pcnp->pcn_norcvbuf;
1194 break;
1196 case MAC_STAT_NOXMTBUF:
1197 *val = 0;
1198 break;
1200 case MAC_STAT_COLLISIONS:
1201 *val = pcnp->pcn_collisions;
1202 break;
1204 case MAC_STAT_IERRORS:
1205 *val = pcnp->pcn_errrcv;
1206 break;
1208 case MAC_STAT_OERRORS:
1209 *val = pcnp->pcn_errxmt;
1210 break;
1212 case ETHER_STAT_ALIGN_ERRORS:
1213 *val = pcnp->pcn_align_errors;
1214 break;
1216 case ETHER_STAT_FCS_ERRORS:
1217 *val = pcnp->pcn_fcs_errors;
1218 break;
1220 case ETHER_STAT_SQE_ERRORS:
1221 *val = pcnp->pcn_sqe_errors;
1222 break;
1224 case ETHER_STAT_DEFER_XMTS:
1225 *val = pcnp->pcn_defer_xmts;
1226 break;
1228 case ETHER_STAT_FIRST_COLLISIONS:
1229 *val = pcnp->pcn_first_collisions;
1230 break;
1232 case ETHER_STAT_MULTI_COLLISIONS:
1233 *val = pcnp->pcn_multi_collisions;
1234 break;
1236 case ETHER_STAT_TX_LATE_COLLISIONS:
1237 *val = pcnp->pcn_tx_late_collisions;
1238 break;
1240 case ETHER_STAT_EX_COLLISIONS:
1241 *val = pcnp->pcn_ex_collisions;
1242 break;
1244 case ETHER_STAT_MACXMT_ERRORS:
1245 *val = pcnp->pcn_macxmt_errors;
1246 break;
1248 case ETHER_STAT_CARRIER_ERRORS:
1249 *val = pcnp->pcn_carrier_errors;
1250 break;
1252 case ETHER_STAT_TOOLONG_ERRORS:
1253 *val = pcnp->pcn_toolong_errors;
1254 break;
1256 case ETHER_STAT_MACRCV_ERRORS:
1257 *val = pcnp->pcn_macrcv_errors;
1258 break;
1260 case MAC_STAT_OVERFLOWS:
1261 *val = pcnp->pcn_overflow;
1262 break;
1264 case MAC_STAT_UNDERFLOWS:
1265 *val = pcnp->pcn_underflow;
1266 break;
1268 case ETHER_STAT_TOOSHORT_ERRORS:
1269 *val = pcnp->pcn_runt;
1270 break;
1272 case ETHER_STAT_JABBER_ERRORS:
1273 *val = pcnp->pcn_jabber;
1274 break;
1276 default:
1277 return (ENOTSUP);
1279 return (0);
1282 static int
1283 pcn_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1284 void *val)
1286 pcn_t *pcnp = (pcn_t *)arg;
1288 return (mii_m_getprop(pcnp->pcn_mii, name, num, sz, val));
1291 static int
1292 pcn_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1293 const void *val)
1295 pcn_t *pcnp = (pcn_t *)arg;
1297 return (mii_m_setprop(pcnp->pcn_mii, name, num, sz, val));
1300 static void
1301 pcn_m_propinfo(void *arg, const char *name, mac_prop_id_t num,
1302 mac_prop_info_handle_t prh)
1304 pcn_t *pcnp = arg;
1306 mii_m_propinfo(pcnp->pcn_mii, name, num, prh);
1309 static int
1310 pcn_watchdog(pcn_t *pcnp)
1312 if ((pcnp->pcn_txstall_time != 0) &&
1313 (gethrtime() > pcnp->pcn_txstall_time) &&
1314 (pcnp->pcn_txavail != PCN_TXRING)) {
1315 pcnp->pcn_txstall_time = 0;
1316 pcn_error(pcnp->pcn_dip, "TX stall detected!");
1317 return (DDI_FAILURE);
1318 } else {
1319 return (DDI_SUCCESS);
1323 static uint16_t
1324 pcn_mii_read(void *arg, uint8_t phy, uint8_t reg)
1326 pcn_t *pcnp = (pcn_t *)arg;
1327 uint16_t val;
1329 /*
1330 * At least Am79C971 with DP83840A wedge when isolating the
1331 * external PHY so we can't allow multiple external PHYs.
1332 * There are cards that use Am79C971 with both the internal
1333 * and an external PHY though.
1334 * For internal PHYs it doesn't really matter whether we can
1335 * isolate the remaining internal and the external ones in
1336 * the PHY drivers as the internal PHYs have to be enabled
1337 * individually in PCN_BCR_PHYSEL, PCN_CSR_MODE, etc.
1338 * With Am79C97{3,5,8} we don't support switching between
1339 * the internal and external PHYs, yet, so we can't allow
1340 * multiple PHYs with these either.
1341 * Am79C97{2,6} actually only support external PHYs (the
1342 * non-connectable internal ones respond at the usual addresses,
1343 * which doesn't hurt if we let them show up on the bus) and
1344 * isolating them works.
1345 */
1346 if (((pcnp->pcn_type == Am79C971 && phy != PCN_PHYAD_10BT) ||
1347 pcnp->pcn_type == Am79C973 || pcnp->pcn_type == Am79C975 ||
1348 pcnp->pcn_type == Am79C978) && pcnp->pcn_extphyaddr != -1 &&
1349 phy != pcnp->pcn_extphyaddr) {
1350 return (0);
1353 val = ((uint16_t)phy << 5) | reg;
1354 pcn_bcr_write(pcnp, PCN_BCR_MIIADDR, phy << 5 | reg);
1355 val = pcn_bcr_read(pcnp, PCN_BCR_MIIDATA) & 0xFFFF;
1356 if (val == 0xFFFF) {
1357 return (0);
1360 if (((pcnp->pcn_type == Am79C971 && phy != PCN_PHYAD_10BT) ||
1361 pcnp->pcn_type == Am79C973 || pcnp->pcn_type == Am79C975 ||
1362 pcnp->pcn_type == Am79C978) && pcnp->pcn_extphyaddr == -1)
1363 pcnp->pcn_extphyaddr = phy;
1365 return (val);
1368 static void
1369 pcn_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t val)
1371 pcn_t *pcnp = (pcn_t *)arg;
1373 pcn_bcr_write(pcnp, PCN_BCR_MIIADDR, reg | (phy << 5));
1374 pcn_bcr_write(pcnp, PCN_BCR_MIIDATA, val);
1377 static void
1378 pcn_mii_notify(void *arg, link_state_t link)
1380 pcn_t *pcnp = (pcn_t *)arg;
1382 mac_link_update(pcnp->pcn_mh, link);
1385 static const pcn_type_t *
1386 pcn_match(uint16_t vid, uint16_t did)
1388 const pcn_type_t *t;
1390 t = pcn_devs;
1391 while (t->pcn_name != NULL) {
1392 if ((vid == t->pcn_vid) && (did == t->pcn_did))
1393 return (t);
1394 t++;
1396 return (NULL);
1399 static void
1400 pcn_getfactaddr(pcn_t *pcnp)
1402 uint32_t addr[2];
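/* the factory MAC address lives in the first 6 bytes of the address PROM */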
1404 addr[0] = CSR_READ_4(pcnp, PCN_IO32_APROM00);
1405 addr[1] = CSR_READ_4(pcnp, PCN_IO32_APROM01);
1407 bcopy(&addr[0], &pcnp->pcn_addr[0], sizeof (pcnp->pcn_addr));
1410 static uint32_t
1411 pcn_csr_read(pcn_t *pcnp, uint32_t reg)
1413 uint32_t val;
1415 mutex_enter(&pcnp->pcn_reglock);
1416 CSR_WRITE_4(pcnp, PCN_IO32_RAP, reg);
1417 val = CSR_READ_4(pcnp, PCN_IO32_RDP);
1418 mutex_exit(&pcnp->pcn_reglock);
1419 return (val);
1422 static uint16_t
1423 pcn_csr_read16(pcn_t *pcnp, uint32_t reg)
1425 uint16_t val;
1427 mutex_enter(&pcnp->pcn_reglock);
1428 CSR_WRITE_2(pcnp, PCN_IO16_RAP, reg);
1429 val = CSR_READ_2(pcnp, PCN_IO16_RDP);
1430 mutex_exit(&pcnp->pcn_reglock);
1431 return (val);
1434 static void
1435 pcn_csr_write(pcn_t *pcnp, uint32_t reg, uint32_t val)
1437 mutex_enter(&pcnp->pcn_reglock);
1438 CSR_WRITE_4(pcnp, PCN_IO32_RAP, reg);
1439 CSR_WRITE_4(pcnp, PCN_IO32_RDP, val);
1440 mutex_exit(&pcnp->pcn_reglock);
1443 static uint32_t
1444 pcn_bcr_read(pcn_t *pcnp, uint32_t reg)
1446 uint32_t val;
1448 mutex_enter(&pcnp->pcn_reglock);
1449 CSR_WRITE_4(pcnp, PCN_IO32_RAP, reg);
1450 val = CSR_READ_4(pcnp, PCN_IO32_BDP);
1451 mutex_exit(&pcnp->pcn_reglock);
1452 return (val);
1455 static uint16_t
1456 pcn_bcr_read16(pcn_t *pcnp, uint32_t reg)
1458 uint16_t val;
1460 mutex_enter(&pcnp->pcn_reglock);
1461 CSR_WRITE_2(pcnp, PCN_IO16_RAP, reg);
1462 val = CSR_READ_2(pcnp, PCN_IO16_BDP);
1463 mutex_exit(&pcnp->pcn_reglock);
1464 return (val);
1467 static void
1468 pcn_bcr_write(pcn_t *pcnp, uint32_t reg, uint32_t val)
1470 mutex_enter(&pcnp->pcn_reglock);
1471 CSR_WRITE_4(pcnp, PCN_IO32_RAP, reg);
1472 CSR_WRITE_4(pcnp, PCN_IO32_BDP, val);
1473 mutex_exit(&pcnp->pcn_reglock);
1476 static void
1477 pcn_resetrings(pcn_t *pcnp)
1479 int i;
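/*
 * As on the TX side, the buffer size is programmed as a two's complement
 * value; PCN_RXLEN_MBO covers the descriptor bits that must be set.
 */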
1480 uint16_t bufsz = ((~(PCN_BUFSZ) + 1) & PCN_RXLEN_BUFSZ) | PCN_RXLEN_MBO;
1482 pcnp->pcn_rxhead = 0;
1483 pcnp->pcn_txreclaim = 0;
1484 pcnp->pcn_txsend = 0;
1485 pcnp->pcn_txavail = PCN_TXRING;
1487 /* reset rx descriptor values */
1488 for (i = 0; i < PCN_RXRING; i++) {
1489 pcn_rx_desc_t *rmd = &pcnp->pcn_rxdescp[i];
1490 pcn_buf_t *rxb = pcnp->pcn_rxbufs[i];
1492 rmd->pcn_rxlen = rmd->pcn_rsvd0 = 0;
1493 rmd->pcn_rbaddr = rxb->pb_paddr;
1494 rmd->pcn_bufsz = bufsz;
1495 rmd->pcn_rxstat = PCN_RXSTAT_OWN;
1497 (void) ddi_dma_sync(pcnp->pcn_rxdesc_dmah, 0,
1498 PCN_RXRING * sizeof (pcn_rx_desc_t), DDI_DMA_SYNC_FORDEV);
1500 /* reset tx descriptor values */
1501 for (i = 0; i < PCN_TXRING; i++) {
1502 pcn_tx_desc_t *txd = &pcnp->pcn_txdescp[i];
1503 pcn_buf_t *txb = pcnp->pcn_txbufs[i];
1505 txd->pcn_txstat = txd->pcn_txctl = txd->pcn_uspace = 0;
1506 txd->pcn_tbaddr = txb->pb_paddr;
1508 (void) ddi_dma_sync(pcnp->pcn_txdesc_dmah, 0,
1509 PCN_TXRING * sizeof (pcn_tx_desc_t), DDI_DMA_SYNC_FORDEV);
1511 /* set addresses of descriptors */
1512 pcn_csr_write(pcnp, PCN_CSR_RXADDR0, pcnp->pcn_rxdesc_paddr & 0xFFFF);
1513 pcn_csr_write(pcnp, PCN_CSR_RXADDR1,
1514 (pcnp->pcn_rxdesc_paddr >> 16) & 0xFFFF);
1516 pcn_csr_write(pcnp, PCN_CSR_TXADDR0, pcnp->pcn_txdesc_paddr & 0xFFFF);
1517 pcn_csr_write(pcnp, PCN_CSR_TXADDR1,
1518 (pcnp->pcn_txdesc_paddr >> 16) & 0xFFFF);
1520 /* set the ring sizes (the chip takes the two's complement of the length) */
1521 pcn_csr_write(pcnp, PCN_CSR_RXRINGLEN, (~PCN_RXRING) + 1);
1522 pcn_csr_write(pcnp, PCN_CSR_TXRINGLEN, (~PCN_TXRING) + 1);
1525 static void
1526 pcn_destroybuf(pcn_buf_t *buf)
1528 if (buf == NULL)
1529 return;
1531 if (buf->pb_paddr)
1532 (void) ddi_dma_unbind_handle(buf->pb_dmah);
1533 if (buf->pb_acch)
1534 ddi_dma_mem_free(&buf->pb_acch);
1535 if (buf->pb_dmah)
1536 ddi_dma_free_handle(&buf->pb_dmah);
1537 kmem_free(buf, sizeof (*buf));
1540 static pcn_buf_t *
1541 pcn_allocbuf(pcn_t *pcnp)
1543 pcn_buf_t *buf;
1544 size_t len;
1545 unsigned ccnt;
1546 ddi_dma_cookie_t dmac;
1548 buf = kmem_zalloc(sizeof (*buf), KM_SLEEP);
1550 if (ddi_dma_alloc_handle(pcnp->pcn_dip, &pcn_dma_attr, DDI_DMA_SLEEP,
1551 NULL, &buf->pb_dmah) != DDI_SUCCESS) {
1552 kmem_free(buf, sizeof (*buf));
1553 return (NULL);
1556 if (ddi_dma_mem_alloc(buf->pb_dmah, PCN_BUFSZ, &pcn_bufattr,
1557 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &buf->pb_buf, &len,
1558 &buf->pb_acch) != DDI_SUCCESS) {
1559 pcn_destroybuf(buf);
1560 return (NULL);
1563 if (ddi_dma_addr_bind_handle(buf->pb_dmah, NULL, buf->pb_buf, len,
1564 DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &dmac,
1565 &ccnt) != DDI_DMA_MAPPED) {
1566 pcn_destroybuf(buf);
1567 return (NULL);
1569 buf->pb_paddr = dmac.dmac_address;
1571 return (buf);
1574 static int
1575 pcn_alloctxring(pcn_t *pcnp)
1577 int rval;
1578 int i;
1579 size_t size;
1580 size_t len;
1581 ddi_dma_cookie_t dmac;
1582 unsigned ncookies;
1583 caddr_t kaddr;
1585 size = PCN_TXRING * sizeof (pcn_tx_desc_t);
1587 rval = ddi_dma_alloc_handle(pcnp->pcn_dip, &pcn_dmadesc_attr,
1588 DDI_DMA_SLEEP, NULL, &pcnp->pcn_txdesc_dmah);
1589 if (rval != DDI_SUCCESS) {
1590 pcn_error(pcnp->pcn_dip, "unable to allocate DMA handle for tx "
1591 "descriptors");
1592 return (DDI_FAILURE);
1595 rval = ddi_dma_mem_alloc(pcnp->pcn_txdesc_dmah, size, &pcn_devattr,
1596 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &kaddr, &len,
1597 &pcnp->pcn_txdesc_acch);
1598 if (rval != DDI_SUCCESS) {
1599 pcn_error(pcnp->pcn_dip, "unable to allocate DMA memory for tx "
1600 "descriptors");
1601 return (DDI_FAILURE);
1604 rval = ddi_dma_addr_bind_handle(pcnp->pcn_txdesc_dmah, NULL, kaddr,
1605 size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dmac,
1606 &ncookies);
1607 if (rval != DDI_DMA_MAPPED) {
1608 pcn_error(pcnp->pcn_dip, "unable to bind DMA for tx "
1609 "descriptors");
1610 return (DDI_FAILURE);
1613 ASSERT(ncookies == 1);
1615 pcnp->pcn_txdesc_paddr = dmac.dmac_address;
1616 pcnp->pcn_txdescp = (void *)kaddr;
1618 pcnp->pcn_txbufs = kmem_zalloc(PCN_TXRING * sizeof (pcn_buf_t *),
1619 KM_SLEEP);
1621 for (i = 0; i < PCN_TXRING; i++) {
1622 pcn_buf_t *txb = pcn_allocbuf(pcnp);
1623 if (txb == NULL)
1624 return (DDI_FAILURE);
1625 pcnp->pcn_txbufs[i] = txb;
1628 return (DDI_SUCCESS);
1631 static int
1632 pcn_allocrxring(pcn_t *pcnp)
1634 int rval;
1635 int i;
1636 size_t len;
1637 size_t size;
1638 ddi_dma_cookie_t dmac;
1639 unsigned ncookies;
1640 caddr_t kaddr;
1642 size = PCN_RXRING * sizeof (pcn_rx_desc_t);
1644 rval = ddi_dma_alloc_handle(pcnp->pcn_dip, &pcn_dmadesc_attr,
1645 DDI_DMA_SLEEP, NULL, &pcnp->pcn_rxdesc_dmah);
1646 if (rval != DDI_SUCCESS) {
1647 pcn_error(pcnp->pcn_dip, "unable to allocate DMA handle for rx "
1648 "descriptors");
1649 return (DDI_FAILURE);
1652 rval = ddi_dma_mem_alloc(pcnp->pcn_rxdesc_dmah, size, &pcn_devattr,
1653 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &kaddr, &len,
1654 &pcnp->pcn_rxdesc_acch);
1655 if (rval != DDI_SUCCESS) {
1656 pcn_error(pcnp->pcn_dip, "unable to allocate DMA memory for rx "
1657 "descriptors");
1658 return (DDI_FAILURE);
1661 rval = ddi_dma_addr_bind_handle(pcnp->pcn_rxdesc_dmah, NULL, kaddr,
1662 size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dmac,
1663 &ncookies);
1664 if (rval != DDI_DMA_MAPPED) {
1665 pcn_error(pcnp->pcn_dip, "unable to bind DMA for rx "
1666 "descriptors");
1667 return (DDI_FAILURE);
1670 ASSERT(ncookies == 1);
1672 pcnp->pcn_rxdesc_paddr = dmac.dmac_address;
1673 pcnp->pcn_rxdescp = (void *)kaddr;
1675 pcnp->pcn_rxbufs = kmem_zalloc(PCN_RXRING * sizeof (pcn_buf_t *),
1676 KM_SLEEP);
1678 for (i = 0; i < PCN_RXRING; i++) {
1679 pcn_buf_t *rxb = pcn_allocbuf(pcnp);
1680 if (rxb == NULL)
1681 return (DDI_FAILURE);
1682 pcnp->pcn_rxbufs[i] = rxb;
1685 return (DDI_SUCCESS);
1688 static void
1689 pcn_freetxring(pcn_t *pcnp)
1691 int i;
1693 if (pcnp->pcn_txbufs) {
1694 for (i = 0; i < PCN_TXRING; i++)
1695 pcn_destroybuf(pcnp->pcn_txbufs[i]);
1697 kmem_free(pcnp->pcn_txbufs, PCN_TXRING * sizeof (pcn_buf_t *));
1700 if (pcnp->pcn_txdesc_paddr)
1701 (void) ddi_dma_unbind_handle(pcnp->pcn_txdesc_dmah);
1702 if (pcnp->pcn_txdesc_acch)
1703 ddi_dma_mem_free(&pcnp->pcn_txdesc_acch);
1704 if (pcnp->pcn_txdesc_dmah)
1705 ddi_dma_free_handle(&pcnp->pcn_txdesc_dmah);
1708 static void
1709 pcn_freerxring(pcn_t *pcnp)
1711 int i;
1713 if (pcnp->pcn_rxbufs) {
1714 for (i = 0; i < PCN_RXRING; i++)
1715 pcn_destroybuf(pcnp->pcn_rxbufs[i]);
1717 kmem_free(pcnp->pcn_rxbufs, PCN_RXRING * sizeof (pcn_buf_t *));
1720 if (pcnp->pcn_rxdesc_paddr)
1721 (void) ddi_dma_unbind_handle(pcnp->pcn_rxdesc_dmah);
1722 if (pcnp->pcn_rxdesc_acch)
1723 ddi_dma_mem_free(&pcnp->pcn_rxdesc_acch);
1724 if (pcnp->pcn_rxdesc_dmah)
1725 ddi_dma_free_handle(&pcnp->pcn_rxdesc_dmah);
1728 static int
1729 pcn_set_chipid(pcn_t *pcnp, uint32_t conf_id)
1731 char *name = NULL;
1732 uint32_t chipid;
1734 /*
1735 * Note: we can *NOT* put the chip into 32-bit mode yet. If a
1736 * lance ethernet device is present and pcn tries to attach, it can
1737 * hang the device (requiring a hardware reset), since they only work
1738 * in 16-bit mode.
1739 *
1740 * The solution is to check using 16-bit operations first, and determine
1741 * if 32-bit mode operations are supported.
1742 *
1743 * The safest way to do this is to read the PCI subsystem ID from
1744 * BCR23/24 and compare that with the value read from PCI config
1745 * space.
1746 */
1747 chipid = pcn_bcr_read16(pcnp, PCN_BCR_PCISUBSYSID);
1748 chipid <<= 16;
1749 chipid |= pcn_bcr_read16(pcnp, PCN_BCR_PCISUBVENID);
1751 /*
1752 * The test for 0x10001000 is a hack to pacify VMware, whose
1753 * pseudo-PCnet interface is broken. Reading the subsystem register
1754 * from PCI config space yields 0x00000000 while reading the same value
1755 * from I/O space yields 0x10001000. It's not supposed to be that way.
1756 */
1757 if (chipid == conf_id || chipid == 0x10001000) {
1758 /* We're in 16-bit mode. */
1759 chipid = pcn_csr_read16(pcnp, PCN_CSR_CHIPID1);
1760 chipid <<= 16;
1761 chipid |= pcn_csr_read16(pcnp, PCN_CSR_CHIPID0);
1762 } else {
1763 chipid = pcn_csr_read(pcnp, PCN_CSR_CHIPID1);
1764 chipid <<= 16;
1765 chipid |= pcn_csr_read(pcnp, PCN_CSR_CHIPID0);
1768 chipid = CHIPID_PARTID(chipid);
1770 /* Set default value and override as needed */
1771 switch (chipid) {
1772 case Am79C970:
1773 name = "Am79C970 PCnet-PCI";
1774 pcn_error(pcnp->pcn_dip, "Unsupported chip: %s", name);
1775 return (DDI_FAILURE);
1776 case Am79C970A:
1777 name = "Am79C970A PCnet-PCI II";
1778 pcn_error(pcnp->pcn_dip, "Unsupported chip: %s", name);
1779 return (DDI_FAILURE);
1780 case Am79C971:
1781 name = "Am79C971 PCnet-FAST";
1782 break;
1783 case Am79C972:
1784 name = "Am79C972 PCnet-FAST+";
1785 break;
1786 case Am79C973:
1787 name = "Am79C973 PCnet-FAST III";
1788 break;
1789 case Am79C975:
1790 name = "Am79C975 PCnet-FAST III";
1791 break;
1792 case Am79C976:
1793 name = "Am79C976";
1794 break;
1795 case Am79C978:
1796 name = "Am79C978";
1797 break;
1798 default:
1799 name = "Unknown";
1800 pcn_error(pcnp->pcn_dip, "Unknown chip id 0x%x", chipid);
1803 if (ddi_prop_update_string(DDI_DEV_T_NONE, pcnp->pcn_dip, "chipid",
1804 name) != DDI_SUCCESS) {
1805 pcn_error(pcnp->pcn_dip, "Unable to set chipid property");
1806 return (DDI_FAILURE);
1809 return (DDI_SUCCESS);
1812 static void
1813 pcn_error(dev_info_t *dip, char *fmt, ...)
1815 va_list ap;
1816 char buf[256];
1818 va_start(ap, fmt);
1819 (void) vsnprintf(buf, sizeof (buf), fmt, ap);
1820 va_end(ap);
1822 if (dip)
1823 cmn_err(CE_WARN, "%s%d: %s", ddi_driver_name(dip),
1824 ddi_get_instance(dip), buf);
1825 else
1826 cmn_err(CE_WARN, "pcn: %s", buf);