/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file is part of the Chelsio T1 Ethernet driver.
 *
 * Copyright (C) 2003-2005 Chelsio Communications. All rights reserved.
 */

/*
 * Solaris Multithreaded STREAMS Chelsio PCI Ethernet Driver.
 * Interface code
 */
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/byteorder.h>
#include <sys/atomic.h>
#include <sys/ethernet.h>
#if PE_PROFILING_ENABLED
#include <sys/time.h>
#endif
#include <sys/gld.h>
#include "ostypes.h"
#include "common.h"
#include "oschtoe.h"
#ifdef CONFIG_CHELSIO_T1_1G
#include "fpga_defs.h"
#endif
#include "regs.h"
#ifdef CONFIG_CHELSIO_T1_OFFLOAD
#include "mc3.h"
#include "mc4.h"
#endif
#include "sge.h"
#include "tp.h"
#ifdef CONFIG_CHELSIO_T1_OFFLOAD
#include "ulp.h"
#endif
#include "espi.h"
#include "elmer0.h"
#include "gmac.h"
#include "cphy.h"
#include "suni1x10gexp_regs.h"
#include "ch.h"
#define	MLEN(mp)	((mp)->b_wptr - (mp)->b_rptr)

extern uint32_t buffers_in_use[];
extern kmutex_t in_use_l;
extern uint32_t in_use_index;

static void link_start(ch_t *sa, struct pe_port_t *pp);
static ch_esb_t *ch_alloc_small_esbbuf(ch_t *sa, uint32_t i);
static ch_esb_t *ch_alloc_big_esbbuf(ch_t *sa, uint32_t i);
void ch_big_rbuf_recycle(ch_esb_t *rbp);
void ch_small_rbuf_recycle(ch_esb_t *rbp);
static const struct board_info *pe_sa_init(ch_t *sa);
static int ch_set_config_data(ch_t *chp);
void pe_rbuf_pool_free(ch_t *chp);
static void pe_free_driver_resources(ch_t *sa);
static void update_mtu_tab(ch_t *adapter);
static int pe_change_mtu(ch_t *chp);
/*
 * CPL5 Defines (from netinet/cpl5_commands.h)
 */
#define	FLITSTOBYTES	8

#define	CPL_FORMAT_0_SIZE	8
#define	CPL_FORMAT_1_SIZE	16
#define	CPL_FORMAT_2_SIZE	24
#define	CPL_FORMAT_3_SIZE	32
#define	CPL_FORMAT_4_SIZE	40
#define	CPL_FORMAT_5_SIZE	48

#define	TID_MASK	0xffffff

#define	PE_LINK_SPEED_AUTONEG	5

static int pe_small_rbuf_pool_init(ch_t *sa);
static int pe_big_rbuf_pool_init(ch_t *sa);
static int pe_make_fake_arp(ch_t *chp, unsigned char *arpp);
static uint32_t pe_get_ip(unsigned char *arpp);
/*
 * May be set in /etc/system to 0 to use the default latency timer for 10G.
 * See PCI register 0xc definition.
 */
int enable_latency_timer = 1;
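/*
 * For example, to fall back to the default latency timer on a 10G card,
 * one could add the following line to /etc/system and reboot (module
 * name assumed to be chxge, matching this driver's directory):
 *
 *	set chxge:enable_latency_timer = 0
 */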
/*
 * May be set in /etc/system to 0 to disable hardware checksum for
 * TCP and UDP.
 */
int enable_checksum_offload = 1;

/*
 * Multiplier for freelist pool.
 */
int fl_sz_multiplier = 6;
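/*
 * Interrupt service routine: claims the interrupt if sge_data_in()
 * found SGE work to process, otherwise reports it unclaimed so the
 * framework can try other handlers.
 */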
uint_t
pe_intr(ch_t *sa)
{
	mutex_enter(&sa->ch_intr);

	if (sge_data_in(sa->sge)) {
		sa->isr_intr++;
		mutex_exit(&sa->ch_intr);
		return (DDI_INTR_CLAIMED);
	}

	mutex_exit(&sa->ch_intr);

	return (DDI_INTR_UNCLAIMED);
}
/*
 * Each setup struct will call this function to
 * initialize.
 */
void
pe_init(void *xsa)
{
	ch_t *sa = NULL;
	int i = 0;

	sa = (ch_t *)xsa;

	/*
	 * Need to count the number of times this routine is called
	 * because we only want the resources to be allocated once.
	 * The 7500 has four ports and so this routine can be called
	 * once for each port.
	 */
	if (sa->init_counter == 0) {
		for_each_port(sa, i) {
			/*
			 * We only want to initialize the line if it is down.
			 */
			if (sa->port[i].line_up == 0) {
				link_start(sa, &sa->port[i]);
				sa->port[i].line_up = 1;
			}
		}

		(void) t1_init_hw_modules(sa);

		/*
		 * Enable/Disable checksum offloading.
		 */
		if (sa->ch_config.cksum_enabled) {
			if (sa->config_data.offload_ip_cksum) {
				/* Notify that HW will do the checksum. */
				t1_tp_set_ip_checksum_offload(sa->tp, 1);
			}

			if (sa->config_data.offload_tcp_cksum) {
				/* Notify that HW will do the checksum. */
				t1_tp_set_tcp_checksum_offload(sa->tp, 1);
			}

			if (sa->config_data.offload_udp_cksum) {
				/* Notify that HW will do the checksum. */
				t1_tp_set_udp_checksum_offload(sa->tp, 1);
			}
		}

		sa->ch_flags |= PEINITDONE;

		sa->init_counter++;
	}

	/*
	 * Enable interrupts after starting the SGE so
	 * that the SGE is ready to handle interrupts.
	 */
	(void) sge_start(sa->sge);
	t1_interrupts_enable(sa);

	/*
	 * set mtu (either 1500 or bigger)
	 */
	(void) pe_change_mtu(sa);
#ifdef HOST_PAUSE
	/*
	 * get the configured value of the MAC.
	 */
	(void) t1_tpi_read(sa, SUNI1x10GEXP_REG_TXXG_CONFIG_1 << 2,
	    &sa->txxg_cfg1);
#endif
}
/* ARGSUSED */
static void
link_start(ch_t *sa, struct pe_port_t *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->enaddr);
	(void) t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
/*
 * turn off interrupts...
 */
void
pe_stop(ch_t *sa)
{
	t1_interrupts_disable(sa);
	(void) sge_stop(sa->sge);

	/*
	 * we can still be running an interrupt thread in sge_data_in().
	 * If we are, we'll block on the ch_intr lock
	 */
	mutex_enter(&sa->ch_intr);
	mutex_exit(&sa->ch_intr);
}
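/*
 * Transmit entry point: builds a gather list of cmdQ_ce_t entries for
 * the mblk chain, prepending a CPL_TX_PKT header (reusing mblk headroom
 * when available, otherwise allocating a fresh mblk), then hands the
 * list to sge_data_out(). A 16-entry array on the stack is used first
 * and grown via kmem_alloc() for long chains.
 */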
/*
 * output mblk to SGE level and out to the wire.
 */
int
pe_start(ch_t *sa, mblk_t *mp, uint32_t flg)
{
	mblk_t *m0 = mp;
	cmdQ_ce_t cm[16];
	cmdQ_ce_t *cmp;
	cmdQ_ce_t *hmp = &cm[0];	/* head of cm table (may be kmem_alloc'ed) */
	int cm_flg = 0;			/* flag (1 - if kmem_alloc'ed) */
	int nseg = 0;			/* number of cmdQ_ce entries created */
	int mseg = 16;			/* maximum entries in hmp array */
	int freeme = 0;			/* we have an mblk to free in case of error */
	uint32_t ch_bind_dma_handle(ch_t *, int, caddr_t, cmdQ_ce_t *,
	    uint32_t);
	int rv;				/* return value on error */

#ifdef CONFIG_CHELSIO_T1_OFFLOAD
	if (flg & CH_OFFLOAD) {
		hmp->ce_pa = ((tbuf_t *)mp)->tb_pa;
		hmp->ce_dh = NULL;
		hmp->ce_flg = DH_TOE;
		hmp->ce_len = ((tbuf_t *)mp)->tb_len;
		hmp->ce_mp = mp;

		/* make sure data is flushed to physical memory */
		(void) ddi_dma_sync((ddi_dma_handle_t)((tbuf_t *)mp)->tb_dh,
		    (off_t)0, hmp->ce_len, DDI_DMA_SYNC_FORDEV);

		if (sge_data_out(sa->sge, 0, mp, hmp, 1, flg) == 0) {
			return (0);
		}

		/*
		 * set a flag so we'll restart upper layer when
		 * resources become available.
		 */
		sa->ch_blked = 1;
		return (1);
	}
#endif	/* CONFIG_CHELSIO_T1_OFFLOAD */

	/* writes from toe will always have CPL header in place */
	if (flg & CH_NO_CPL) {
		struct cpl_tx_pkt *cpl;

		/* PR2928 & PR3309 */
		if (sa->ch_ip == 0) {
			ushort_t ethertype = ntohs(*(short *)&mp->b_rptr[12]);
			if (ethertype == ETHERTYPE_ARP) {
				if (is_T2(sa)) {
					/*
					 * We assume here that the arp will be
					 * contained in one mblk.
					 */
					if (pe_make_fake_arp(sa, mp->b_rptr)) {
						freemsg(mp);
						sa->oerr++;
						return (0);
					}
				} else {
					sa->ch_ip = pe_get_ip(mp->b_rptr);
				}
			}
		}

		/*
		 * if space in front of packet big enough for CPL
		 * header, then use it. We'll allocate an mblk
		 * otherwise.
		 */
		if ((mp->b_rptr - mp->b_datap->db_base) >= SZ_CPL_TX_PKT) {
			mp->b_rptr -= SZ_CPL_TX_PKT;
		} else {
#ifdef SUN_KSTATS
			sa->sge->intr_cnt.tx_need_cpl_space++;
#endif
			m0 = allocb(SZ_CPL_TX_PKT, BPRI_HI);
			if (m0 == NULL) {
				freemsg(mp);
				sa->oerr++;
				return (0);
			}

			m0->b_wptr = m0->b_rptr + SZ_CPL_TX_PKT;
			m0->b_cont = mp;
			freeme = 1;

			mp = m0;
		}

		/* fill in cpl header */
		cpl = (struct cpl_tx_pkt *)mp->b_rptr;
		cpl->opcode = CPL_TX_PKT;
		cpl->iff = 0;			/* XXX port 0 needs fixing with NEMO */
		cpl->ip_csum_dis = 1;		/* no IP header cksum */
		cpl->l4_csum_dis =
		    flg & CH_NO_HWCKSUM;	/* CH_NO_HWCKSUM == 1 */
		cpl->vlan_valid = 0;		/* no vlan */
	}

	if (m0->b_cont) {
#ifdef SUN_KSTATS
		sa->sge->intr_cnt.tx_multi_mblks++;
#endif
		while (mp) {
			int lseg;	/* added by ch_bind_dma_handle() */
			int len;

			len = MLEN(mp);
			/* skip mblks with no data */
			if (len == 0) {
				mp = mp->b_cont;
				continue;
			}

			/*
			 * if we've run out of space on stack, then we
			 * allocate a temporary buffer to hold the
			 * information. This will kill the performance,
			 * but since it shouldn't really occur, we can live
			 * with it. Since jumbo frames may map multiple
			 * descriptors, we reallocate the hmp[] array before
			 * we reach the end.
			 */
			if (nseg >= (mseg - 4)) {
				cmdQ_ce_t *buf;
				int j;

				buf = kmem_alloc(sizeof (cmdQ_ce_t) * 2 * mseg,
				    KM_SLEEP);

				for (j = 0; j < nseg; j++)
					buf[j] = hmp[j];

				if (cm_flg) {
					kmem_free(hmp,
					    mseg * sizeof (cmdQ_ce_t));
				} else
					cm_flg = 1;

				hmp = buf;
				mseg = 2 * mseg;

				/*
				 * We've used up the ch table on the stack.
				 */
			}

			lseg = ch_bind_dma_handle(sa, len,
			    (void *)mp->b_rptr, &hmp[nseg],
			    mseg - nseg);
			if (lseg == 0) {
				sa->sge->intr_cnt.tx_no_dma1++;

				/*
				 * ran out of space. Going to bail.
				 */
				rv = 0;

				/*
				 * we may have processed previous mblks and
				 * have descriptors. If so, we need to free
				 * the meta struct entries before freeing
				 * the mblk.
				 */
				if (nseg)
					goto error;
				goto error1;
			}
			nseg += lseg;
			mp = mp->b_cont;
		}

		/*
		 * SHOULD NEVER OCCUR, BUT...
		 * no data if nseg 0 or
		 * nseg 1 and a CPL mblk (CPL mblk only with offload mode)
		 * and no data
		 */
		if ((nseg == 0) || (freeme && (nseg == 1))) {
			rv = 0;
			goto error1;
		}
	} else {
		int len;

		/* we assume that we always have data with one packet */
		len = MLEN(mp);

		nseg = ch_bind_dma_handle(sa, len,
		    (void *)mp->b_rptr, &hmp[0], 16);
		if (nseg == 0) {
			sa->sge->intr_cnt.tx_no_dma2++;

			/*
			 * ran out of space. Going to bail.
			 */
			rv = 0;
			goto error1;
		}

		/*
		 * dummy arp message to handle PR3309 & PR2928
		 */
		if (flg & CH_ARP)
			hmp->ce_flg |= DH_ARP;
	}

	if (sge_data_out(sa->sge, 0, m0, hmp, nseg, flg) == 0) {
		if (cm_flg)
			kmem_free(hmp, mseg * sizeof (cmdQ_ce_t));
		return (0);
	}

	/*
	 * set a flag so we'll restart upper layer when
	 * resources become available.
	 */
	if ((flg & CH_ARP) == 0)
		sa->ch_blked = 1;
	rv = 1;

error:
	/*
	 * unmap the physical addresses allocated earlier.
	 */
	cmp = hmp;
	for (--nseg; nseg >= 0; nseg--) {
		if (cmp->ce_dh) {
			if (cmp->ce_flg == DH_DMA)
				ch_unbind_dma_handle(sa, cmp->ce_dh);
		}
		cmp++;
	}

error1:
	/* free the temporary array */
	if (cm_flg)
		kmem_free(hmp, mseg * sizeof (cmdQ_ce_t));

	/*
	 * if we've allocated an mblk above, then we need to free it
	 * before returning. This is safe since we haven't done anything to
	 * the original message. The caller, gld, will still have a pointer
	 * to the original mblk.
	 */
	if (rv == 1) {
		if (freeme) {
			/* we had to allocate an mblk. Free it. */
			freeb(m0);
		} else {
			/* adjust the mblk back to original start */
			if (flg & CH_NO_CPL)
				m0->b_rptr += SZ_CPL_TX_PKT;
		}
	} else {
		freemsg(m0);
		sa->oerr++;
	}

	return (rv);
}
/* KLUDGE ALERT. HARD WIRED TO PORT ZERO */
void
pe_set_mac(ch_t *sa, unsigned char *ac_enaddr)
{
	sa->port[0].mac->ops->macaddress_set(sa->port[0].mac, ac_enaddr);
}
/* KLUDGE ALERT. HARD WIRED TO PORT ZERO */
unsigned char *
pe_get_mac(ch_t *sa)
{
	return (sa->port[0].enaddr);
}
/* KLUDGE ALERT. HARD WIRED TO ONE PORT */
void
pe_set_promiscuous(ch_t *sa, int flag)
{
	struct cmac *mac = sa->port[0].mac;
	struct t1_rx_mode rm;

	switch (flag) {
	case 0:		/* turn off promiscuous mode */
		sa->ch_flags &= ~(PEPROMISC|PEALLMULTI);
		break;

	case 1:		/* turn on promiscuous mode */
		sa->ch_flags |= PEPROMISC;
		break;

	case 2:		/* turn on multicast reception */
		sa->ch_flags |= PEALLMULTI;
		break;
	}

	mutex_enter(&sa->ch_mc_lck);
	rm.chp = sa;
	rm.mc = sa->ch_mc;

	mac->ops->set_rx_mode(mac, &rm);
	mutex_exit(&sa->ch_mc_lck);
}
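/*
 * GLD multicast entry point: add (GLD_MULTI_ENABLE) or remove
 * (GLD_MULTI_DISABLE) a multicast address from the per-adapter list,
 * then push the updated receive mode down to the MAC.
 */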
int
pe_set_mc(ch_t *sa, uint8_t *ep, int flg)
{
	struct cmac *mac = sa->port[0].mac;
	struct t1_rx_mode rm;

	if (flg == GLD_MULTI_ENABLE) {
		ch_mc_t *mcp;

		mcp = (ch_mc_t *)kmem_zalloc(sizeof (struct ch_mc),
		    KM_NOSLEEP);
		if (mcp == NULL)
			return (GLD_NORESOURCES);

		bcopy(ep, &mcp->cmc_mca, 6);

		mutex_enter(&sa->ch_mc_lck);
		mcp->cmc_next = sa->ch_mc;
		sa->ch_mc = mcp;
		sa->ch_mc_cnt++;
		mutex_exit(&sa->ch_mc_lck);

	} else if (flg == GLD_MULTI_DISABLE) {
		ch_mc_t **p = &sa->ch_mc;
		ch_mc_t *q = NULL;

		mutex_enter(&sa->ch_mc_lck);
		p = &sa->ch_mc;
		while (*p) {
			if (bcmp(ep, (*p)->cmc_mca, 6) == 0) {
				q = *p;
				*p = (*p)->cmc_next;
				kmem_free(q, sizeof (*q));
				sa->ch_mc_cnt--;
				break;
			}

			p = &(*p)->cmc_next;
		}
		mutex_exit(&sa->ch_mc_lck);

		if (q == NULL)
			return (GLD_BADARG);
	} else
		return (GLD_BADARG);

	mutex_enter(&sa->ch_mc_lck);
	rm.chp = sa;
	rm.mc = sa->ch_mc;

	mac->ops->set_rx_mode(mac, &rm);
	mutex_exit(&sa->ch_mc_lck);

	return (GLD_SUCCESS);
}
/*
 * return: speed - bandwidth of interface
 * return: intrcnt - # interrupts
 * return: norcvbuf - # received packets dropped by driver
 * return: oerrors - # bad send packets
 * return: ierrors - # bad receive packets
 * return: underrun - # bad underrun xmit packets
 * return: overrun - # bad overrun recv packets
 * return: framing - # bad aligned recv packets
 * return: crc - # bad FCS (crc) recv packets
 * return: carrier - times carrier was lost
 * return: collisions - # xmit collisions
 * return: xcollisions - # xmit pkts dropped due to collisions
 * return: late - # late xmit collisions
 * return: defer - # deferred xmit packets
 * return: xerrs - # xmit dropped packets
 * return: rerrs - # recv dropped packets
 * return: toolong - # recv pkts too long
 * return: runt - # recv runt pkts
 * return: multixmt - # multicast pkts xmitted
 * return: multircv - # multicast pkts recved
 * return: brdcstxmt - # broadcast pkts xmitted
 * return: brdcstrcv - # broadcast pkts rcvd
 */
int
pe_get_stats(ch_t *sa, uint64_t *speed, uint32_t *intrcnt, uint32_t *norcvbuf,
    uint32_t *oerrors, uint32_t *ierrors, uint32_t *underrun,
    uint32_t *overrun, uint32_t *framing, uint32_t *crc,
    uint32_t *carrier, uint32_t *collisions, uint32_t *xcollisions,
    uint32_t *late, uint32_t *defer, uint32_t *xerrs, uint32_t *rerrs,
    uint32_t *toolong, uint32_t *runt, ulong_t *multixmt, ulong_t *multircv,
    ulong_t *brdcstxmt, ulong_t *brdcstrcv)
{
	struct pe_port_t *pt;
	int line_speed;
	int line_duplex;
	int line_is_active;
	uint64_t v;
	const struct cmac_statistics *sp;

	pt = &(sa->port[0]);
	(void) pt->phy->ops->get_link_status(pt->phy,
	    &line_is_active, &line_speed, &line_duplex, NULL);

	switch (line_speed) {
	case SPEED_10:
		*speed = 10000000;
		break;
	case SPEED_100:
		*speed = 100000000;
		break;
	case SPEED_1000:
		*speed = 1000000000;
		break;
	case SPEED_10000:
		/*
		 * kludge to get 10,000,000,000 constant (and keep
		 * compiler happy).
		 */
		v = 10000000;
		v *= 1000;
		*speed = v;
		break;
	default:
		goto error;
	}

	*intrcnt = sa->isr_intr;
	*norcvbuf = sa->norcvbuf;

	sp = sa->port[0].mac->ops->statistics_update(sa->port[0].mac,
	    MAC_STATS_UPDATE_FULL);

	*ierrors = sp->RxOctetsBad;

	/*
	 * not sure this is correct. # aborted at driver level +
	 * # at hardware level
	 */
	*oerrors = sa->oerr + sp->TxFramesAbortedDueToXSCollisions +
	    sp->TxUnderrun + sp->TxLengthErrors +
	    sp->TxInternalMACXmitError +
	    sp->TxFramesWithExcessiveDeferral +
	    sp->TxFCSErrors;

	*underrun = sp->TxUnderrun;
	*overrun = sp->RxFrameTooLongErrors;
	*framing = sp->RxAlignErrors;
	*crc = sp->RxFCSErrors;
	*carrier = 0;		/* need to find this */
	*collisions = sp->TxTotalCollisions;
	*xcollisions = sp->TxFramesAbortedDueToXSCollisions;
	*late = sp->TxLateCollisions;
	*defer = sp->TxFramesWithDeferredXmissions;
	*xerrs = sp->TxUnderrun + sp->TxLengthErrors +
	    sp->TxInternalMACXmitError + sp->TxFCSErrors;
	*rerrs = sp->RxSymbolErrors + sp->RxSequenceErrors + sp->RxRuntErrors +
	    sp->RxJabberErrors + sp->RxInternalMACRcvError +
	    sp->RxInRangeLengthErrors + sp->RxOutOfRangeLengthField;
	*toolong = sp->RxFrameTooLongErrors;
	*runt = sp->RxRuntErrors;

	*multixmt = sp->TxMulticastFramesOK;
	*multircv = sp->RxMulticastFramesOK;
	*brdcstxmt = sp->TxBroadcastFramesOK;
	*brdcstrcv = sp->RxBroadcastFramesOK;

	return (0);

error:
	*speed = 0;
	*intrcnt = 0;
	*norcvbuf = 0;
	*oerrors = 0;
	*ierrors = 0;
	*underrun = 0;
	*overrun = 0;
	*framing = 0;
	*crc = 0;
	*carrier = 0;
	*collisions = 0;
	*xcollisions = 0;
	*late = 0;
	*defer = 0;
	*xerrs = 0;
	*rerrs = 0;
	*toolong = 0;
	*runt = 0;
	*multixmt = 0;
	*multircv = 0;
	*brdcstxmt = 0;
	*brdcstrcv = 0;

	return (1);
}
uint32_t ch_gtm = 0;			/* Default: Global Tunnel Mode off */
uint32_t ch_global_config = 0x07000000;	/* Default: errors, warnings, status */
uint32_t ch_is_asic = 0;		/* Default: non-ASIC */
uint32_t ch_link_speed = PE_LINK_SPEED_AUTONEG;	/* Default: auto-negotiate */
uint32_t ch_num_of_ports = 1;		/* Default: 1 port */
uint32_t ch_tp_reset_cm = 1;		/* Default: reset CM memory map */
uint32_t ch_phy_tx_fifo = 0;		/* Default: 0 phy tx fifo depth */
uint32_t ch_phy_rx_fifo = 0;		/* Default: 0 phy rx fifo depth */
uint32_t ch_phy_force_master = 1;	/* Default: link always master mode */
uint32_t ch_mc5_rtbl_size = 2048;	/* Default: TCAM routing table size */
uint32_t ch_mc5_dbsvr_size = 128;	/* Default: TCAM server size */
uint32_t ch_mc5_parity = 1;		/* Default: parity error checking */
uint32_t ch_mc5_issue_syn = 0;		/* Default: Allow transaction overlap */
uint32_t ch_packet_tracing = 0;		/* Default: no packet tracing */
uint32_t ch_server_region_len =
	DEFAULT_SERVER_REGION_LEN;
uint32_t ch_rt_region_len =
	DEFAULT_RT_REGION_LEN;
uint32_t ch_offload_ip_cksum = 0;	/* Default: no checksum offloading */
uint32_t ch_offload_udp_cksum = 1;	/* Default: offload UDP checksum */
uint32_t ch_offload_tcp_cksum = 1;	/* Default: offload TCP checksum */
uint32_t ch_sge_cmdq_threshold = 0;	/* Default: threshold 0 */
uint32_t ch_sge_flq_threshold = 0;	/* Default: SGE flq threshold */
uint32_t ch_sge_cmdq0_cnt =		/* Default: cmd queue 0 size */
	SGE_CMDQ0_CNT;
uint32_t ch_sge_cmdq1_cnt =		/* Default: cmd queue 1 size */
	SGE_CMDQ0_CNT;
uint32_t ch_sge_flq0_cnt =		/* Default: free list queue-0 length */
	SGE_FLQ0_CNT;
uint32_t ch_sge_flq1_cnt =		/* Default: free list queue-1 length */
	SGE_FLQ0_CNT;
uint32_t ch_sge_respq_cnt =		/* Default: response queue size */
	SGE_RESPQ_CNT;
uint32_t ch_stats = 1;			/* Default: Automatic Update MAC stats */
uint32_t ch_tx_delay_us = 0;		/* Default: no usec delay to Tx pkts */
int32_t ch_chip = -1;			/* Default: use hardware lookup tbl */
uint32_t ch_exit_early = 0;		/* Default: complete initialization */
uint32_t ch_rb_num_of_entries = 1000;	/* Default: number of ring buffer entries */
uint32_t ch_rb_size_of_entries = 64;	/* Default: ring buffer entry size */
uint32_t ch_rb_flag = 1;		/* Default: ring buffer flag */
uint32_t ch_type;
uint64_t ch_cat_opt0 = 0;
uint64_t ch_cat_opt1 = 0;
uint32_t ch_timer_delay = 0;		/* Default: use value from board entry */
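/*
 * Called once per device during attach: snapshot the configuration
 * variables above, initialize the software modules, preallocate the
 * small and big receive-buffer pools, and apply PCI-X tuning for
 * 10G boards.
 */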
int
pe_attach(ch_t *chp)
{
	int return_val = 1;
	const struct board_info *bi;
	uint32_t pcix_cmd;

	(void) ch_set_config_data(chp);

	bi = pe_sa_init(chp);
	if (bi == 0)
		return (1);

	if (t1_init_sw_modules(chp, bi) < 0)
		return (1);

	if (pe_small_rbuf_pool_init(chp) == 0)
		return (1);

	if (pe_big_rbuf_pool_init(chp) == 0)
		return (1);

	/*
	 * We gain significant performance improvements when we
	 * increase the PCI's maximum memory read byte count to
	 * 2K (HW doesn't support 4K at this time) and set the PCI's
	 * maximum outstanding split transactions to 4. We want to do
	 * this for 10G. Done by software utility.
	 */
	if (board_info(chp)->caps & SUPPORTED_10000baseT_Full) {
		(void) t1_os_pci_read_config_4(chp, A_PCICFG_PCIX_CMD,
		    &pcix_cmd);
		/*
		 * if the burstsize is set, then use it instead of default
		 */
		if (chp->ch_config.burstsize_set) {
			pcix_cmd &= ~0xc0000;
			pcix_cmd |= (chp->ch_config.burstsize << 18);
		}
		/*
		 * if the split transaction count is set, then use it.
		 */
		if (chp->ch_config.transaction_cnt_set) {
			pcix_cmd &= ~0x700000;
			pcix_cmd |= (chp->ch_config.transaction_cnt << 20);
		}

		/*
		 * set relaxed ordering flag as configured in chxge.conf
		 */
		pcix_cmd |= (chp->ch_config.relaxed_ordering << 17);

		(void) t1_os_pci_write_config_4(chp, A_PCICFG_PCIX_CMD,
		    pcix_cmd);
	}

	/*
	 * set the latency timer to F8 for 10G cards.
	 * Done by software utility.
	 */
	if (enable_latency_timer) {
		if (board_info(chp)->caps & SUPPORTED_10000baseT_Full) {
			(void) t1_os_pci_write_config_4(chp, 0xc, 0xf800);
		}
	}

	/*
	 * update mtu table (regs: 0x404 - 0x420) with bigger values than
	 * default.
	 */
	update_mtu_tab(chp);

	/*
	 * Clear all interrupts now. Don't enable
	 * them until later.
	 */
	t1_interrupts_clear(chp);

	/*
	 * Function succeeded.
	 */
	return_val = 0;

	return (return_val);
}
/*
 * DESC: Read variables set in /boot/loader.conf and save
 *       them internally. These internal values are then
 *       used to make decisions at run-time on behavior thus
 *       allowing a certain level of customization.
 * OUT:  p_config - pointer to config structure that
 *       contains all of the new values.
 * RTN:  0 - Success;
 */
static int
ch_set_config_data(ch_t *chp)
{
	pe_config_data_t *p_config = (pe_config_data_t *)&chp->config_data;

	bzero(p_config, sizeof (pe_config_data_t));

	/*
	 * Global Tunnel Mode configuration
	 */
	p_config->gtm = ch_gtm;

	p_config->global_config = ch_global_config;

	if (p_config->gtm)
		p_config->global_config |= CFGMD_TUNNEL;

	p_config->tp_reset_cm = ch_tp_reset_cm;
	p_config->is_asic = ch_is_asic;

	/*
	 * MC5 configuration.
	 */
	p_config->mc5_rtbl_size = ch_mc5_rtbl_size;
	p_config->mc5_dbsvr_size = ch_mc5_dbsvr_size;
	p_config->mc5_parity = ch_mc5_parity;
	p_config->mc5_issue_syn = ch_mc5_issue_syn;

	p_config->offload_ip_cksum = ch_offload_ip_cksum;
	p_config->offload_udp_cksum = ch_offload_udp_cksum;
	p_config->offload_tcp_cksum = ch_offload_tcp_cksum;

	p_config->packet_tracing = ch_packet_tracing;

	p_config->server_region_len = ch_server_region_len;
	p_config->rt_region_len = ch_rt_region_len;

	/*
	 * Link configuration.
	 *
	 * 5-auto-neg; 2-1000Mbps; 1-100Mbps; 0-10Mbps
	 */
	p_config->link_speed = ch_link_speed;
	p_config->num_of_ports = ch_num_of_ports;

	/*
	 * Catp options
	 */
	p_config->cat_opt0 = ch_cat_opt0;
	p_config->cat_opt1 = ch_cat_opt1;

	/*
	 * SGE configuration.
	 */
	p_config->sge_cmdq0_cnt = ch_sge_cmdq0_cnt;
	p_config->sge_cmdq1_cnt = ch_sge_cmdq1_cnt;
	p_config->sge_flq0_cnt = ch_sge_flq0_cnt;
	p_config->sge_flq1_cnt = ch_sge_flq1_cnt;
	p_config->sge_respq_cnt = ch_sge_respq_cnt;

	p_config->phy_rx_fifo = ch_phy_rx_fifo;
	p_config->phy_tx_fifo = ch_phy_tx_fifo;

	p_config->sge_cmdq_threshold = ch_sge_cmdq_threshold;

	p_config->sge_flq_threshold = ch_sge_flq_threshold;

	p_config->phy_force_master = ch_phy_force_master;

	p_config->rb_num_of_entries = ch_rb_num_of_entries;

	p_config->rb_size_of_entries = ch_rb_size_of_entries;

	p_config->rb_flag = ch_rb_flag;

	p_config->exit_early = ch_exit_early;

	p_config->chip = ch_chip;

	p_config->stats = ch_stats;

	p_config->tx_delay_us = ch_tx_delay_us;

	return (0);
}
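/*
 * Identify the adapter: look up the board_info entry matching the PCI
 * device and subsystem IDs and verify the board revision.
 */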
static const struct board_info *
pe_sa_init(ch_t *sa)
{
	uint16_t device_id;
	uint16_t device_subid;
	const struct board_info *bi;

	sa->config = sa->config_data.global_config;
	device_id = pci_config_get16(sa->ch_hpci, 2);
	device_subid = pci_config_get16(sa->ch_hpci, 0x2e);

	bi = t1_get_board_info_from_ids(device_id, device_subid);
	if (bi == NULL) {
		cmn_err(CE_NOTE,
		    "The adapter with device_id %d %d is not supported.\n",
		    device_id, device_subid);
		return (NULL);
	}

	if (t1_get_board_rev(sa, bi, &sa->params)) {
		cmn_err(CE_NOTE, "unknown device_id %d %d\n",
		    device_id, device_subid);
		return ((const struct board_info *)NULL);
	}

	return (bi);
}
/*
 * allocate pool of small receive buffers (with vaddr & paddr) and
 * receiver buffer control structure (ch_esb_t *rbp).
 * XXX we should allow better tuning of the # of preallocated
 * free buffers against the # of freelist entries.
 */
static int
pe_small_rbuf_pool_init(ch_t *sa)
{
	int i;
	ch_esb_t *rbp;
	extern uint32_t sge_flq0_cnt;
	extern uint32_t sge_flq1_cnt;
	int size;
	uint32_t j;

	if (is_T2(sa))
		size = sge_flq1_cnt * fl_sz_multiplier;
	else
		size = sge_flq0_cnt * fl_sz_multiplier;

	mutex_init(&sa->ch_small_esbl, NULL, MUTEX_DRIVER, sa->ch_icookp);

	mutex_enter(&in_use_l);
	j = in_use_index++;
	if (in_use_index >= SZ_INUSE)
		in_use_index = 0;
	mutex_exit(&in_use_l);

	sa->ch_small_owner = NULL;
	sa->ch_sm_index = j;
	sa->ch_small_esb_free = NULL;
	for (i = 0; i < size; i++) {
		rbp = ch_alloc_small_esbbuf(sa, j);
		if (rbp == NULL)
			goto error;

		/*
		 * add entry to free list
		 */
		rbp->cs_next = sa->ch_small_esb_free;
		sa->ch_small_esb_free = rbp;

		/*
		 * add entry to owned list
		 */
		rbp->cs_owner = sa->ch_small_owner;
		sa->ch_small_owner = rbp;
	}
	return (1);

error:
	sa->ch_small_owner = NULL;

	/* free whatever we've already allocated */
	pe_rbuf_pool_free(sa);

	return (0);
}
/*
 * allocate pool of receive buffers (with vaddr & paddr) and
 * receiver buffer control structure (ch_esb_t *rbp).
 * XXX we should allow better tuning of the # of preallocated
 * free buffers against the # of freelist entries.
 */
static int
pe_big_rbuf_pool_init(ch_t *sa)
{
	int i;
	ch_esb_t *rbp;
	extern uint32_t sge_flq0_cnt;
	extern uint32_t sge_flq1_cnt;
	int size;
	uint32_t j;

	if (is_T2(sa))
		size = sge_flq0_cnt * fl_sz_multiplier;
	else
		size = sge_flq1_cnt * fl_sz_multiplier;

	mutex_init(&sa->ch_big_esbl, NULL, MUTEX_DRIVER, sa->ch_icookp);

	mutex_enter(&in_use_l);
	j = in_use_index++;
	if (in_use_index >= SZ_INUSE)
		in_use_index = 0;
	mutex_exit(&in_use_l);

	sa->ch_big_owner = NULL;
	sa->ch_big_index = j;
	sa->ch_big_esb_free = NULL;
	for (i = 0; i < size; i++) {
		rbp = ch_alloc_big_esbbuf(sa, j);
		if (rbp == NULL)
			goto error;
		rbp->cs_next = sa->ch_big_esb_free;
		sa->ch_big_esb_free = rbp;

		/*
		 * add entry to owned list
		 */
		rbp->cs_owner = sa->ch_big_owner;
		sa->ch_big_owner = rbp;
	}
	return (1);

error:
	sa->ch_big_owner = NULL;

	/* free whatever we've already allocated */
	pe_rbuf_pool_free(sa);

	return (0);
}
/*
 * allocate receive buffer structure and dma mapped buffer (SGE_SM_BUF_SZ bytes)
 * note that we will DMA at a 2 byte offset for Solaris when checksum offload
 * is enabled.
 */
static ch_esb_t *
ch_alloc_small_esbbuf(ch_t *sa, uint32_t i)
{
	ch_esb_t *rbp;

	rbp = (ch_esb_t *)kmem_zalloc(sizeof (ch_esb_t), KM_SLEEP);
	if (rbp == NULL) {
		return ((ch_esb_t *)0);
	}

#if BYTE_ORDER == BIG_ENDIAN
	rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 1, DMA_STREAM|DMA_SMALN,
	    SGE_SM_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
#else
	rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 0, DMA_STREAM|DMA_SMALN,
	    SGE_SM_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
#endif

	if (rbp->cs_buf == NULL) {
		kmem_free(rbp, sizeof (ch_esb_t));
		return ((ch_esb_t *)0);
	}

	rbp->cs_sa = sa;
	rbp->cs_index = i;

	rbp->cs_frtn.free_func = (void (*)())&ch_small_rbuf_recycle;
	rbp->cs_frtn.free_arg = (caddr_t)rbp;

	return (rbp);
}
/*
 * allocate receive buffer structure and dma mapped buffer (SGE_BG_BUF_SZ bytes)
 * note that we will DMA at a 2 byte offset for Solaris when checksum offload
 * is enabled.
 */
static ch_esb_t *
ch_alloc_big_esbbuf(ch_t *sa, uint32_t i)
{
	ch_esb_t *rbp;

	rbp = (ch_esb_t *)kmem_zalloc(sizeof (ch_esb_t), KM_SLEEP);
	if (rbp == NULL) {
		return ((ch_esb_t *)0);
	}

#if BYTE_ORDER == BIG_ENDIAN
	rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 1, DMA_STREAM|DMA_BGALN,
	    SGE_BG_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
#else
	rbp->cs_buf = (caddr_t)ch_alloc_dma_mem(sa, 0, DMA_STREAM|DMA_BGALN,
	    SGE_BG_BUF_SZ(sa), &rbp->cs_pa, &rbp->cs_dh, &rbp->cs_ah);
#endif

	if (rbp->cs_buf == NULL) {
		kmem_free(rbp, sizeof (ch_esb_t));
		return ((ch_esb_t *)0);
	}

	rbp->cs_sa = sa;
	rbp->cs_index = i;

	rbp->cs_frtn.free_func = (void (*)())&ch_big_rbuf_recycle;
	rbp->cs_frtn.free_arg = (caddr_t)rbp;

	return (rbp);
}
/*
 * free entries on the receive buffer list.
 */
void
pe_rbuf_pool_free(ch_t *sa)
{
	ch_esb_t *rbp;

	mutex_enter(&sa->ch_small_esbl);

	/*
	 * Now set-up the rest to commit suicide.
	 */
	while (sa->ch_small_owner) {
		rbp = sa->ch_small_owner;
		sa->ch_small_owner = rbp->cs_owner;
		rbp->cs_owner = NULL;
		rbp->cs_flag = 1;
	}

	while ((rbp = sa->ch_small_esb_free) != NULL) {
		/* advance head ptr to next entry */
		sa->ch_small_esb_free = rbp->cs_next;
		/* free private buffer allocated in ch_alloc_esbbuf() */
		ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);
		/* free descriptor buffer */
		kmem_free(rbp, sizeof (ch_esb_t));
	}

	mutex_exit(&sa->ch_small_esbl);

	/* destroy ch_esbl lock */
	mutex_destroy(&sa->ch_small_esbl);

	mutex_enter(&sa->ch_big_esbl);

	/*
	 * Now set-up the rest to commit suicide.
	 */
	while (sa->ch_big_owner) {
		rbp = sa->ch_big_owner;
		sa->ch_big_owner = rbp->cs_owner;
		rbp->cs_owner = NULL;
		rbp->cs_flag = 1;
	}

	while ((rbp = sa->ch_big_esb_free) != NULL) {
		/* advance head ptr to next entry */
		sa->ch_big_esb_free = rbp->cs_next;
		/* free private buffer allocated in ch_alloc_esbbuf() */
		ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);
		/* free descriptor buffer */
		kmem_free(rbp, sizeof (ch_esb_t));
	}

	mutex_exit(&sa->ch_big_esbl);

	/* destroy ch_esbl lock */
	mutex_destroy(&sa->ch_big_esbl);
}
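/*
 * callback function from freeb() when esballoc'ed mblk freed.
 */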
void
ch_small_rbuf_recycle(ch_esb_t *rbp)
{
	ch_t *sa = rbp->cs_sa;

	if (rbp->cs_flag) {
		uint32_t i;

		/*
		 * free private buffer allocated in ch_alloc_esbbuf()
		 */
		ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);

		i = rbp->cs_index;

		/*
		 * free descriptor buffer
		 */
		kmem_free(rbp, sizeof (ch_esb_t));

		/*
		 * decrement count of receive buffers freed by callback
		 * We decrement here so anyone trying to do fini will
		 * only remove the driver once the counts go to 0.
		 */
		atomic_dec_32(&buffers_in_use[i]);

		return;
	}

	mutex_enter(&sa->ch_small_esbl);
	rbp->cs_next = sa->ch_small_esb_free;
	sa->ch_small_esb_free = rbp;
	mutex_exit(&sa->ch_small_esbl);

	/*
	 * decrement count of receive buffers freed by callback
	 */
	atomic_dec_32(&buffers_in_use[rbp->cs_index]);
}
/*
 * callback function from freeb() when esballoc'ed mblk freed.
 */
void
ch_big_rbuf_recycle(ch_esb_t *rbp)
{
	ch_t *sa = rbp->cs_sa;

	if (rbp->cs_flag) {
		uint32_t i;

		/*
		 * free private buffer allocated in ch_alloc_esbbuf()
		 */
		ch_free_dma_mem(rbp->cs_dh, rbp->cs_ah);

		i = rbp->cs_index;

		/*
		 * free descriptor buffer
		 */
		kmem_free(rbp, sizeof (ch_esb_t));

		/*
		 * decrement count of receive buffers freed by callback
		 * We decrement here so anyone trying to do fini will
		 * only remove the driver once the counts go to 0.
		 */
		atomic_dec_32(&buffers_in_use[i]);

		return;
	}

	mutex_enter(&sa->ch_big_esbl);
	rbp->cs_next = sa->ch_big_esb_free;
	sa->ch_big_esb_free = rbp;
	mutex_exit(&sa->ch_big_esbl);

	/*
	 * decrement count of receive buffers freed by callback
	 */
	atomic_dec_32(&buffers_in_use[rbp->cs_index]);
}
/*
 * get a pre-allocated, pre-mapped receive buffer from free list.
 * (used by sge.c)
 */
ch_esb_t *
ch_get_small_rbuf(ch_t *sa)
{
	ch_esb_t *rbp;

	mutex_enter(&sa->ch_small_esbl);
	rbp = sa->ch_small_esb_free;
	if (rbp) {
		sa->ch_small_esb_free = rbp->cs_next;
	}
	mutex_exit(&sa->ch_small_esbl);

	return (rbp);
}
/*
 * get a pre-allocated, pre-mapped receive buffer from free list.
 * (used by sge.c)
 */
ch_esb_t *
ch_get_big_rbuf(ch_t *sa)
{
	ch_esb_t *rbp;

	mutex_enter(&sa->ch_big_esbl);
	rbp = sa->ch_big_esb_free;
	if (rbp) {
		sa->ch_big_esb_free = rbp->cs_next;
	}
	mutex_exit(&sa->ch_big_esbl);

	return (rbp);
}
void
pe_detach(ch_t *sa)
{
	(void) sge_stop(sa->sge);

	pe_free_driver_resources(sa);
}
static void
pe_free_driver_resources(ch_t *sa)
{
	if (sa) {
		t1_free_sw_modules(sa);

		/* free pool of receive buffers */
		pe_rbuf_pool_free(sa);
	}
}
/*
 * Processes elmer0 external interrupts in process context.
 */
static void
ext_intr_task(ch_t *adapter)
{
	u32 enable;

	(void) elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	t1_write_reg_4(adapter, A_PL_CAUSE, F_PL_INTR_EXT);
	enable = t1_read_reg_4(adapter, A_PL_ENABLE);
	t1_write_reg_4(adapter, A_PL_ENABLE, enable | F_PL_INTR_EXT);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
}
/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void
t1_os_elmer0_ext_intr(ch_t *adapter)
{
	u32 enable = t1_read_reg_4(adapter, A_PL_ENABLE);

	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	t1_write_reg_4(adapter, A_PL_ENABLE, enable & ~F_PL_INTR_EXT);
#ifdef NOTYET
	schedule_work(&adapter->ext_intr_handler_task);
#else
	ext_intr_task(adapter);
#endif
}
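/*
 * Iterator used by the MAC layer to walk the multicast list captured in
 * a t1_rx_mode; returns the next address, or NULL when the list is
 * exhausted.
 */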
uint8_t *
t1_get_next_mcaddr(struct t1_rx_mode *rmp)
{
	uint8_t *addr = 0;

	if (rmp->mc) {
		addr = rmp->mc->cmc_mca;
		rmp->mc = rmp->mc->cmc_next;
	}

	return (addr);
}
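/*
 * Prime the adapter's free DMA-handle list with cnt preallocated
 * handles, presumably drawn on by ch_bind_dma_handle() when mapping
 * outbound mblks.
 */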
void
pe_dma_handle_init(ch_t *chp, int cnt)
{
	free_dh_t *dhe;

	while (cnt--) {
		dhe = ch_get_dma_handle(chp);
		if (dhe == NULL)
			return;

		mutex_enter(&chp->ch_dh_lck);
		dhe->dhe_next = chp->ch_dh;
		chp->ch_dh = dhe;
		mutex_exit(&chp->ch_dh_lck);
	}
}
/*
 * Write new values to the MTU table. Caller must validate that the new MTUs
 * are in ascending order. params.mtus[] is initialized by init_mtus()
 * called in t1_init_sw_modules().
 */
#define	MTUREG(idx)	(A_TP_MTU_REG0 + (idx) * 4)

static void
update_mtu_tab(ch_t *adapter)
{
	int i;

	for (i = 0; i < NMTUS; ++i) {
		int mtu = (unsigned int)adapter->params.mtus[i];

		t1_write_reg_4(adapter, MTUREG(i), mtu);
	}
}
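/*
 * Push the configured MTU (chp->ch_mtu) down to the MAC. Returns
 * EOPNOTSUPP if the MAC has no set_mtu op and EINVAL for MTUs below 68.
 */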
static int
pe_change_mtu(ch_t *chp)
{
	struct cmac *mac = chp->port[0].mac;
	int ret;

	if (!mac->ops->set_mtu) {
		return (EOPNOTSUPP);
	}
	if (chp->ch_mtu < 68) {
		return (EINVAL);
	}
	if (ret = mac->ops->set_mtu(mac, chp->ch_mtu)) {
		return (ret);
	}

	return (0);
}
typedef struct fake_arp {
	char fa_dst[6];		/* ethernet header */
	char fa_src[6];		/* ethernet header */
	ushort_t fa_typ;	/* ethernet header */

	ushort_t fa_hrd;	/* arp */
	ushort_t fa_pro;
	char fa_hln;
	char fa_pln;
	ushort_t fa_op;
	char fa_src_mac[6];
	uint_t fa_src_ip;
	char fa_dst_mac[6];
	char fa_dst_ip[4];
} fake_arp_t;
/*
 * PR2928 & PR3309
 * construct packet in mblk and attach it to sge structure.
 */
static int
pe_make_fake_arp(ch_t *chp, unsigned char *arpp)
{
	pesge *sge = chp->sge;
	mblk_t *bp;
	fake_arp_t *fap;
	static char buf[6] = {0, 7, 0x43, 0, 0, 0};
	struct cpl_tx_pkt *cpl;

	bp = allocb(sizeof (struct fake_arp) + SZ_CPL_TX_PKT, BPRI_HI);
	if (bp == NULL) {
		return (1);
	}
	bzero(bp->b_rptr, sizeof (struct fake_arp) + SZ_CPL_TX_PKT);

	/* fill in cpl header */
	cpl = (struct cpl_tx_pkt *)bp->b_rptr;
	cpl->opcode = CPL_TX_PKT;
	cpl->iff = 0;			/* XXX port 0 needs fixing with NEMO */
	cpl->ip_csum_dis = 1;		/* no IP header cksum */
	cpl->l4_csum_dis = 1;		/* no tcp/udp cksum */
	cpl->vlan_valid = 0;		/* no vlan */

	fap = (fake_arp_t *)&bp->b_rptr[SZ_CPL_TX_PKT];

	bcopy(arpp, fap, sizeof (*fap));	/* copy first arp to mblk */

	bcopy(buf, fap->fa_dst, 6);		/* overwrite dst mac */
	chp->ch_ip = fap->fa_src_ip;		/* not used yet */
	bcopy(buf, fap->fa_dst_mac, 6);		/* overwrite dst mac */

	bp->b_wptr = bp->b_rptr + sizeof (struct fake_arp) + SZ_CPL_TX_PKT;

	sge_add_fake_arp(sge, (void *)bp);

	return (0);
}
/*
 * PR2928 & PR3309
 * free the fake arp's mblk on sge structure.
 */
void
pe_free_fake_arp(void *arp)
{
	mblk_t *bp = (mblk_t *)(arp);

	freemsg(bp);
}
/*
 * extract ip address of nic from first outgoing arp.
 */
static uint32_t
pe_get_ip(unsigned char *arpp)
{
	fake_arp_t fap;

	/*
	 * first copy packet to buffer so we know
	 * it will be properly aligned.
	 */
	bcopy(arpp, &fap, sizeof (fap));	/* copy first arp to buffer */
	return (fap.fa_src_ip);
}
/* ARGSUSED */
void
t1_os_link_changed(ch_t *obj, int port_id, int link_status,
    int speed, int duplex, int fc)
{
	gld_mac_info_t *macinfo = obj->ch_macp;

	if (link_status) {
		gld_linkstate(macinfo, GLD_LINKSTATE_UP);
		/*
		 * Link state should be reported to the user
		 * whenever it changes.
		 */
		cmn_err(CE_NOTE, "%s: link is up", adapter_name(obj));
	} else {
		gld_linkstate(macinfo, GLD_LINKSTATE_DOWN);
		/*
		 * Link state should be reported to the user
		 * whenever it changes.
		 */
		cmn_err(CE_NOTE, "%s: link is down", adapter_name(obj));
	}
}