4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2008 NetXen, Inc. All rights reserved.
23 * Use is subject to license terms.
26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
27 * Use is subject to license terms.
29 #include <sys/types.h>
31 #include <sys/debug.h>
32 #include <sys/stropts.h>
33 #include <sys/stream.h>
34 #include <sys/strlog.h>
37 #include <sys/kstat.h>
38 #include <sys/vtrace.h>
40 #include <sys/strsun.h>
41 #include <sys/ethernet.h>
42 #include <sys/modctl.h>
43 #include <sys/errno.h>
44 #include <sys/dditypes.h>
46 #include <sys/sunddi.h>
47 #include <sys/sysmacros.h>
51 #include <netinet/in.h>
55 #include <sys/rwlock.h>
56 #include <sys/mutex.h>
57 #include <sys/pattr.h>
58 #include <sys/strsubr.h>
59 #include <sys/ddi_impldefs.h>
62 #include "unm_nic_hw.h"
65 #include "nic_phan_reg.h"
66 #include "unm_nic_ioctl.h"
68 #include "unm_version.h"
69 #include "unm_brdcfg.h"
75 #define UNM_ADAPTER_UP_MAGIC 777
76 #define VLAN_TAGSZ 0x4
78 #define index2rxbuf(_rdp_, _idx_) ((_rdp_)->rx_buf_pool + (_idx_))
79 #define rxbuf2index(_rdp_, _bufp_) ((_bufp_) - (_rdp_)->rx_buf_pool)
82 * Receive ISR processes NX_RX_MAXBUFS incoming packets at most, then posts
83 * as many buffers as packets processed. This loop repeats as required to
84 * process all incoming packets delivered in a single interrupt. Higher
85 * value of NX_RX_MAXBUFS improves performance by posting rx buffers less
86 * frequently, but at the cost of not posting quickly enough when card is
87 * running out of rx buffers.
89 #define NX_RX_THRESHOLD 32
90 #define NX_RX_MAXBUFS 128
91 #define NX_MAX_TXCOMPS 256
93 extern int create_rxtx_rings(unm_adapter
*adapter
);
94 extern void destroy_rxtx_rings(unm_adapter
*adapter
);
96 static void unm_post_rx_buffers_nodb(struct unm_adapter_s
*adapter
,
98 static mblk_t
*unm_process_rcv(unm_adapter
*adapter
, statusDesc_t
*desc
);
99 static int unm_process_rcv_ring(unm_adapter
*, int);
100 static int unm_process_cmd_ring(struct unm_adapter_s
*adapter
);
102 static int unm_nic_do_ioctl(unm_adapter
*adapter
, queue_t
*q
, mblk_t
*mp
);
103 static void unm_nic_ioctl(struct unm_adapter_s
*adapter
, int cmd
, queue_t
*q
,
106 /* GLDv3 interface functions */
107 static int ntxn_m_start(void *);
108 static void ntxn_m_stop(void *);
109 static int ntxn_m_multicst(void *, boolean_t
, const uint8_t *);
110 static int ntxn_m_promisc(void *, boolean_t
);
111 static int ntxn_m_stat(void *arg
, uint_t stat
, uint64_t *val
);
112 static mblk_t
*ntxn_m_tx(void *, mblk_t
*);
113 static void ntxn_m_ioctl(void *arg
, queue_t
*wq
, mblk_t
*mp
);
114 static boolean_t
ntxn_m_getcapab(void *arg
, mac_capab_t cap
, void *cap_data
);
117 * Allocates DMA handle, virtual memory and binds them
118 * returns size of actual memory binded and the physical address.
121 unm_pci_alloc_consistent(unm_adapter
*adapter
,
122 int size
, caddr_t
*address
, ddi_dma_cookie_t
*cookie
,
123 ddi_dma_handle_t
*dma_handle
, ddi_acc_handle_t
*handlep
)
128 uint_t dma_flags
= DDI_DMA_RDWR
| DDI_DMA_CONSISTENT
;
135 err
= ddi_dma_alloc_handle(adapter
->dip
,
136 &adapter
->gc_dma_attr_desc
,
137 DDI_DMA_DONTWAIT
, NULL
, dma_handle
);
138 if (err
!= DDI_SUCCESS
) {
139 cmn_err(CE_WARN
, "!%s: %s: ddi_dma_alloc_handle FAILED:"
140 " %d", unm_nic_driver_name
, __func__
, err
);
144 err
= ddi_dma_mem_alloc(*dma_handle
,
145 size
, &adapter
->gc_attr_desc
,
146 dma_flags
& (DDI_DMA_STREAMING
| DDI_DMA_CONSISTENT
),
147 DDI_DMA_DONTWAIT
, NULL
, address
, &ring_len
,
149 if (err
!= DDI_SUCCESS
) {
150 cmn_err(CE_WARN
, "!%s: %s: ddi_dma_mem_alloc failed:"
151 "ret %d, request size: %d",
152 unm_nic_driver_name
, __func__
, err
, size
);
153 ddi_dma_free_handle(dma_handle
);
157 if (ring_len
< size
) {
158 cmn_err(CE_WARN
, "%s: %s: could not allocate required "
159 "memory :%d\n", unm_nic_driver_name
,
161 ddi_dma_mem_free(handlep
);
162 ddi_dma_free_handle(dma_handle
);
163 return (DDI_FAILURE
);
166 (void) memset(*address
, 0, size
);
168 if (((err
= ddi_dma_addr_bind_handle(*dma_handle
,
169 NULL
, *address
, ring_len
,
171 DDI_DMA_DONTWAIT
, NULL
,
172 cookie
, &ncookies
)) != DDI_DMA_MAPPED
) ||
175 "!%s: %s: ddi_dma_addr_bind_handle FAILED: %d",
176 unm_nic_driver_name
, __func__
, err
);
177 ddi_dma_mem_free(handlep
);
178 ddi_dma_free_handle(dma_handle
);
179 return (DDI_FAILURE
);
182 return (DDI_SUCCESS
);
186 * Unbinds the memory, frees the DMA handle and at the end, frees the memory
/*
 * unm_pci_free_consistent(): tear-down counterpart of
 * unm_pci_alloc_consistent() — unbinds the DMA handle, then frees the
 * DMA memory (via its access handle) and the DMA handle itself.
 * (Extraction note: brace/blank lines of the original are missing from
 * this dump; the code bytes below are preserved exactly.)
 */
189 unm_pci_free_consistent(ddi_dma_handle_t
*dma_handle
,
190 ddi_acc_handle_t
*acc_handle
)
/* Unbind first; on failure only warn — the frees below still run. */
194 err
= ddi_dma_unbind_handle(*dma_handle
);
195 if (err
!= DDI_SUCCESS
) {
196 cmn_err(CE_WARN
, "%s: Error unbinding memory\n", __func__
);
/* Free the DMA memory, then the handle that mapped it. */
200 ddi_dma_mem_free(acc_handle
);
201 ddi_dma_free_handle(dma_handle
);
/*
 * Per-PCI-function interrupt target-status register addresses, indexed
 * by ahw.pci_func; used on the MSI path when claiming an interrupt.
 */
204 static uint32_t msi_tgt_status
[] = {
205 ISR_INT_TARGET_STATUS
, ISR_INT_TARGET_STATUS_F1
,
206 ISR_INT_TARGET_STATUS_F2
, ISR_INT_TARGET_STATUS_F3
,
207 ISR_INT_TARGET_STATUS_F4
, ISR_INT_TARGET_STATUS_F5
,
208 ISR_INT_TARGET_STATUS_F6
, ISR_INT_TARGET_STATUS_F7
/*
 * unm_nic_disable_int(): masks the adapter interrupt by writing the
 * interrupt CRB register.  (The value written is on an original line
 * missing from this extract — presumably 0; confirm against upstream.)
 */
212 unm_nic_disable_int(unm_adapter
*adapter
)
216 adapter
->unm_nic_hw_write_wx(adapter
, adapter
->interrupt_crb
,
221 unm_nic_clear_int(unm_adapter
*adapter
)
223 uint32_t mask
, temp
, our_int
, status
;
225 UNM_READ_LOCK(&adapter
->adapter_lock
);
227 /* check whether it's our interrupt */
228 if (!UNM_IS_MSI_FAMILY(adapter
)) {
230 /* Legacy Interrupt case */
231 adapter
->unm_nic_pci_read_immediate(adapter
, ISR_INT_VECTOR
,
234 if (!(status
& adapter
->legacy_intr
.int_vec_bit
)) {
235 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
239 if (adapter
->ahw
.revision_id
>= NX_P3_B1
) {
240 adapter
->unm_nic_pci_read_immediate(adapter
,
241 ISR_INT_STATE_REG
, &temp
);
242 if (!ISR_IS_LEGACY_INTR_TRIGGERED(temp
)) {
243 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
246 } else if (NX_IS_REVISION_P2(adapter
->ahw
.revision_id
)) {
247 our_int
= adapter
->unm_nic_pci_read_normalize(adapter
,
250 /* FIXME: Assumes pci_func is same as ctx */
251 if ((our_int
& (0x80 << adapter
->portnum
)) == 0) {
253 /* not our interrupt */
254 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
258 temp
= our_int
& ~((u32
)(0x80 << adapter
->portnum
));
259 adapter
->unm_nic_pci_write_normalize(adapter
,
260 CRB_INT_VECTOR
, temp
);
263 if (adapter
->fw_major
< 4)
264 unm_nic_disable_int(adapter
);
266 /* claim interrupt */
268 adapter
->unm_nic_pci_write_immediate(adapter
,
269 adapter
->legacy_intr
.tgt_status_reg
, &temp
);
271 adapter
->unm_nic_pci_read_immediate(adapter
, ISR_INT_VECTOR
,
275 * Read again to make sure the legacy interrupt message got
278 adapter
->unm_nic_pci_read_immediate(adapter
, ISR_INT_VECTOR
,
280 } else if (adapter
->flags
& UNM_NIC_MSI_ENABLED
) {
281 /* clear interrupt */
283 adapter
->unm_nic_pci_write_immediate(adapter
,
284 msi_tgt_status
[adapter
->ahw
.pci_func
], &temp
);
287 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
/*
 * unm_nic_enable_int(): re-arms the adapter interrupt via the interrupt
 * CRB register; legacy (non-MSI-family) interrupts additionally need the
 * target mask register written to unmask at the source.
 */
293 unm_nic_enable_int(unm_adapter
*adapter
)
297 adapter
->unm_nic_hw_write_wx(adapter
, adapter
->interrupt_crb
,
/* Extra unmask write only for legacy INTx; MSI/MSI-X skip this. */
300 if (!UNM_IS_MSI_FAMILY(adapter
)) {
303 adapter
->unm_nic_pci_write_immediate(adapter
,
304 adapter
->legacy_intr
.tgt_mask_reg
, &mask
);
/*
 * unm_free_hw_resources(): releases everything unm_nic_hw_resources()
 * allocated — the firmware rx/tx context, the ring-context descriptor,
 * the command descriptor ring, and every receive/status descriptor ring.
 * Each pointer is NULLed after its memory is freed so a repeat call is
 * harmless.
 */
309 unm_free_hw_resources(unm_adapter
*adapter
)
311 unm_recv_context_t
*recv_ctx
;
312 unm_rcv_desc_ctx_t
*rcv_desc
;
/* Destroy the firmware-side context first, if it was created. */
315 if (adapter
->context_alloced
== 1) {
316 netxen_destroy_rxtx(adapter
);
317 adapter
->context_alloced
= 0;
/* Ring-context descriptor block (shared with the card). */
320 if (adapter
->ctxDesc
!= NULL
) {
321 unm_pci_free_consistent(&adapter
->ctxDesc_dma_handle
,
322 &adapter
->ctxDesc_acc_handle
);
323 adapter
->ctxDesc
= NULL
;
/* Command (tx) descriptor ring. */
326 if (adapter
->ahw
.cmdDescHead
!= NULL
) {
327 unm_pci_free_consistent(&adapter
->ahw
.cmd_desc_dma_handle
,
328 &adapter
->ahw
.cmd_desc_acc_handle
);
329 adapter
->ahw
.cmdDesc_physAddr
= (uintptr_t)NULL
;
330 adapter
->ahw
.cmdDescHead
= NULL
;
/* Per-context: all rds rings, then the status descriptor ring. */
333 for (ctx
= 0; ctx
< MAX_RCV_CTX
; ++ctx
) {
334 recv_ctx
= &adapter
->recv_ctx
[ctx
];
335 for (ring
= 0; ring
< adapter
->max_rds_rings
; ring
++) {
336 rcv_desc
= &recv_ctx
->rcv_desc
[ring
];
338 if (rcv_desc
->desc_head
!= NULL
) {
339 unm_pci_free_consistent(
340 &rcv_desc
->rx_desc_dma_handle
,
341 &rcv_desc
->rx_desc_acc_handle
);
342 rcv_desc
->desc_head
= NULL
;
343 rcv_desc
->phys_addr
= (uintptr_t)NULL
;
347 if (recv_ctx
->rcvStatusDescHead
!= NULL
) {
348 unm_pci_free_consistent(
349 &recv_ctx
->status_desc_dma_handle
,
350 &recv_ctx
->status_desc_acc_handle
);
351 recv_ctx
->rcvStatusDesc_physAddr
= (uintptr_t)NULL
;
352 recv_ctx
->rcvStatusDescHead
= NULL
;
/*
 * cleanup_adapter(): final teardown — unmaps the register and doorbell
 * BAR mappings, then frees the adapter soft state itself.  The adapter
 * pointer is invalid after this returns.
 */
358 cleanup_adapter(struct unm_adapter_s
*adapter
)
360 ddi_regs_map_free(&(adapter
->regs_handle
));
361 ddi_regs_map_free(&(adapter
->db_handle
));
362 kmem_free(adapter
, sizeof (unm_adapter
));
/*
 * unm_nic_remove(): detach-path teardown.  Order matters: report link
 * down and stop the port, mask interrupts, cancel the watchdog timeout,
 * free hardware rings, free rx/tx buffer rings (only if the adapter ever
 * came fully up), release the port-0-only dummy DMA area, remove the
 * interrupt handler, then free the soft state via cleanup_adapter().
 */
366 unm_nic_remove(unm_adapter
*adapter
)
368 mac_link_update(adapter
->mach
, LINK_STATE_DOWN
);
369 unm_nic_stop_port(adapter
);
371 if (adapter
->interrupt_crb
) {
372 UNM_READ_LOCK(&adapter
->adapter_lock
);
373 unm_nic_disable_int(adapter
);
374 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
376 (void) untimeout(adapter
->watchdog_timer
);
378 unm_free_hw_resources(adapter
);
/* Rings exist only if init completed (is_up set to the magic value). */
380 if (adapter
->is_up
== UNM_ADAPTER_UP_MAGIC
)
381 destroy_rxtx_rings(adapter
);
/* The dummy DMA area is allocated once, by function/port 0 only. */
383 if (adapter
->portnum
== 0)
384 unm_free_dummy_dma(adapter
);
386 unm_destroy_intr(adapter
);
388 ddi_set_driver_private(adapter
->dip
, NULL
);
389 cleanup_adapter(adapter
);
393 init_firmware(unm_adapter
*adapter
)
395 uint32_t state
= 0, loops
= 0, tempout
;
398 UNM_READ_LOCK(&adapter
->adapter_lock
);
399 state
= adapter
->unm_nic_pci_read_normalize(adapter
, CRB_CMDPEG_STATE
);
400 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
402 if (state
== PHAN_INITIALIZE_ACK
)
405 while (state
!= PHAN_INITIALIZE_COMPLETE
&& loops
< 200000) {
408 UNM_READ_LOCK(&adapter
->adapter_lock
);
409 state
= adapter
->unm_nic_pci_read_normalize(adapter
,
411 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
415 if (loops
>= 200000) {
416 cmn_err(CE_WARN
, "%s%d: CmdPeg init incomplete:%x\n",
417 adapter
->name
, adapter
->instance
, state
);
422 UNM_READ_LOCK(&adapter
->adapter_lock
);
423 tempout
= INTR_SCHEME_PERPORT
;
424 adapter
->unm_nic_hw_write_wx(adapter
, CRB_NIC_CAPABILITIES_HOST
,
426 tempout
= MSI_MODE_MULTIFUNC
;
427 adapter
->unm_nic_hw_write_wx(adapter
, CRB_NIC_MSI_MODE_HOST
,
429 tempout
= MPORT_MULTI_FUNCTION_MODE
;
430 adapter
->unm_nic_hw_write_wx(adapter
, CRB_MPORT_MODE
, &tempout
, 4);
431 tempout
= PHAN_INITIALIZE_ACK
;
432 adapter
->unm_nic_hw_write_wx(adapter
, CRB_CMDPEG_STATE
, &tempout
, 4);
433 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
439 * Utility to synchronize with receive peg.
440 * Returns 0 on success
/*
 * receive_peg_ready(): polls CRB_RCVPEG_STATE (up to 20000 iterations;
 * the per-iteration delay is on an original line missing from this
 * extract) until the receive peg reports PHAN_PEG_RCV_INITIALIZED.
 * Warns and fails if initialization never completes.
 */
444 receive_peg_ready(struct unm_adapter_s
*adapter
)
447 int loops
= 0, err
= 0;
/* Reads of the CRB state are done under the adapter read lock. */
450 UNM_READ_LOCK(&adapter
->adapter_lock
);
451 state
= adapter
->unm_nic_pci_read_normalize(adapter
, CRB_RCVPEG_STATE
);
452 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
454 while ((state
!= PHAN_PEG_RCV_INITIALIZED
) && (loops
< 20000)) {
458 UNM_READ_LOCK(&adapter
->adapter_lock
);
459 state
= adapter
->unm_nic_pci_read_normalize(adapter
,
461 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
/* Timed out: the peg never came up. */
466 if (loops
>= 20000) {
467 cmn_err(CE_WARN
, "Receive Peg initialization incomplete 0x%x\n",
476 * check if the firmware has been downloaded and ready to run and
477 * setup the address for the descriptors in the adapter
480 unm_nic_hw_resources(unm_adapter
*adapter
)
482 hardware_context
*hw
= &adapter
->ahw
;
486 unm_recv_context_t
*recv_ctx
;
487 unm_rcv_desc_ctx_t
*rcv_desc
;
488 ddi_dma_cookie_t cookie
;
491 if (err
= receive_peg_ready(adapter
))
494 size
= (sizeof (RingContext
) + sizeof (uint32_t));
496 err
= unm_pci_alloc_consistent(adapter
,
497 size
, (caddr_t
*)&addr
, &cookie
,
498 &adapter
->ctxDesc_dma_handle
,
499 &adapter
->ctxDesc_acc_handle
);
500 if (err
!= DDI_SUCCESS
) {
501 cmn_err(CE_WARN
, "Failed to allocate HW context\n");
505 adapter
->ctxDesc_physAddr
= cookie
.dmac_laddress
;
507 (void) memset(addr
, 0, sizeof (RingContext
));
509 adapter
->ctxDesc
= (RingContext
*) addr
;
510 adapter
->ctxDesc
->CtxId
= adapter
->portnum
;
511 adapter
->ctxDesc
->CMD_CONSUMER_OFFSET
=
512 adapter
->ctxDesc_physAddr
+ sizeof (RingContext
);
513 adapter
->cmdConsumer
=
514 (uint32_t *)(uintptr_t)(((char *)addr
) + sizeof (RingContext
));
516 ASSERT(!((unsigned long)adapter
->ctxDesc_physAddr
& 0x3f));
519 * Allocate command descriptor ring.
521 size
= (sizeof (cmdDescType0_t
) * adapter
->MaxTxDescCount
);
522 err
= unm_pci_alloc_consistent(adapter
,
523 size
, (caddr_t
*)&addr
, &cookie
,
524 &hw
->cmd_desc_dma_handle
,
525 &hw
->cmd_desc_acc_handle
);
526 if (err
!= DDI_SUCCESS
) {
527 cmn_err(CE_WARN
, "Failed to allocate cmd desc ring\n");
531 hw
->cmdDesc_physAddr
= cookie
.dmac_laddress
;
532 hw
->cmdDescHead
= (cmdDescType0_t
*)addr
;
534 for (ctx
= 0; ctx
< MAX_RCV_CTX
; ++ctx
) {
535 recv_ctx
= &adapter
->recv_ctx
[ctx
];
537 size
= (sizeof (statusDesc_t
)* adapter
->MaxRxDescCount
);
538 err
= unm_pci_alloc_consistent(adapter
,
539 size
, (caddr_t
*)&addr
,
540 &recv_ctx
->status_desc_dma_cookie
,
541 &recv_ctx
->status_desc_dma_handle
,
542 &recv_ctx
->status_desc_acc_handle
);
543 if (err
!= DDI_SUCCESS
) {
544 cmn_err(CE_WARN
, "Failed to allocate sts desc ring\n");
548 (void) memset(addr
, 0, size
);
549 recv_ctx
->rcvStatusDesc_physAddr
=
550 recv_ctx
->status_desc_dma_cookie
.dmac_laddress
;
551 recv_ctx
->rcvStatusDescHead
= (statusDesc_t
*)addr
;
554 for (ring
= 0; ring
< adapter
->max_rds_rings
; ring
++) {
555 rcv_desc
= &recv_ctx
->rcv_desc
[ring
];
557 size
= (sizeof (rcvDesc_t
) * adapter
->MaxRxDescCount
);
558 err
= unm_pci_alloc_consistent(adapter
,
559 size
, (caddr_t
*)&addr
,
560 &rcv_desc
->rx_desc_dma_cookie
,
561 &rcv_desc
->rx_desc_dma_handle
,
562 &rcv_desc
->rx_desc_acc_handle
);
563 if (err
!= DDI_SUCCESS
) {
564 cmn_err(CE_WARN
, "Failed to allocate "
565 "rx desc ring %d\n", ring
);
566 goto free_status_desc
;
569 rcv_desc
->phys_addr
=
570 rcv_desc
->rx_desc_dma_cookie
.dmac_laddress
;
571 rcv_desc
->desc_head
= (rcvDesc_t
*)addr
;
575 if (err
= netxen_create_rxtx(adapter
))
576 goto free_statusrx_desc
;
577 adapter
->context_alloced
= 1;
579 return (DDI_SUCCESS
);
584 unm_free_hw_resources(adapter
);
/*
 * unm_desc_dma_sync(): ddi_dma_sync() a run of `count` descriptors of
 * `unit_size` bytes starting at index `start` in a circular ring of
 * `range` entries.  When the run wraps past the end of the ring the sync
 * is split into two calls: start..end-of-ring (length 0 = "to the end")
 * plus the wrapped head of the ring.
 */
589 void unm_desc_dma_sync(ddi_dma_handle_t handle
, uint_t start
, uint_t count
,
590 uint_t range
, uint_t unit_size
, uint_t direction
)
/* Common case: the run does not wrap — one contiguous sync. */
592 if ((start
+ count
) < range
) {
593 (void) ddi_dma_sync(handle
, start
* unit_size
,
594 count
* unit_size
, direction
);
/* Wrap case: sync to end of ring, then the wrapped portion from 0. */
596 (void) ddi_dma_sync(handle
, start
* unit_size
, 0, direction
);
/*
 * NOTE(review): this second sync hardcodes DDI_DMA_SYNC_FORCPU even
 * when the caller passed DDI_DMA_SYNC_FORDEV as `direction` — looks
 * inconsistent with the line above; verify against upstream before
 * relying on device-direction wrap-around syncs.
 */
597 (void) ddi_dma_sync(handle
, 0,
598 (start
+ count
- range
) * unit_size
, DDI_DMA_SYNC_FORCPU
);
/*
 * CRB offsets of the command-ring producer/consumer registers, indexed
 * by function/port number (4 entries each).
 */
602 static uint32_t crb_cmd_producer
[4] = { CRB_CMD_PRODUCER_OFFSET
,
603 CRB_CMD_PRODUCER_OFFSET_1
, CRB_CMD_PRODUCER_OFFSET_2
,
604 CRB_CMD_PRODUCER_OFFSET_3
};
606 static uint32_t crb_cmd_consumer
[4] = { CRB_CMD_CONSUMER_OFFSET
,
607 CRB_CMD_CONSUMER_OFFSET_1
, CRB_CMD_CONSUMER_OFFSET_2
,
608 CRB_CMD_CONSUMER_OFFSET_3
};
/*
 * unm_nic_update_cmd_producer(): publishes the host's tx (command ring)
 * producer index to the card's CRB register, under the adapter read
 * lock.  No-op if the CRB address has not been set up.
 */
611 unm_nic_update_cmd_producer(struct unm_adapter_s
*adapter
,
612 uint32_t crb_producer
)
614 int data
= crb_producer
;
616 if (adapter
->crb_addr_cmd_producer
) {
617 UNM_READ_LOCK(&adapter
->adapter_lock
);
618 adapter
->unm_nic_hw_write_wx(adapter
,
619 adapter
->crb_addr_cmd_producer
, &data
, 4);
620 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
/*
 * unm_nic_update_cmd_consumer(): publishes the command-ring consumer
 * index to the card's CRB register.  (Parameter is named crb_producer
 * but carries the consumer index.)
 * NOTE(review): unlike the producer variant above, this write is not
 * done under the adapter read lock — confirm whether callers hold it.
 */
625 unm_nic_update_cmd_consumer(struct unm_adapter_s
*adapter
,
626 uint32_t crb_producer
)
628 int data
= crb_producer
;
630 if (adapter
->crb_addr_cmd_consumer
)
631 adapter
->unm_nic_hw_write_wx(adapter
,
632 adapter
->crb_addr_cmd_consumer
, &data
, 4);
636 * Looks for type of packet and sets opcode accordingly
637 * so that checksum offload can be used.
/*
 * unm_tx_csum(): inspects the packet metadata gathered earlier and sets
 * the tx descriptor's opcode/offsets so the hardware performs checksum
 * offload.  Marks VLAN-tagged frames; for IPv4, picks TX_TCP_PKT /
 * TX_UDP_PKT / TX_IP_PKT by L4 protocol and records the IP and L4
 * header offsets.
 */
640 unm_tx_csum(cmdDescType0_t
*desc
, mblk_t
*mp
, pktinfo_t
*pktinfo
)
/* A VLAN-sized MAC header means the frame carries a VLAN tag. */
642 if (pktinfo
->mac_hlen
== sizeof (struct ether_vlan_header
))
643 desc
->u1
.s1
.flags
= FLAGS_VLAN_TAGGED
;
645 if (pktinfo
->etype
== htons(ETHERTYPE_IP
)) {
646 uint32_t start
, flags
;
/* Only offload when the stack actually requested checksumming. */
648 mac_hcksum_get(mp
, &start
, NULL
, NULL
, NULL
, &flags
);
649 if ((flags
& (HCK_FULLCKSUM
| HCK_IPV4_HDRCKSUM
)) == 0)
653 * For TCP/UDP, ask hardware to do both IP header and
654 * full checksum, even if stack has already done one or
655 * the other. Hardware will always get it correct even
656 * if stack has already done it.
658 switch (pktinfo
->l4_proto
) {
660 desc
->u1
.s1
.opcode
= TX_TCP_PKT
;
663 desc
->u1
.s1
.opcode
= TX_UDP_PKT
;
666 /* Must be here with HCK_IPV4_HDRCKSUM */
667 desc
->u1
.s1
.opcode
= TX_IP_PKT
;
/* Header offsets the hardware needs to locate IP and L4 headers. */
671 desc
->u1
.s1
.ipHdrOffset
= pktinfo
->mac_hlen
;
672 desc
->u1
.s1
.tcpHdrOffset
= pktinfo
->mac_hlen
+ pktinfo
->ip_hlen
;
677 * For IP/UDP/TCP checksum offload, this checks for MAC+IP header in one
678 * contiguous block ending at 8 byte aligned address as required by hardware.
679 * Caller assumes pktinfo->total_len will be updated by this function and
680 * if pktinfo->etype is set to 0, it will need to linearize the mblk and
681 * invoke unm_update_pkt_info() to determine ethertype, IP header len and
685 unm_get_pkt_info(mblk_t
*mp
, pktinfo_t
*pktinfo
)
690 (void) memset(pktinfo
, 0, sizeof (pktinfo_t
));
692 for (bp
= mp
; bp
!= NULL
; bp
= bp
->b_cont
) {
696 pktinfo
->total_len
+= MBLKL(bp
);
699 if (MBLKL(mp
) < (sizeof (struct ether_header
) + sizeof (ipha_t
)))
703 * We just need non 1 byte aligned address, since ether_type is
706 if ((uintptr_t)mp
->b_rptr
& 1)
709 type
= ((struct ether_header
*)(uintptr_t)mp
->b_rptr
)->ether_type
;
710 if (type
== htons(ETHERTYPE_VLAN
)) {
711 if (MBLKL(mp
) < (sizeof (struct ether_vlan_header
) +
714 type
= ((struct ether_vlan_header
*) \
715 (uintptr_t)mp
->b_rptr
)->ether_type
;
716 pktinfo
->mac_hlen
= sizeof (struct ether_vlan_header
);
718 pktinfo
->mac_hlen
= sizeof (struct ether_header
);
720 pktinfo
->etype
= type
;
722 if (pktinfo
->etype
== htons(ETHERTYPE_IP
)) {
723 uchar_t
*ip_off
= mp
->b_rptr
+ pktinfo
->mac_hlen
;
725 pktinfo
->ip_hlen
= IPH_HDR_LENGTH((uintptr_t)ip_off
);
727 ((ipha_t
*)(uintptr_t)ip_off
)->ipha_protocol
;
729 /* IP header not aligned to quadward boundary? */
730 if ((unsigned long)(ip_off
+ pktinfo
->ip_hlen
) % 8 != 0)
/*
 * unm_update_pkt_info(): recomputes packet metadata (MAC header length,
 * ethertype, and for IPv4 the IP header length and L4 protocol) from a
 * flat, contiguous copy of the packet at `ptr`.  Used after bcopy-style
 * linearization when unm_get_pkt_info() could not parse the fragmented
 * mblk chain (pktinfo->etype left 0).
 */
738 unm_update_pkt_info(char *ptr
, pktinfo_t
*pktinfo
)
742 type
= ((struct ether_header
*)(uintptr_t)ptr
)->ether_type
;
/* VLAN-tagged frame: real ethertype sits after the tag. */
743 if (type
== htons(ETHERTYPE_VLAN
)) {
744 type
= ((struct ether_vlan_header
*)(uintptr_t)ptr
)->ether_type
;
745 pktinfo
->mac_hlen
= sizeof (struct ether_vlan_header
);
747 pktinfo
->mac_hlen
= sizeof (struct ether_header
);
749 pktinfo
->etype
= type
;
/* IPv4: record header length and the L4 protocol number. */
751 if (pktinfo
->etype
== htons(ETHERTYPE_IP
)) {
752 char *ipp
= ptr
+ pktinfo
->mac_hlen
;
754 pktinfo
->ip_hlen
= IPH_HDR_LENGTH((uintptr_t)ipp
);
755 pktinfo
->l4_proto
= ((ipha_t
*)(uintptr_t)ipp
)->ipha_protocol
;
760 unm_send_copy(struct unm_adapter_s
*adapter
, mblk_t
*mp
, pktinfo_t
*pktinfo
)
762 hardware_context
*hw
;
764 cmdDescType0_t
*hwdesc
;
765 struct unm_cmd_buffer
*pbuf
= NULL
;
773 MaxTxDescCount
= adapter
->MaxTxDescCount
;
775 UNM_SPIN_LOCK(&adapter
->tx_lock
);
778 if (find_diff_among(adapter
->cmdProducer
, adapter
->lastCmdConsumer
,
779 MaxTxDescCount
) <= 2) {
780 adapter
->stats
.outofcmddesc
++;
781 adapter
->resched_needed
= 1;
783 UNM_SPIN_UNLOCK(&adapter
->tx_lock
);
786 adapter
->freecmds
-= no_of_desc
;
788 producer
= adapter
->cmdProducer
;
790 adapter
->cmdProducer
= get_index_range(adapter
->cmdProducer
,
791 MaxTxDescCount
, no_of_desc
);
793 hwdesc
= &hw
->cmdDescHead
[producer
];
794 (void) memset(hwdesc
, 0, sizeof (cmdDescType0_t
));
795 pbuf
= &adapter
->cmd_buf_arr
[producer
];
801 txb
= pbuf
->dma_area
.vaddr
;
803 for (bp
= mp
; bp
!= NULL
; bp
= bp
->b_cont
) {
804 if ((mblen
= MBLKL(bp
)) == 0)
806 bcopy(bp
->b_rptr
, txb
, mblen
);
811 * Determine metadata if not previously done due to fragmented mblk.
813 if (pktinfo
->etype
== 0)
814 unm_update_pkt_info(pbuf
->dma_area
.vaddr
, pktinfo
);
816 (void) ddi_dma_sync(pbuf
->dma_area
.dma_hdl
,
817 0, pktinfo
->total_len
, DDI_DMA_SYNC_FORDEV
);
819 /* hwdesc->u1.s1.tcpHdrOffset = 0; */
820 /* hwdesc->mss = 0; */
821 hwdesc
->u1
.s1
.opcode
= TX_ETHER_PKT
;
822 hwdesc
->u3
.s1
.port
= adapter
->portnum
;
823 hwdesc
->u3
.s1
.ctx_id
= adapter
->portnum
;
825 hwdesc
->u6
.s1
.buffer1Length
= pktinfo
->total_len
;
826 hwdesc
->u5
.AddrBuffer1
= pbuf
->dma_area
.dma_addr
;
827 hwdesc
->u1
.s1
.numOfBuffers
= 1;
828 hwdesc
->u1
.s1
.totalLength
= pktinfo
->total_len
;
830 unm_tx_csum(hwdesc
, mp
, pktinfo
);
832 unm_desc_dma_sync(hw
->cmd_desc_dma_handle
,
836 sizeof (cmdDescType0_t
),
837 DDI_DMA_SYNC_FORDEV
);
839 hw
->cmdProducer
= adapter
->cmdProducer
;
840 unm_nic_update_cmd_producer(adapter
, adapter
->cmdProducer
);
842 adapter
->stats
.txbytes
+= pktinfo
->total_len
;
843 adapter
->stats
.xmitfinished
++;
844 adapter
->stats
.txcopyed
++;
845 UNM_SPIN_UNLOCK(&adapter
->tx_lock
);
851 /* Should be called with adapter->tx_lock held. */
/*
 * unm_return_dma_handle(): pushes a linked chain of `num` tx DMA handle
 * nodes (head..tail) back onto the adapter's free pool and bumps the
 * free-handle count.  Caller must hold adapter->tx_lock (see comment
 * above).
 */
853 unm_return_dma_handle(unm_adapter
*adapter
, unm_dmah_node_t
*head
,
854 unm_dmah_node_t
*tail
, uint32_t num
)
856 ASSERT(tail
!= NULL
);
/* Splice the whole chain in front of the current pool. */
857 tail
->next
= adapter
->dmahdl_pool
;
858 adapter
->dmahdl_pool
= head
;
859 adapter
->freehdls
+= num
;
/*
 * unm_reserve_dma_handle(): pops one tx DMA handle node off the
 * adapter's free pool; presumably returns NULL when the pool is empty.
 * NOTE(review): the original lines between the declaration and the
 * pop (including the NULL check that must guard dmah->next) are
 * missing from this extract — confirm against upstream.
 */
862 static unm_dmah_node_t
*
863 unm_reserve_dma_handle(unm_adapter
* adapter
)
865 unm_dmah_node_t
*dmah
= NULL
;
867 dmah
= adapter
->dmahdl_pool
;
869 adapter
->dmahdl_pool
= dmah
->next
;
879 unm_send_mapped(struct unm_adapter_s
*adapter
, mblk_t
*mp
, pktinfo_t
*pktinfo
)
881 hardware_context
*hw
;
883 u32 saved_producer
= 0;
884 cmdDescType0_t
*hwdesc
;
885 struct unm_cmd_buffer
*pbuf
= NULL
;
891 unm_dmah_node_t
*dmah
, *head
= NULL
, *tail
= NULL
, *hdlp
;
892 ddi_dma_cookie_t cookie
[MAX_COOKIES_PER_CMD
+ 1];
894 uint32_t hdl_reserved
= 0;
896 uint32_t ncookies
, index
= 0, total_cookies
= 0;
898 MaxTxDescCount
= adapter
->MaxTxDescCount
;
900 UNM_SPIN_LOCK(&adapter
->tx_lock
);
902 /* bind all the mblks of the packet first */
903 for (bp
= mp
; bp
!= NULL
; bp
= bp
->b_cont
) {
908 dmah
= unm_reserve_dma_handle(adapter
);
910 adapter
->stats
.outoftxdmahdl
++;
914 ret
= ddi_dma_addr_bind_handle(dmah
->dmahdl
,
915 NULL
, (caddr_t
)bp
->b_rptr
, mblen
,
916 DDI_DMA_STREAMING
| DDI_DMA_WRITE
,
917 DDI_DMA_DONTWAIT
, NULL
, &cookie
[index
], &ncookies
);
919 if (ret
!= DDI_DMA_MAPPED
)
930 total_cookies
+= ncookies
;
931 if (total_cookies
> MAX_COOKIES_PER_CMD
) {
937 size_t hsize
= cookie
[0].dmac_size
;
940 * For TCP/UDP packets with checksum offload,
941 * MAC/IP headers need to be contiguous. Otherwise,
942 * there must be at least 16 bytes in the first
945 if ((pktinfo
->l4_proto
== IPPROTO_TCP
) ||
946 (pktinfo
->l4_proto
== IPPROTO_UDP
)) {
947 if (hsize
< (pktinfo
->mac_hlen
+
962 for (i
= 0; i
< ncookies
; i
++, index
++)
963 ddi_dma_nextcookie(dmah
->dmahdl
, &cookie
[index
]);
968 no_of_desc
= (total_cookies
+ 3) >> 2;
971 if (find_diff_among(adapter
->cmdProducer
, adapter
->lastCmdConsumer
,
972 MaxTxDescCount
) < no_of_desc
+2) {
974 * If we are going to be trying the copy path, no point
975 * scheduling an upcall when Tx resources are freed.
977 if (pktinfo
->total_len
> adapter
->maxmtu
) {
978 adapter
->stats
.outofcmddesc
++;
979 adapter
->resched_needed
= 1;
984 adapter
->freecmds
-= no_of_desc
;
986 /* Copy the descriptors into the hardware */
987 producer
= adapter
->cmdProducer
;
988 saved_producer
= producer
;
989 hwdesc
= &hw
->cmdDescHead
[producer
];
990 (void) memset(hwdesc
, 0, sizeof (cmdDescType0_t
));
991 pbuf
= &adapter
->cmd_buf_arr
[producer
];
997 hwdesc
->u1
.s1
.numOfBuffers
= total_cookies
;
998 hwdesc
->u1
.s1
.opcode
= TX_ETHER_PKT
;
999 hwdesc
->u3
.s1
.port
= adapter
->portnum
;
1000 /* hwdesc->u1.s1.tcpHdrOffset = 0; */
1001 /* hwdesc->mss = 0; */
1002 hwdesc
->u3
.s1
.ctx_id
= adapter
->portnum
;
1003 hwdesc
->u1
.s1
.totalLength
= pktinfo
->total_len
;
1004 unm_tx_csum(hwdesc
, mp
, pktinfo
);
1006 for (i
= k
= 0; i
< total_cookies
; i
++) {
1008 /* Move to the next descriptor */
1010 producer
= get_next_index(producer
, MaxTxDescCount
);
1011 hwdesc
= &hw
->cmdDescHead
[producer
];
1012 (void) memset(hwdesc
, 0, sizeof (cmdDescType0_t
));
1017 hwdesc
->u6
.s1
.buffer1Length
= cookie
[i
].dmac_size
;
1018 hwdesc
->u5
.AddrBuffer1
= cookie
[i
].dmac_laddress
;
1021 hwdesc
->u6
.s1
.buffer2Length
= cookie
[i
].dmac_size
;
1022 hwdesc
->u2
.AddrBuffer2
= cookie
[i
].dmac_laddress
;
1025 hwdesc
->u6
.s1
.buffer3Length
= cookie
[i
].dmac_size
;
1026 hwdesc
->u4
.AddrBuffer3
= cookie
[i
].dmac_laddress
;
1029 hwdesc
->u6
.s1
.buffer4Length
= cookie
[i
].dmac_size
;
1030 hwdesc
->u7
.AddrBuffer4
= cookie
[i
].dmac_laddress
;
1036 unm_desc_dma_sync(hw
->cmd_desc_dma_handle
, saved_producer
, no_of_desc
,
1037 MaxTxDescCount
, sizeof (cmdDescType0_t
), DDI_DMA_SYNC_FORDEV
);
1039 adapter
->cmdProducer
= get_next_index(producer
, MaxTxDescCount
);
1040 hw
->cmdProducer
= adapter
->cmdProducer
;
1041 unm_nic_update_cmd_producer(adapter
, adapter
->cmdProducer
);
1043 adapter
->stats
.txbytes
+= pktinfo
->total_len
;
1044 adapter
->stats
.xmitfinished
++;
1045 adapter
->stats
.txmapped
++;
1046 UNM_SPIN_UNLOCK(&adapter
->tx_lock
);
1053 while (hdlp
!= NULL
) {
1054 (void) ddi_dma_unbind_handle(hdlp
->dmahdl
);
1059 * add the reserved but bind failed one to the list to be returned
1072 unm_return_dma_handle(adapter
, head
, tail
, hdl_reserved
);
1074 UNM_SPIN_UNLOCK(&adapter
->tx_lock
);
/*
 * unm_nic_xmit_frame(): transmit entry point for one packet.  Chooses
 * between DMA-mapped send (unm_send_mapped) and bcopy send
 * (unm_send_copy): small packets (<= tx_bcopy_threshold) and packets
 * with too many fragments (>= MAX_COOKIES_PER_CMD mblks) always take
 * the copy path; the copy path is also the fallback when the mapped
 * send fails.  Oversized packets (> maxmtu) are counted as dropped.
 */
1079 unm_nic_xmit_frame(unm_adapter
*adapter
, mblk_t
*mp
)
1082 boolean_t status
= B_FALSE
, send_mapped
;
1084 adapter
->stats
.xmitcalled
++;
/* Parse headers / total length; also proposes mapped vs. copy. */
1086 send_mapped
= unm_get_pkt_info(mp
, &pktinfo
);
1088 if (pktinfo
.total_len
<= adapter
->tx_bcopy_threshold
||
1089 pktinfo
.mblk_no
>= MAX_COOKIES_PER_CMD
)
1090 send_mapped
= B_FALSE
;
1092 if (send_mapped
== B_TRUE
)
1093 status
= unm_send_mapped(adapter
, mp
, &pktinfo
);
/* Fall back to the bcopy path if mapped send was skipped or failed. */
1095 if (status
!= B_TRUE
) {
1096 if (pktinfo
.total_len
<= adapter
->maxmtu
)
1097 return (unm_send_copy(adapter
, mp
, &pktinfo
));
1099 /* message too large */
1101 adapter
->stats
.txdropped
++;
/*
 * unm_nic_check_temp(): reads the device temperature word from
 * CRB_TEMP_STATE and reacts to state transitions: PANIC means the
 * device has been shut down; WARN logs once on entry; and returning to
 * normal after a WARN is also logged.  The last state is cached in
 * adapter->temp so messages fire only on transitions.  Skipped entirely
 * on P3 A0/A2 silicon.
 */
1109 unm_nic_check_temp(struct unm_adapter_s
*adapter
)
1111 uint32_t temp
, temp_state
, temp_val
;
/* These early revisions do not support the temperature register. */
1114 if ((adapter
->ahw
.revision_id
== NX_P3_A2
) ||
1115 (adapter
->ahw
.revision_id
== NX_P3_A0
))
1118 temp
= adapter
->unm_nic_pci_read_normalize(adapter
, CRB_TEMP_STATE
);
/* The register packs both a state code and a Celsius value. */
1120 temp_state
= nx_get_temp_state(temp
);
1121 temp_val
= nx_get_temp_val(temp
);
1123 if (temp_state
== NX_TEMP_PANIC
) {
1124 cmn_err(CE_WARN
, "%s: Device temperature %d C exceeds "
1125 "maximum allowed, device has been shut down\n",
1126 unm_nic_driver_name
, temp_val
);
1128 } else if (temp_state
== NX_TEMP_WARN
) {
/* Warn only on the normal -> warn transition. */
1129 if (adapter
->temp
== NX_TEMP_NORMAL
) {
1130 cmn_err(CE_WARN
, "%s: Device temperature %d C exceeds"
1131 "operating range. Immediate action needed.\n",
1132 unm_nic_driver_name
, temp_val
);
/* Log recovery when leaving the warn state. */
1135 if (adapter
->temp
== NX_TEMP_WARN
) {
1136 cmn_err(CE_WARN
, "%s: Device temperature is now %d "
1137 "degrees C in normal range.\n",
1138 unm_nic_driver_name
, temp_val
);
1142 adapter
->temp
= temp_state
;
/*
 * unm_watchdog(): periodic (2-second) timer callback.  Port 0 also
 * performs the temperature check and bails out (without rescheduling)
 * on an overheat shutdown; otherwise it services PHY interrupts and
 * re-arms itself via timeout(9F).
 */
1147 unm_watchdog(unsigned long v
)
1149 unm_adapter
*adapter
= (unm_adapter
*)v
;
1151 if ((adapter
->portnum
== 0) && unm_nic_check_temp(adapter
)) {
1153 * We return without turning on the netdev queue as there
1154 * was an overheated device
1159 unm_nic_handle_phy_intr(adapter
);
1162 * This function schedules a call for itself.
1164 adapter
->watchdog_timer
= timeout((void (*)(void *))&unm_watchdog
,
1165 (void *)adapter
, 2 * drv_usectohz(1000000));
/* unm_nic_clear_stats(): zeroes the entire software statistics block. */
1169 static void unm_nic_clear_stats(unm_adapter
*adapter
)
1171 (void) memset(&adapter
->stats
, 0, sizeof (adapter
->stats
));
/*
 * unm_nic_poll(): interrupt-time work loop.  Reaps tx completions and
 * processes up to NX_RX_MAXBUFS rx packets per pass; repeats while
 * either side still has work (tx incomplete, or rx consumed a full
 * batch), then re-enables the interrupt under the adapter read lock.
 */
1175 unm_nic_poll(unm_adapter
*adapter
)
1177 int work_done
, tx_complete
;
1179 adapter
->stats
.polled
++;
1182 tx_complete
= unm_process_cmd_ring(adapter
);
1183 work_done
= unm_process_rcv_ring(adapter
, NX_RX_MAXBUFS
);
/* Loop again if tx wasn't fully reaped or rx filled its quota. */
1184 if ((!tx_complete
) || (!(work_done
< NX_RX_MAXBUFS
)))
1187 UNM_READ_LOCK(&adapter
->adapter_lock
);
1188 unm_nic_enable_int(adapter
);
1189 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
/*
 * unm_intr(): DDI interrupt handler.  unm_nic_clear_int() returns
 * nonzero when the interrupt is not ours (shared line) — report
 * UNCLAIMED; otherwise run the poll loop and claim it.
 */
1194 unm_intr(caddr_t data
, caddr_t arg
)
1196 unm_adapter
*adapter
= (unm_adapter
*)(uintptr_t)data
;
1198 if (unm_nic_clear_int(adapter
))
1199 return (DDI_INTR_UNCLAIMED
);
1201 unm_nic_poll(adapter
);
1202 return (DDI_INTR_CLAIMED
);
1206 * This is invoked from receive isr. Due to the single threaded nature
1207 * of the invocation, pool_lock acquisition is not necessary to protect
/*
 * unm_free_rx_buffer(): returns one rx buffer to the ring's free pool
 * (simple LIFO push) and bumps the free count.  Lock-free by design —
 * see the single-threaded-ISR note above.
 */
1211 unm_free_rx_buffer(unm_rcv_desc_ctx_t
*rcv_desc
, unm_rx_buffer_t
*rx_buffer
)
1213 /* mutex_enter(rcv_desc->pool_lock); */
1214 rx_buffer
->next
= rcv_desc
->pool_list
;
1215 rcv_desc
->pool_list
= rx_buffer
;
1216 rcv_desc
->rx_buf_free
++;
1217 /* mutex_exit(rcv_desc->pool_lock); */
1221 * unm_process_rcv() send the received packet to the protocol stack.
1224 unm_process_rcv(unm_adapter
*adapter
, statusDesc_t
*desc
)
1226 unm_recv_context_t
*recv_ctx
= &(adapter
->recv_ctx
[0]);
1227 unm_rx_buffer_t
*rx_buffer
;
1229 u32 desc_ctx
= desc
->u1
.s1
.type
;
1230 unm_rcv_desc_ctx_t
*rcv_desc
= &recv_ctx
->rcv_desc
[desc_ctx
];
1231 u32 pkt_length
= desc
->u1
.s1
.totalLength
;
1232 int poff
= desc
->u1
.s1
.pkt_offset
;
1233 int index
, cksum_flags
, docopy
;
1234 int index_lo
= desc
->u1
.s1
.referenceHandle_lo
;
1237 index
= ((desc
->u1
.s1
.referenceHandle_hi
<< 4) | index_lo
);
1239 rx_buffer
= index2rxbuf(rcv_desc
, index
);
1241 if (rx_buffer
== NULL
) {
1242 cmn_err(CE_WARN
, "\r\nNULL rx_buffer idx=%d", index
);
1245 vaddr
= (char *)rx_buffer
->dma_info
.vaddr
;
1246 if (vaddr
== NULL
) {
1247 cmn_err(CE_WARN
, "\r\nNULL vaddr");
1250 rcv_desc
->rx_desc_handled
++;
1251 rcv_desc
->rx_buf_card
--;
1253 (void) ddi_dma_sync(rx_buffer
->dma_info
.dma_hdl
, 0,
1254 pkt_length
+ poff
+ (adapter
->ahw
.cut_through
? 0 :
1255 IP_ALIGNMENT_BYTES
), DDI_DMA_SYNC_FORCPU
);
1258 * Copy packet into new allocated message buffer, if pkt_length
1259 * is below copy threshold.
1261 docopy
= (pkt_length
<= adapter
->rx_bcopy_threshold
) ? 1 : 0;
1264 * If card is running out of rx buffers, then attempt to allocate
1265 * new mblk so we can feed this rx buffer back to card (we
1266 * _could_ look at what's pending on free and recycle lists).
1268 if (rcv_desc
->rx_buf_card
< NX_RX_THRESHOLD
) {
1270 adapter
->stats
.rxbufshort
++;
1274 if ((mp
= allocb(pkt_length
+ IP_ALIGNMENT_BYTES
, 0)) == NULL
) {
1275 adapter
->stats
.allocbfailed
++;
1279 mp
->b_rptr
+= IP_ALIGNMENT_BYTES
;
1281 bcopy(vaddr
, mp
->b_rptr
, pkt_length
);
1282 adapter
->stats
.rxcopyed
++;
1283 unm_free_rx_buffer(rcv_desc
, rx_buffer
);
1285 mp
= (mblk_t
*)rx_buffer
->mp
;
1287 mp
= desballoc(rx_buffer
->dma_info
.vaddr
,
1288 rcv_desc
->dma_size
, 0, &rx_buffer
->rx_recycle
);
1290 adapter
->stats
.desballocfailed
++;
1296 adapter
->stats
.rxmapped
++;
1299 mp
->b_wptr
= (uchar_t
*)((unsigned long)mp
->b_rptr
+ pkt_length
);
1301 if (desc
->u1
.s1
.status
== STATUS_CKSUM_OK
) {
1302 adapter
->stats
.csummed
++;
1304 HCK_FULLCKSUM_OK
| HCK_IPV4_HDRCKSUM_OK
;
1308 mac_hcksum_set(mp
, 0, 0, 0, 0, cksum_flags
);
1310 adapter
->stats
.no_rcv
++;
1311 adapter
->stats
.rxbytes
+= pkt_length
;
1312 adapter
->stats
.uphappy
++;
1317 unm_free_rx_buffer(rcv_desc
, rx_buffer
);
1321 /* Process Receive status ring */
/*
 * unm_process_rcv_ring() -- drain up to `max` receive status descriptors
 * that the card has handed back to the host, deliver each completed
 * packet upstream via unm_process_rcv()/mac_rx(), return descriptor
 * ownership to the firmware (Phantom), repost consumed rx buffers per
 * ring, and publish the new status-ring consumer index to the hardware.
 *
 * NOTE(review): this extraction is missing original source lines (the
 * embedded line numbers jump, e.g. 1340->1343), so some declarations,
 * braces and break/return statements are absent; comments below describe
 * only the statements that are visible.
 */
1323 unm_process_rcv_ring(unm_adapter
*adapter
, int max
)
/* Only receive context 0 is used here. */
1325 unm_recv_context_t
*recv_ctx
= &(adapter
->recv_ctx
[0]);
1326 statusDesc_t
*desc_head
= recv_ctx
->rcvStatusDescHead
;
1327 statusDesc_t
*desc
= NULL
;
1328 uint32_t consumer
, start
;
1329 int count
= 0, ring
;
/* Remember where we started so the FORDEV sync below can cover it. */
1332 start
= consumer
= recv_ctx
->statusRxConsumer
;
/* Make the status descriptors DMA-coherent before the CPU reads them. */
1334 unm_desc_dma_sync(recv_ctx
->status_desc_dma_handle
, start
, max
,
1335 adapter
->MaxRxDescCount
, sizeof (statusDesc_t
),
1336 DDI_DMA_SYNC_FORCPU
);
1338 while (count
< max
) {
1339 desc
= &desc_head
[consumer
];
/* Stop at the first descriptor not yet owned by the host. */
1340 if (!(desc
->u1
.s1
.owner
& STATUS_OWNER_HOST
))
1343 mp
= unm_process_rcv(adapter
, desc
);
/* Hand the descriptor back to the firmware. */
1344 desc
->u1
.s1
.owner
= STATUS_OWNER_PHANTOM
;
1346 consumer
= (consumer
+ 1) % adapter
->MaxRxDescCount
;
/* Pass the received mblk chain to the MAC layer. */
1349 mac_rx(adapter
->mach
, NULL
, mp
);
/* Repost rx buffers on every RDS ring that handled descriptors. */
1352 for (ring
= 0; ring
< adapter
->max_rds_rings
; ring
++) {
1353 if (recv_ctx
->rcv_desc
[ring
].rx_desc_handled
> 0)
1354 unm_post_rx_buffers_nodb(adapter
, ring
);
/* Flush ownership updates back to the device. */
1358 unm_desc_dma_sync(recv_ctx
->status_desc_dma_handle
, start
,
1359 count
, adapter
->MaxRxDescCount
, sizeof (statusDesc_t
),
1360 DDI_DMA_SYNC_FORDEV
);
1362 /* update the consumer index in phantom */
1363 recv_ctx
->statusRxConsumer
= consumer
;
/* Write the new consumer index to the card under the adapter lock. */
1365 UNM_READ_LOCK(&adapter
->adapter_lock
);
1366 adapter
->unm_nic_hw_write_wx(adapter
,
1367 recv_ctx
->host_sds_consumer
, &consumer
, 4);
1368 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
1374 /* Process Command status ring */
/*
 * unm_process_cmd_ring() -- reap completed transmit (command) descriptors.
 * Walks cmd_buf_arr from the last reaped index up to the card-reported
 * consumer, unbinds DMA handles and frees mblks for each completed send,
 * accumulates freed DMA-handle nodes into a head/tail list that is
 * returned to the adapter pool, and updates freecmds/lastCmdConsumer
 * under tx_lock. Kicks mac_tx_update() when a reschedule was requested.
 *
 * NOTE(review): original source lines are missing from this extraction
 * (line numbers jump, e.g. 1397->1403); declarations of last_consumer,
 * consumer, count, done, doresched and several braces/breaks are not
 * visible. Comments describe only what is shown.
 */
1376 unm_process_cmd_ring(struct unm_adapter_s
*adapter
)
1381 struct unm_cmd_buffer
*buffer
;
1383 unm_dmah_node_t
*dmah
, *head
= NULL
, *tail
= NULL
;
1384 uint32_t free_hdls
= 0;
/* Pull the card-updated consumer index into host memory. */
1386 (void) ddi_dma_sync(adapter
->ctxDesc_dma_handle
, sizeof (RingContext
),
1387 sizeof (uint32_t), DDI_DMA_SYNC_FORCPU
);
1389 last_consumer
= adapter
->lastCmdConsumer
;
1390 consumer
= *(adapter
->cmdConsumer
);
1392 while (last_consumer
!= consumer
) {
1393 buffer
= &adapter
->cmd_buf_arr
[last_consumer
];
1394 if (buffer
->head
!= NULL
) {
/* Unbind every DMA handle used by this completed send. */
1395 dmah
= buffer
->head
;
1396 while (dmah
!= NULL
) {
1397 (void) ddi_dma_unbind_handle(dmah
->dmahdl
);
/* Chain this buffer's handle list onto the running head/tail list. */
1403 head
= buffer
->head
;
1404 tail
= buffer
->tail
;
1406 tail
->next
= buffer
->head
;
1407 tail
= buffer
->tail
;
1410 buffer
->head
= NULL
;
1411 buffer
->tail
= NULL
;
1413 if (buffer
->msg
!= NULL
) {
1414 freemsg(buffer
->msg
);
1419 last_consumer
= get_next_index(last_consumer
,
1420 adapter
->MaxTxDescCount
);
/* Bound the work done per invocation. */
1421 if (++count
> NX_MAX_TXCOMPS
)
/* Publish reaped state and return handles under the tx lock. */
1428 UNM_SPIN_LOCK(&adapter
->tx_lock
);
1429 adapter
->lastCmdConsumer
= last_consumer
;
1430 adapter
->freecmds
+= count
;
1433 doresched
= adapter
->resched_needed
;
1435 adapter
->resched_needed
= 0;
1438 unm_return_dma_handle(adapter
, head
, tail
, free_hdls
);
1440 UNM_SPIN_UNLOCK(&adapter
->tx_lock
);
/* Let the MAC layer retry queued transmits. */
1443 mac_tx_update(adapter
->mach
);
/* Re-read the consumer to detect whether the ring is fully drained. */
1446 (void) ddi_dma_sync(adapter
->ctxDesc_dma_handle
, sizeof (RingContext
),
1447 sizeof (uint32_t), DDI_DMA_SYNC_FORCPU
);
1449 consumer
= *(adapter
->cmdConsumer
);
1450 done
= (adapter
->lastCmdConsumer
== consumer
);
1456 * This is invoked from receive isr, and at initialization time when no
1457 * rx buffers have been posted to card. Due to the single threaded nature
1458 * of the invocation, pool_lock acquisition is not neccesary to protect
/*
 * unm_reserve_rx_buffer() -- pop one rx buffer from the descriptor
 * context's free pool. If the pool is empty, drain the recycle list
 * (produced elsewhere, hence protected by recycle_lock) into the pool
 * and retry. Returns the buffer, or NULL as initialized if both lists
 * are empty (the visible code never reassigns rx_buffer in that case).
 *
 * NOTE(review): some original lines (closing braces, the return
 * statement) are missing from this extraction.
 */
1461 static unm_rx_buffer_t
*
1462 unm_reserve_rx_buffer(unm_rcv_desc_ctx_t
*rcv_desc
)
1464 unm_rx_buffer_t
*rx_buffer
= NULL
;
1466 /* mutex_enter(rcv_desc->pool_lock); */
/* Fast path: take the head of the free pool list. */
1467 if (rcv_desc
->rx_buf_free
) {
1468 rx_buffer
= rcv_desc
->pool_list
;
1469 rcv_desc
->pool_list
= rx_buffer
->next
;
1470 rx_buffer
->next
= NULL
;
1471 rcv_desc
->rx_buf_free
--;
/* Slow path: move the recycle list into the pool under its lock. */
1473 mutex_enter(rcv_desc
->recycle_lock
);
1475 if (rcv_desc
->rx_buf_recycle
) {
1476 rcv_desc
->pool_list
= rcv_desc
->recycle_list
;
1477 rcv_desc
->recycle_list
= NULL
;
1478 rcv_desc
->rx_buf_free
+= rcv_desc
->rx_buf_recycle
;
1479 rcv_desc
->rx_buf_recycle
= 0;
1481 rx_buffer
= rcv_desc
->pool_list
;
1482 rcv_desc
->pool_list
= rx_buffer
->next
;
1483 rx_buffer
->next
= NULL
;
1484 rcv_desc
->rx_buf_free
--;
1487 mutex_exit(rcv_desc
->recycle_lock
);
1490 /* mutex_exit(rcv_desc->pool_lock); */
/*
 * post_rx_doorbell() -- notify the firmware (phanmon) of a change in the
 * receive ring producer index by composing a doorbell message (peg id,
 * context id, ring-specific opcode) and writing it as a single 32-bit
 * doorbell register write.
 *
 * NOTE(review): the declaration of `msg` and parts of its initialization
 * are on original lines missing from this extraction; `count` is not
 * referenced in the visible statements.
 */
1495 post_rx_doorbell(struct unm_adapter_s
*adapter
, uint32_t ringid
, int count
)
1497 #define UNM_RCV_PEG_DB_ID 2
1498 #define UNM_RCV_PRODUCER_OFFSET 0
1502 * Write a doorbell msg to tell phanmon of change in
1503 * receive ring producer
1505 msg
.PegId
= UNM_RCV_PEG_DB_ID
;
1508 msg
.CtxId
= adapter
->portnum
;
1509 msg
.Opcode
= UNM_RCV_PRODUCER(ringid
);
/* Single 32-bit doorbell write carries the whole message. */
1510 dbwritel(*((__uint32_t
*)&msg
),
1511 (void *)(DB_NORMALIZE(adapter
, UNM_RCV_PRODUCER_OFFSET
)));
/*
 * unm_post_rx_buffers() -- initial fill of an rx descriptor ring: reserve
 * a buffer from the pool for every descriptor slot (MaxRxDescCount),
 * write its reference handle, ring id, length and DMA address into the
 * ring, sync the ring to the device, then publish the producer index to
 * the card (plus a doorbell on pre-v4 firmware).
 * Returns DDI_SUCCESS, or DDI_FAILURE when a buffer cannot be reserved.
 *
 * NOTE(review): declarations of count/pdesc and several braces are on
 * original lines missing from this extraction (line numbers jump,
 * e.g. 1531->1534); comments describe only visible statements.
 */
1515 unm_post_rx_buffers(struct unm_adapter_s
*adapter
, uint32_t ringid
)
1517 unm_recv_context_t
*recv_ctx
= &(adapter
->recv_ctx
[0]);
1518 unm_rcv_desc_ctx_t
*rcv_desc
= &recv_ctx
->rcv_desc
[ringid
];
1519 unm_rx_buffer_t
*rx_buffer
;
/* Fill every descriptor slot in the ring. */
1523 for (count
= 0; count
< rcv_desc
->MaxRxDescCount
; count
++) {
1524 rx_buffer
= unm_reserve_rx_buffer(rcv_desc
);
1525 if (rx_buffer
!= NULL
) {
1526 pdesc
= &rcv_desc
->desc_head
[count
];
1527 pdesc
->referenceHandle
= rxbuf2index(rcv_desc
,
1529 pdesc
->flags
= ringid
;
1530 pdesc
->bufferLength
= rcv_desc
->dma_size
;
1531 pdesc
->AddrBuffer
= rx_buffer
->dma_info
.dma_addr
;
/* No buffer available during initial fill: give up. */
1534 return (DDI_FAILURE
);
1537 rcv_desc
->producer
= count
% rcv_desc
->MaxRxDescCount
;
/* Flush the populated ring to the device. */
1539 unm_desc_dma_sync(rcv_desc
->rx_desc_dma_handle
,
1543 sizeof (rcvDesc_t
), /* unit_size */
1544 DDI_DMA_SYNC_FORDEV
); /* direction */
/* All descriptors now belong to the card. */
1546 rcv_desc
->rx_buf_card
= rcv_desc
->MaxRxDescCount
;
1547 UNM_READ_LOCK(&adapter
->adapter_lock
);
1548 adapter
->unm_nic_hw_write_wx(adapter
, rcv_desc
->host_rx_producer
,
/* Older firmware additionally needs an explicit doorbell. */
1550 if (adapter
->fw_major
< 4)
1551 post_rx_doorbell(adapter
, ringid
, count
);
1552 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
1554 return (DDI_SUCCESS
);
/*
 * unm_post_rx_buffers_nodb() -- repost up to rx_desc_handled rx buffers
 * onto ring `ringid` without ringing a doorbell: reserve buffers from
 * the pool, fill descriptors at the current producer, sync the touched
 * span (wrap-aware) to the device, update producer/handled/card counts,
 * and write the producer index to the card register.
 *
 * NOTE(review): the second parameter (ringid) and some declarations,
 * braces and an early-exit path are on original lines missing from this
 * extraction; comments describe only visible statements.
 */
1558 unm_post_rx_buffers_nodb(struct unm_adapter_s
*adapter
,
1561 unm_recv_context_t
*recv_ctx
= &(adapter
->recv_ctx
[0]);
1562 unm_rcv_desc_ctx_t
*rcv_desc
= &recv_ctx
->rcv_desc
[ringid
];
1563 struct unm_rx_buffer
*rx_buffer
;
1565 int count
, producer
= rcv_desc
->producer
;
1566 int last_producer
= producer
;
1568 for (count
= 0; count
< rcv_desc
->rx_desc_handled
; count
++) {
1569 rx_buffer
= unm_reserve_rx_buffer(rcv_desc
);
1570 if (rx_buffer
!= NULL
) {
1571 pdesc
= &rcv_desc
->desc_head
[producer
];
1572 pdesc
->referenceHandle
= rxbuf2index(rcv_desc
,
1574 pdesc
->flags
= ringid
;
1575 pdesc
->bufferLength
= rcv_desc
->dma_size
;
1576 pdesc
->AddrBuffer
= rx_buffer
->dma_info
.dma_addr
;
/* Pool exhausted: account it and (presumably) stop -- brace missing. */
1578 adapter
->stats
.outofrxbuf
++;
1581 producer
= get_next_index(producer
, rcv_desc
->MaxRxDescCount
);
1584 /* if we did allocate buffers, then write the count to Phantom */
1586 /* Sync rx ring, considering case for wrap around */
1587 unm_desc_dma_sync(rcv_desc
->rx_desc_dma_handle
, last_producer
,
1588 count
, rcv_desc
->MaxRxDescCount
, sizeof (rcvDesc_t
),
1589 DDI_DMA_SYNC_FORDEV
);
1591 rcv_desc
->producer
= producer
;
1592 rcv_desc
->rx_desc_handled
-= count
;
1593 rcv_desc
->rx_buf_card
+= count
;
/* The card register takes the index of the last filled descriptor. */
1595 producer
= (producer
- 1) % rcv_desc
->MaxRxDescCount
;
1596 UNM_READ_LOCK(&adapter
->adapter_lock
);
1597 adapter
->unm_nic_hw_write_wx(adapter
,
1598 rcv_desc
->host_rx_producer
, &producer
, 4);
1599 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
/*
 * unm_nic_fill_statistics_128M() -- populate a unm_statistics structure
 * for 128M-BAR adapters. On XGBE boards the counters are read directly
 * from NIU hardware registers (with the CRB window switched under the
 * adapter write lock); otherwise the software stats kept in
 * adapter->stats are copied out under the tx spin lock.
 *
 * NOTE(review): some original lines (function prologue/epilogue, return)
 * are missing from this extraction.
 */
1604 unm_nic_fill_statistics_128M(struct unm_adapter_s
*adapter
,
1605 struct unm_statistics
*unm_stats
)
1608 if (adapter
->ahw
.board_type
== UNM_NIC_XGBE
) {
/* Hardware path: read NIU counters through CRB window 0. */
1609 UNM_WRITE_LOCK(&adapter
->adapter_lock
);
1610 unm_nic_pci_change_crbwindow_128M(adapter
, 0);
1612 /* LINTED: E_FALSE_LOGICAL_EXPR */
1613 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_TX_BYTE_CNT
,
1614 &(unm_stats
->tx_bytes
));
1615 /* LINTED: E_FALSE_LOGICAL_EXPR */
1616 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_TX_FRAME_CNT
,
1617 &(unm_stats
->tx_packets
));
1618 /* LINTED: E_FALSE_LOGICAL_EXPR */
1619 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_RX_BYTE_CNT
,
1620 &(unm_stats
->rx_bytes
));
1621 /* LINTED: E_FALSE_LOGICAL_EXPR */
1622 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_RX_FRAME_CNT
,
1623 &(unm_stats
->rx_packets
));
1624 /* LINTED: E_FALSE_LOGICAL_EXPR */
1625 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_AGGR_ERROR_CNT
,
1626 &(unm_stats
->rx_errors
));
1627 /* LINTED: E_FALSE_LOGICAL_EXPR */
1628 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_CRC_ERROR_CNT
,
1629 &(unm_stats
->rx_CRC_errors
));
1630 /* LINTED: E_FALSE_LOGICAL_EXPR */
1631 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_OVERSIZE_FRAME_ERR
,
1632 &(unm_stats
->rx_long_length_error
));
1633 /* LINTED: E_FALSE_LOGICAL_EXPR */
1634 UNM_NIC_LOCKED_READ_REG(UNM_NIU_XGE_UNDERSIZE_FRAME_ERR
,
1635 &(unm_stats
->rx_short_length_error
));
1638 * For reading rx_MAC_error bit different procedure
1639 * UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_TEST_MUX_CTL, 0x15);
1640 * UNM_NIC_LOCKED_READ_REG((UNM_CRB_NIU + 0xC0), &temp);
1641 * unm_stats->rx_MAC_errors = temp & 0xff;
/* Restore CRB window 1 before releasing the lock. */
1644 unm_nic_pci_change_crbwindow_128M(adapter
, 1);
1645 UNM_WRITE_UNLOCK(&adapter
->adapter_lock
);
/* Software path: copy driver-maintained counters under tx_lock. */
1647 UNM_SPIN_LOCK_ISR(&adapter
->tx_lock
);
1648 unm_stats
->tx_bytes
= adapter
->stats
.txbytes
;
1649 unm_stats
->tx_packets
= adapter
->stats
.xmitedframes
+
1650 adapter
->stats
.xmitfinished
;
1651 unm_stats
->rx_bytes
= adapter
->stats
.rxbytes
;
1652 unm_stats
->rx_packets
= adapter
->stats
.no_rcv
;
1653 unm_stats
->rx_errors
= adapter
->stats
.rcvdbadmsg
;
1654 unm_stats
->tx_errors
= adapter
->stats
.nocmddescriptor
;
1655 unm_stats
->rx_short_length_error
= adapter
->stats
.uplcong
;
1656 unm_stats
->rx_long_length_error
= adapter
->stats
.uphcong
;
1657 unm_stats
->rx_CRC_errors
= 0;
1658 unm_stats
->rx_MAC_errors
= 0;
1659 UNM_SPIN_UNLOCK_ISR(&adapter
->tx_lock
);
/*
 * unm_nic_fill_statistics_2M() -- 2M-BAR variant of statistics fill.
 * XGBE boards read NIU counters via unm_nic_hw_read_wx_2M() (no CRB
 * window switching needed); other boards copy the software counters
 * from adapter->stats under the tx spin lock.
 *
 * NOTE(review): some original lines (prologue/epilogue, return) are
 * missing from this extraction.
 */
1665 unm_nic_fill_statistics_2M(struct unm_adapter_s
*adapter
,
1666 struct unm_statistics
*unm_stats
)
1668 if (adapter
->ahw
.board_type
== UNM_NIC_XGBE
) {
1669 (void) unm_nic_hw_read_wx_2M(adapter
, UNM_NIU_XGE_TX_BYTE_CNT
,
1670 &(unm_stats
->tx_bytes
), 4);
1671 (void) unm_nic_hw_read_wx_2M(adapter
, UNM_NIU_XGE_TX_FRAME_CNT
,
1672 &(unm_stats
->tx_packets
), 4);
1673 (void) unm_nic_hw_read_wx_2M(adapter
, UNM_NIU_XGE_RX_BYTE_CNT
,
1674 &(unm_stats
->rx_bytes
), 4);
1675 (void) unm_nic_hw_read_wx_2M(adapter
, UNM_NIU_XGE_RX_FRAME_CNT
,
1676 &(unm_stats
->rx_packets
), 4);
1677 (void) unm_nic_hw_read_wx_2M(adapter
,
1678 UNM_NIU_XGE_AGGR_ERROR_CNT
, &(unm_stats
->rx_errors
), 4);
1679 (void) unm_nic_hw_read_wx_2M(adapter
, UNM_NIU_XGE_CRC_ERROR_CNT
,
1680 &(unm_stats
->rx_CRC_errors
), 4);
1681 (void) unm_nic_hw_read_wx_2M(adapter
,
1682 UNM_NIU_XGE_OVERSIZE_FRAME_ERR
,
1683 &(unm_stats
->rx_long_length_error
), 4);
1684 (void) unm_nic_hw_read_wx_2M(adapter
,
1685 UNM_NIU_XGE_UNDERSIZE_FRAME_ERR
,
1686 &(unm_stats
->rx_short_length_error
), 4);
/* Software path: copy driver-maintained counters under tx_lock. */
1688 UNM_SPIN_LOCK_ISR(&adapter
->tx_lock
);
1689 unm_stats
->tx_bytes
= adapter
->stats
.txbytes
;
1690 unm_stats
->tx_packets
= adapter
->stats
.xmitedframes
+
1691 adapter
->stats
.xmitfinished
;
1692 unm_stats
->rx_bytes
= adapter
->stats
.rxbytes
;
1693 unm_stats
->rx_packets
= adapter
->stats
.no_rcv
;
1694 unm_stats
->rx_errors
= adapter
->stats
.rcvdbadmsg
;
1695 unm_stats
->tx_errors
= adapter
->stats
.nocmddescriptor
;
1696 unm_stats
->rx_short_length_error
= adapter
->stats
.uplcong
;
1697 unm_stats
->rx_long_length_error
= adapter
->stats
.uphcong
;
1698 unm_stats
->rx_CRC_errors
= 0;
1699 unm_stats
->rx_MAC_errors
= 0;
1700 UNM_SPIN_UNLOCK_ISR(&adapter
->tx_lock
);
/*
 * unm_nic_clear_statistics_128M() -- reset the hardware NIU counters on
 * a 128M-BAR adapter by writing `data` (declared on a missing line,
 * presumably zero -- TODO confirm) to each counter register with the
 * CRB window switched under the adapter write lock, then clear the
 * software counters via unm_nic_clear_stats().
 *
 * NOTE(review): some original lines (prologue, `data` declaration,
 * return) are missing from this extraction.
 */
1706 unm_nic_clear_statistics_128M(struct unm_adapter_s
*adapter
)
1711 UNM_WRITE_LOCK(&adapter
->adapter_lock
);
1712 unm_nic_pci_change_crbwindow_128M(adapter
, 0);
1714 /* LINTED: E_FALSE_LOGICAL_EXPR */
1715 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_TX_BYTE_CNT
, &data
);
1716 /* LINTED: E_FALSE_LOGICAL_EXPR */
1717 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_TX_FRAME_CNT
, &data
);
1718 /* LINTED: E_FALSE_LOGICAL_EXPR */
1719 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_RX_BYTE_CNT
, &data
);
1720 /* LINTED: E_FALSE_LOGICAL_EXPR */
1721 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_RX_FRAME_CNT
, &data
);
1722 /* LINTED: E_FALSE_LOGICAL_EXPR */
1723 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_AGGR_ERROR_CNT
, &data
);
1724 /* LINTED: E_FALSE_LOGICAL_EXPR */
1725 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_CRC_ERROR_CNT
, &data
);
1726 /* LINTED: E_FALSE_LOGICAL_EXPR */
1727 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_OVERSIZE_FRAME_ERR
, &data
);
1728 /* LINTED: E_FALSE_LOGICAL_EXPR */
1729 UNM_NIC_LOCKED_WRITE_REG(UNM_NIU_XGE_UNDERSIZE_FRAME_ERR
, &data
);
/* Restore CRB window 1 and clear the software-side counters too. */
1731 unm_nic_pci_change_crbwindow_128M(adapter
, 1);
1732 UNM_WRITE_UNLOCK(&adapter
->adapter_lock
);
1733 unm_nic_clear_stats(adapter
);
/*
 * unm_nic_clear_statistics_2M() -- 2M-BAR variant of counter reset:
 * write to each NIU counter register via unm_nic_hw_write_wx_2M(), then
 * clear the software counters via unm_nic_clear_stats().
 *
 * NOTE(review): the value argument of each write and the function
 * prologue/return are on original lines missing from this extraction.
 */
1738 unm_nic_clear_statistics_2M(struct unm_adapter_s
*adapter
)
1742 (void) unm_nic_hw_write_wx_2M(adapter
, UNM_NIU_XGE_TX_BYTE_CNT
,
1744 (void) unm_nic_hw_write_wx_2M(adapter
, UNM_NIU_XGE_TX_FRAME_CNT
,
1746 (void) unm_nic_hw_write_wx_2M(adapter
, UNM_NIU_XGE_RX_BYTE_CNT
,
1748 (void) unm_nic_hw_write_wx_2M(adapter
, UNM_NIU_XGE_RX_FRAME_CNT
,
1750 (void) unm_nic_hw_write_wx_2M(adapter
, UNM_NIU_XGE_AGGR_ERROR_CNT
,
1752 (void) unm_nic_hw_write_wx_2M(adapter
, UNM_NIU_XGE_CRC_ERROR_CNT
,
1754 (void) unm_nic_hw_write_wx_2M(adapter
, UNM_NIU_XGE_OVERSIZE_FRAME_ERR
,
1756 (void) unm_nic_hw_write_wx_2M(adapter
, UNM_NIU_XGE_UNDERSIZE_FRAME_ERR
,
1758 unm_nic_clear_stats(adapter
);
1763 * unm_nic_ioctl () We provide the tcl/phanmon support
1764 * through these ioctls.
/*
 * unm_nic_ioctl() -- dispatch driver-private ioctls: forwards the
 * generic case to unm_nic_do_ioctl(), answers the UNM_NIC_NAME probe
 * with the "UNM-UNM" identification string, and nak's anything else
 * with EINVAL.
 *
 * NOTE(review): the switch statement and several case labels are on
 * original lines missing from this extraction.
 */
1767 unm_nic_ioctl(struct unm_adapter_s
*adapter
, int cmd
, queue_t
*q
, mblk_t
*mp
)
1773 (void) unm_nic_do_ioctl(adapter
, q
, mp
);
1777 ptr
= (void *) mp
->b_cont
->b_rptr
;
1780 * Phanmon checks for "UNM-UNM" string
1781 * Replace the hardcoded value with appropriate macro
1783 DPRINTF(-1, (CE_CONT
, "UNM_NIC_NAME ioctl executed %d %d\n",
/* Answer the name probe: 10 bytes copied, 10 bytes acked. */
1785 (void) memcpy(ptr
, "UNM-UNM", 10);
1786 miocack(q
, mp
, 10, 0);
/* Unknown command: warn and reject. */
1790 cmn_err(CE_WARN
, "Netxen ioctl cmd %x not supported\n", cmd
);
1792 miocnak(q
, mp
, 0, EINVAL
);
/*
 * unm_nic_resume() -- resume a suspended adapter: restart the watchdog
 * timeout, re-enable interrupts (block-enable for MSI, per-handle enable
 * otherwise), unmask the device interrupt under the adapter lock, and
 * report link up to the MAC layer. Always returns DDI_SUCCESS.
 *
 * NOTE(review): the else-branch structure around the two interrupt
 * enables is implied by missing original lines.
 */
1798 unm_nic_resume(unm_adapter
*adapter
)
1801 adapter
->watchdog_timer
= timeout((void (*)(void *))&unm_watchdog
,
1802 (void *) adapter
, 50000);
1804 if (adapter
->intr_type
== DDI_INTR_TYPE_MSI
)
1805 (void) ddi_intr_block_enable(&adapter
->intr_handle
, 1);
1807 (void) ddi_intr_enable(adapter
->intr_handle
);
1808 UNM_READ_LOCK(&adapter
->adapter_lock
);
1809 unm_nic_enable_int(adapter
);
1810 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
1812 mac_link_update(adapter
->mach
, LINK_STATE_UP
);
1814 return (DDI_SUCCESS
);
/*
 * unm_nic_suspend() -- quiesce the adapter for suspend: report link
 * down, cancel the watchdog timeout, mask the device interrupt under
 * the adapter lock, and disable interrupt delivery (block-disable for
 * MSI, per-handle disable otherwise). Always returns DDI_SUCCESS.
 */
1818 unm_nic_suspend(unm_adapter
*adapter
)
1820 mac_link_update(adapter
->mach
, LINK_STATE_DOWN
);
1822 (void) untimeout(adapter
->watchdog_timer
);
1824 UNM_READ_LOCK(&adapter
->adapter_lock
);
1825 unm_nic_disable_int(adapter
);
1826 UNM_READ_UNLOCK(&adapter
->adapter_lock
);
1827 if (adapter
->intr_type
== DDI_INTR_TYPE_MSI
)
1828 (void) ddi_intr_block_disable(&adapter
->intr_handle
, 1);
1830 (void) ddi_intr_disable(adapter
->intr_handle
);
1832 return (DDI_SUCCESS
);
/*
 * unm_nic_do_ioctl() -- handle the driver-private (tcl/phanmon) ioctl
 * command set. Copies the request header from the message, validates
 * that data.size fits in the 64-byte uabc payload, then dispatches on
 * data.cmd: PCI register/memory read+write, PCI config-space access
 * (with a cached pci_config_setup handle), statistics get/clear, driver
 * version, and efuse chip id. Replies with miocack on success or
 * miocnak(EINVAL) on error.
 *
 * NOTE(review): many original lines are missing from this extraction
 * (the switch header, break statements, goto labels, several case
 * bodies' closing braces, declarations of retval/ptr1/ptr2/ptr4);
 * comments describe only the visible statements.
 */
1836 unm_nic_do_ioctl(unm_adapter
*adapter
, queue_t
*wq
, mblk_t
*mp
)
1838 unm_nic_ioctl_data_t data
;
1839 struct unm_nic_ioctl_data
*up_data
;
1840 ddi_acc_handle_t conf_handle
;
1842 uint64_t efuse_chip_id
= 0;
/* up_data aliases the user payload; data is a private copy of the header. */
1847 up_data
= (struct unm_nic_ioctl_data
*)(mp
->b_cont
->b_rptr
);
1848 (void) memcpy(&data
, (void **)(uintptr_t)(mp
->b_cont
->b_rptr
),
1851 /* Shouldn't access beyond legal limits of "char u[64];" member */
1852 if (data
.size
> sizeof (data
.uabc
)) {
1853 /* evil user tried to crash the kernel */
1854 cmn_err(CE_WARN
, "bad size: %d\n", data
.size
);
1855 retval
= GLD_BADARG
;
1860 case unm_nic_cmd_pci_read
:
1862 if ((retval
= adapter
->unm_nic_hw_read_ioctl(adapter
,
1863 data
.off
, up_data
, data
.size
))) {
1864 DPRINTF(-1, (CE_WARN
, "%s(%d) unm_nic_hw_read_wx "
1865 "returned %d\n", __FUNCTION__
, __LINE__
, retval
));
1874 case unm_nic_cmd_pci_write
:
1875 if ((data
.rv
= adapter
->unm_nic_hw_write_ioctl(adapter
,
1876 data
.off
, &(data
.uabc
), data
.size
))) {
1877 DPRINTF(-1, (CE_WARN
, "%s(%d) unm_nic_hw_write_wx "
1878 "returned %d\n", __FUNCTION__
,
1879 __LINE__
, data
.rv
));
1886 case unm_nic_cmd_pci_mem_read
:
1887 if ((data
.rv
= adapter
->unm_nic_pci_mem_read(adapter
,
1888 data
.off
, up_data
, data
.size
))) {
1889 DPRINTF(-1, (CE_WARN
, "%s(%d) unm_nic_pci_mem_read "
1890 "returned %d\n", __FUNCTION__
,
1891 __LINE__
, data
.rv
));
1898 case unm_nic_cmd_pci_mem_write
:
1899 if ((data
.rv
= adapter
->unm_nic_pci_mem_write(adapter
,
1900 data
.off
, &(data
.uabc
), data
.size
))) {
1901 DPRINTF(-1, (CE_WARN
,
1902 "%s(%d) unm_nic_cmd_pci_mem_write "
1904 __FUNCTION__
, __LINE__
, data
.rv
));
1913 case unm_nic_cmd_pci_config_read
:
/* Reuse a previously cached config handle if one exists. */
1915 if (adapter
->pci_cfg_handle
!= NULL
) {
1916 conf_handle
= adapter
->pci_cfg_handle
;
1918 } else if ((retval
= pci_config_setup(adapter
->dip
,
1919 &conf_handle
)) != DDI_SUCCESS
) {
1920 DPRINTF(-1, (CE_WARN
, "!%s: pci_config_setup failed"
1921 " error:%d\n", unm_nic_driver_name
, retval
));
1925 adapter
->pci_cfg_handle
= conf_handle
;
/* Access width is selected by data.size (1, 2 or 4 bytes). */
1927 switch (data
.size
) {
1929 ptr1
= (char *)up_data
;
1930 *ptr1
= (char)pci_config_get8(conf_handle
, data
.off
);
1933 ptr2
= (short *)up_data
;
1934 *ptr2
= (short)pci_config_get16(conf_handle
, data
.off
);
1937 ptr4
= (int *)up_data
;
1938 *ptr4
= (int)pci_config_get32(conf_handle
, data
.off
);
1944 case unm_nic_cmd_pci_config_write
:
1946 if (adapter
->pci_cfg_handle
!= NULL
) {
1947 conf_handle
= adapter
->pci_cfg_handle
;
1948 } else if ((retval
= pci_config_setup(adapter
->dip
,
1949 &conf_handle
)) != DDI_SUCCESS
) {
1950 DPRINTF(-1, (CE_WARN
, "!%s: pci_config_setup failed"
1951 " error:%d\n", unm_nic_driver_name
, retval
));
1954 adapter
->pci_cfg_handle
= conf_handle
;
1957 switch (data
.size
) {
1959 pci_config_put8(conf_handle
,
1960 data
.off
, *(char *)&(data
.uabc
));
1963 pci_config_put16(conf_handle
,
1964 data
.off
, *(short *)(uintptr_t)&(data
.uabc
));
1967 pci_config_put32(conf_handle
,
1968 data
.off
, *(u32
*)(uintptr_t)&(data
.uabc
));
1974 case unm_nic_cmd_get_stats
:
1975 data
.rv
= adapter
->unm_nic_fill_statistics(adapter
,
1976 (struct unm_statistics
*)up_data
);
1977 data
.size
= sizeof (struct unm_statistics
);
1981 case unm_nic_cmd_clear_stats
:
1982 data
.rv
= adapter
->unm_nic_clear_statistics(adapter
);
1985 case unm_nic_cmd_get_version
:
1986 (void) memcpy(up_data
, UNM_NIC_VERSIONID
,
1987 sizeof (UNM_NIC_VERSIONID
));
1988 data
.size
= sizeof (UNM_NIC_VERSIONID
);
1992 case unm_nic_cmd_get_phy_type
:
1993 cmn_err(CE_WARN
, "unm_nic_cmd_get_phy_type unimplemented\n");
1996 case unm_nic_cmd_efuse_chip_id
:
/* 64-bit id composed from the HIGH and LOW 32-bit halves. */
1997 efuse_chip_id
= adapter
->unm_nic_pci_read_normalize(adapter
,
1998 UNM_EFUSE_CHIP_ID_HIGH
);
1999 efuse_chip_id
<<= 32;
2000 efuse_chip_id
|= adapter
->unm_nic_pci_read_normalize(adapter
,
2001 UNM_EFUSE_CHIP_ID_LOW
);
2002 (void) memcpy(up_data
, &efuse_chip_id
, sizeof (uint64_t));
/* default: unrecognized command. */
2007 cmn_err(CE_WARN
, "%s%d: bad command %d\n", adapter
->name
,
2008 adapter
->instance
, data
.cmd
);
2009 data
.rv
= GLD_NOTSUPPORTED
;
/* Success reply: ack with payload size and per-command status. */
2015 miocack(wq
, mp
, data
.size
, data
.rv
);
2016 return (DDI_SUCCESS
);
/* Error path (label on a missing line): nak with EINVAL. */
2019 cmn_err(CE_WARN
, "%s(%d) ioctl error\n", __FUNCTION__
, data
.cmd
);
2020 miocnak(wq
, mp
, 0, EINVAL
);
2025 * Local datatype for defining tables of (Offset, Name) pairs
/*
 * unm_kstat -- (offset, name) table driving the named-kstat set created
 * by unm_setup_named_kstat(); offsets correspond to the order in which
 * unm_kstat_update() fills the kstat_named array.
 * NOTE(review): entries 0-1, 7-9, 13-14, 16-20 and the terminator are on
 * original lines missing from this extraction.
 */
2032 static const unm_ksindex_t unm_kstat
[] = {
2035 { 2, "tx_bcopy_threshold" },
2036 { 3, "rx_bcopy_threshold" },
2037 { 4, "xmitcalled" },
2038 { 5, "xmitedframes" },
2039 { 6, "xmitfinished" },
2043 { 10, "outoftxdmahdl" },
2044 { 11, "outofcmddesc" },
2045 { 12, "txdropped" },
2048 { 15, "updropped" },
2054 { 21, "desballocfailed" },
2055 { 22, "outofrxbuf" },
2056 { 23, "promiscmode" },
2057 { 24, "rxbufshort" },
2058 { 25, "allocbfailed" },
/*
 * unm_kstat_update() -- ks_update(9E) callback for the "kstatinfo" named
 * kstat set. On KSTAT_READ, copies the adapter's free-handle/free-cmd
 * counts, bcopy thresholds and the full adapter->stats counter block
 * into the kstat_named array in the fixed order defined by unm_kstat[].
 *
 * NOTE(review): the declaration/initialization of `knp` and the return
 * statements are on original lines missing from this extraction.
 */
2063 unm_kstat_update(kstat_t
*ksp
, int flag
)
2065 unm_adapter
*adapter
;
/* Writes are not supported on this kstat set. */
2068 if (flag
!= KSTAT_READ
)
2071 adapter
= ksp
->ks_private
;
/* Order below must match the unm_kstat[] table. */
2074 (knp
++)->value
.ui32
= adapter
->freehdls
;
2075 (knp
++)->value
.ui64
= adapter
->freecmds
;
2076 (knp
++)->value
.ui64
= adapter
->tx_bcopy_threshold
;
2077 (knp
++)->value
.ui64
= adapter
->rx_bcopy_threshold
;
2079 (knp
++)->value
.ui64
= adapter
->stats
.xmitcalled
;
2080 (knp
++)->value
.ui64
= adapter
->stats
.xmitedframes
;
2081 (knp
++)->value
.ui64
= adapter
->stats
.xmitfinished
;
2082 (knp
++)->value
.ui64
= adapter
->stats
.txbytes
;
2083 (knp
++)->value
.ui64
= adapter
->stats
.txcopyed
;
2084 (knp
++)->value
.ui64
= adapter
->stats
.txmapped
;
2085 (knp
++)->value
.ui64
= adapter
->stats
.outoftxdmahdl
;
2086 (knp
++)->value
.ui64
= adapter
->stats
.outofcmddesc
;
2087 (knp
++)->value
.ui64
= adapter
->stats
.txdropped
;
2088 (knp
++)->value
.ui64
= adapter
->stats
.polled
;
2089 (knp
++)->value
.ui64
= adapter
->stats
.uphappy
;
2090 (knp
++)->value
.ui64
= adapter
->stats
.updropped
;
2091 (knp
++)->value
.ui64
= adapter
->stats
.csummed
;
2092 (knp
++)->value
.ui64
= adapter
->stats
.no_rcv
;
2093 (knp
++)->value
.ui64
= adapter
->stats
.rxbytes
;
2094 (knp
++)->value
.ui64
= adapter
->stats
.rxcopyed
;
2095 (knp
++)->value
.ui64
= adapter
->stats
.rxmapped
;
2096 (knp
++)->value
.ui64
= adapter
->stats
.desballocfailed
;
2097 (knp
++)->value
.ui64
= adapter
->stats
.outofrxbuf
;
2098 (knp
++)->value
.ui64
= adapter
->stats
.promiscmode
;
2099 (knp
++)->value
.ui64
= adapter
->stats
.rxbufshort
;
2100 (knp
++)->value
.ui64
= adapter
->stats
.allocbfailed
;
/*
 * unm_setup_named_kstat() -- create and install a named kstat set of
 * `size/sizeof(unm_ksindex_t) - 1` entries, wire up ks_private/ks_update,
 * and initialize each kstat_named from the (offset, name) table. The
 * data type for each entry is selected by logic on original lines that
 * are missing here (presumably keyed off the table offset -- TODO
 * confirm); visible cases map to UINT64, UINT32, STRING and CHAR.
 *
 * NOTE(review): the kstat_install() call, NULL-check on kstat_create(),
 * and the return are on missing original lines.
 */
2106 unm_setup_named_kstat(unm_adapter
*adapter
, int instance
, char *name
,
2107 const unm_ksindex_t
*ksip
, size_t size
, int (*update
)(kstat_t
*, int))
2115 size
/= sizeof (unm_ksindex_t
);
2116 ksp
= kstat_create(unm_nic_driver_name
, instance
, name
, "net",
2117 KSTAT_TYPE_NAMED
, size
-1, KSTAT_FLAG_PERSISTENT
);
2121 ksp
->ks_private
= adapter
;
2122 ksp
->ks_update
= update
;
/* Walk the (offset, name) table until the NULL-name terminator. */
2123 for (knp
= ksp
->ks_data
; (np
= ksip
->name
) != NULL
; ++knp
, ++ksip
) {
2127 type
= KSTAT_DATA_UINT64
;
2131 type
= KSTAT_DATA_UINT32
;
2135 type
= KSTAT_DATA_STRING
;
2139 type
= KSTAT_DATA_CHAR
;
2142 kstat_named_init(knp
, np
, type
);
/*
 * unm_init_kstats() -- create the adapter's "kstatinfo" named kstat set
 * (slot 0 of adapter->kstats) backed by the unm_kstat[] table and the
 * unm_kstat_update() callback.
 */
2150 unm_init_kstats(unm_adapter
* adapter
, int instance
)
2152 adapter
->kstats
[0] = unm_setup_named_kstat(adapter
,
2153 instance
, "kstatinfo", unm_kstat
,
2154 sizeof (unm_kstat
), unm_kstat_update
);
/*
 * unm_fini_kstats() -- delete the adapter's kstat set created by
 * unm_init_kstats() and NULL the slot so teardown is idempotent.
 */
2158 unm_fini_kstats(unm_adapter
* adapter
)
2161 if (adapter
->kstats
[0] != NULL
) {
2162 kstat_delete(adapter
->kstats
[0]);
2163 adapter
->kstats
[0] = NULL
;
/*
 * unm_nic_set_pauseparam() -- program flow-control (pause) settings.
 * GbE boards set rx and tx pause independently; XGBE boards only set
 * tx pause in the visible code. Error/return handling is on original
 * lines missing from this extraction.
 */
2168 unm_nic_set_pauseparam(unm_adapter
*adapter
, unm_pauseparam_t
*pause
)
2172 if (adapter
->ahw
.board_type
== UNM_NIC_GBE
) {
2173 if (unm_niu_gbe_set_rx_flow_ctl(adapter
, pause
->rx_pause
))
2176 if (unm_niu_gbe_set_tx_flow_ctl(adapter
, pause
->tx_pause
))
2179 } else if (adapter
->ahw
.board_type
== UNM_NIC_XGBE
) {
2180 if (unm_niu_xg_set_tx_flow_ctl(adapter
, pause
->tx_pause
))
2189 * GLD/MAC interfaces
/*
 * ntxn_m_start() -- mc_start(9E) entry point. Brings the interface up
 * under adapter->lock: creates rx/tx rings, initializes firmware,
 * clears stats, sets up hardware resources, programs cmd producer/
 * consumer registers on pre-v4 firmware, posts rx buffers on every RDS
 * ring, sets the MAC address, initializes the port, link parameters,
 * rx filter mode (promiscuous on P2, multicast on P3) and MTU, starts
 * the watchdog, marks the adapter up and enables interrupts.
 * Returns GLD_SUCCESS (or DDI_SUCCESS when already up); on failure,
 * unwinds via the labels at the end (stop port / free hw resources /
 * destroy rings) and returns DDI_FAILURE.
 *
 * NOTE(review): goto statements, some labels and several closing braces
 * are on original lines missing from this extraction.
 */
2192 ntxn_m_start(void *arg
)
2194 unm_adapter
*adapter
= arg
;
2197 UNM_SPIN_LOCK(&adapter
->lock
);
/* Already started: nothing to do. */
2198 if (adapter
->is_up
== UNM_ADAPTER_UP_MAGIC
) {
2199 UNM_SPIN_UNLOCK(&adapter
->lock
);
2200 return (DDI_SUCCESS
);
2203 if (create_rxtx_rings(adapter
) != DDI_SUCCESS
) {
2204 UNM_SPIN_UNLOCK(&adapter
->lock
);
2205 return (DDI_FAILURE
);
2208 if (init_firmware(adapter
) != DDI_SUCCESS
) {
2209 UNM_SPIN_UNLOCK(&adapter
->lock
);
2210 cmn_err(CE_WARN
, "%s%d: Failed to init firmware\n",
2211 adapter
->name
, adapter
->instance
);
2215 unm_nic_clear_stats(adapter
);
2217 if (unm_nic_hw_resources(adapter
) != 0) {
2218 UNM_SPIN_UNLOCK(&adapter
->lock
);
2219 cmn_err(CE_WARN
, "%s%d: Error setting hw resources\n",
2220 adapter
->name
, adapter
->instance
);
/* Pre-v4 firmware: program per-port cmd ring CRB addresses. */
2224 if (adapter
->fw_major
< 4) {
2225 adapter
->crb_addr_cmd_producer
=
2226 crb_cmd_producer
[adapter
->portnum
];
2227 adapter
->crb_addr_cmd_consumer
=
2228 crb_cmd_consumer
[adapter
->portnum
];
2229 unm_nic_update_cmd_producer(adapter
, 0);
2230 unm_nic_update_cmd_consumer(adapter
, 0);
2233 for (ring
= 0; ring
< adapter
->max_rds_rings
; ring
++) {
2234 if (unm_post_rx_buffers(adapter
, ring
) != DDI_SUCCESS
) {
2235 UNM_SPIN_UNLOCK(&adapter
->lock
);
2240 if (unm_nic_macaddr_set(adapter
, adapter
->mac_addr
) != 0) {
2241 UNM_SPIN_UNLOCK(&adapter
->lock
);
2242 cmn_err(CE_WARN
, "%s%d: Could not set mac address\n",
2243 adapter
->name
, adapter
->instance
);
2247 if (unm_nic_init_port(adapter
) != 0) {
2248 UNM_SPIN_UNLOCK(&adapter
->lock
);
2249 cmn_err(CE_WARN
, "%s%d: Could not initialize port\n",
2250 adapter
->name
, adapter
->instance
);
2254 unm_nic_set_link_parameters(adapter
);
2257 * P2 and P3 should be handled similarly.
/* P2 silicon uses promiscuous mode; P3 programs the multicast filter. */
2259 if (NX_IS_REVISION_P2(adapter
->ahw
.revision_id
)) {
2260 if (unm_nic_set_promisc_mode(adapter
) != 0) {
2261 UNM_SPIN_UNLOCK(&adapter
->lock
);
2262 cmn_err(CE_WARN
, "%s%d: Could not set promisc mode\n",
2263 adapter
->name
, adapter
->instance
);
2267 nx_p3_nic_set_multi(adapter
);
2269 adapter
->stats
.promiscmode
= 1;
2271 if (unm_nic_set_mtu(adapter
, adapter
->mtu
) != 0) {
2272 UNM_SPIN_UNLOCK(&adapter
->lock
);
2273 cmn_err(CE_WARN
, "%s%d: Could not set mtu\n",
2274 adapter
->name
, adapter
->instance
);
2278 adapter
->watchdog_timer
= timeout((void (*)(void *))&unm_watchdog
,
2279 (void *)adapter
, 0);
2281 adapter
->is_up
= UNM_ADAPTER_UP_MAGIC
;
2283 if (adapter
->intr_type
== DDI_INTR_TYPE_MSI
)
2284 (void) ddi_intr_block_enable(&adapter
->intr_handle
, 1);
2286 (void) ddi_intr_enable(adapter
->intr_handle
);
2287 unm_nic_enable_int(adapter
);
2289 UNM_SPIN_UNLOCK(&adapter
->lock
);
2290 return (GLD_SUCCESS
);
/* Error unwind (labels on missing lines): progressively tear down. */
2293 unm_nic_stop_port(adapter
);
2295 unm_free_hw_resources(adapter
);
2297 destroy_rxtx_rings(adapter
);
2298 return (DDI_FAILURE
);
2303 * This code is kept here for reference so as to
2304 * see if something different is required to be done
2305 * in GLDV3. This will be deleted later.
/* ntxn_m_stop() -- mc_stop(9E) entry point; body not visible in this
 * extraction. */
2309 ntxn_m_stop(void *arg
)
/*
 * ntxn_m_multicst() -- mc_multicst(9E) entry point. Currently a stub
 * that always reports success; the multicast filter is not updated.
 */
2315 ntxn_m_multicst(void *arg
, boolean_t add
, const uint8_t *ep
)
2318 * When we correctly implement this, invoke nx_p3_nic_set_multi()
2319 * or nx_p2_nic_set_multi() here.
2321 return (GLD_SUCCESS
);
/*
 * ntxn_m_promisc() -- mc_setpromisc(9E) entry point: enable or disable
 * promiscuous mode on the hardware, mapping the device result to
 * GLD_SUCCESS/GLD_FAILURE.
 */
2326 ntxn_m_promisc(void *arg
, boolean_t on
)
2330 struct unm_adapter_s
*adapter
= arg
;
2332 err
= on
? unm_nic_set_promisc_mode(adapter
) :
2333 unm_nic_unset_promisc_mode(adapter
);
2336 return (GLD_FAILURE
);
2339 return (GLD_SUCCESS
);
/*
 * ntxn_m_stat() -- mc_getstat(9E) entry point: report one MAC/Ether
 * statistic in *val, sourced from the software counters in
 * adapter->stats. Note OERRORS is derived as xmitcalled minus
 * xmitedframes.
 *
 * NOTE(review): break statements, some case bodies (MULTIRCV/BRDCST
 * paths) and the return(s) are on original lines missing from this
 * extraction.
 */
2343 ntxn_m_stat(void *arg
, uint_t stat
, uint64_t *val
)
2345 struct unm_adapter_s
*adapter
= arg
;
2346 struct unm_adapter_stats
*portstat
= &adapter
->stats
;
2349 case MAC_STAT_IFSPEED
:
/* XGBE reports a fixed 10 Gb/s link speed. */
2350 if (adapter
->ahw
.board_type
== UNM_NIC_XGBE
) {
2352 *val
= 10000000000ULL;
2359 case MAC_STAT_MULTIRCV
:
2363 case MAC_STAT_BRDCSTRCV
:
2364 case MAC_STAT_BRDCSTXMT
:
2368 case MAC_STAT_NORCVBUF
:
2369 *val
= portstat
->updropped
;
2372 case MAC_STAT_NOXMTBUF
:
2373 *val
= portstat
->txdropped
;
2376 case MAC_STAT_RBYTES
:
2377 *val
= portstat
->rxbytes
;
2380 case MAC_STAT_OBYTES
:
2381 *val
= portstat
->txbytes
;
2384 case MAC_STAT_OPACKETS
:
2385 *val
= portstat
->xmitedframes
;
2388 case MAC_STAT_IPACKETS
:
2389 *val
= portstat
->uphappy
;
2392 case MAC_STAT_OERRORS
:
2393 *val
= portstat
->xmitcalled
- portstat
->xmitedframes
;
2396 case ETHER_STAT_LINK_DUPLEX
:
2397 *val
= LINK_DUPLEX_FULL
;
2402 * Shouldn't reach here...
2405 DPRINTF(0, (CE_WARN
, ": unrecognized parameter = %d, value "
2406 "returned 1\n", stat
));
/*
 * ntxn_m_unicst() -- mc_unicst(9E) entry point: program the hardware
 * unicast MAC address and, on success, record it in adapter->mac_addr.
 *
 * NOTE(review): the failure-return statement and final return are on
 * original lines missing from this extraction.
 */
2414 ntxn_m_unicst(void *arg
, const uint8_t *mac
)
2416 struct unm_adapter_s
*adapter
= arg
;
2418 DPRINTF(-1, (CE_CONT
, "%s: called\n", __func__
));
2420 if (unm_nic_macaddr_set(adapter
, (uint8_t *)mac
))
2422 bcopy(mac
, adapter
->mac_addr
, ETHERADDRL
);
/*
 * ntxn_m_tx() -- mc_tx(9E) entry point: walk the mblk chain, submit
 * each packet via unm_nic_xmit_frame(), counting successful sends in
 * stats.xmitedframes. Packets that fail to transmit presumably cause
 * the remaining chain to be returned to the caller for requeueing --
 * TODO confirm (the chain-advance and return statements are on missing
 * original lines).
 */
2428 ntxn_m_tx(void *arg
, mblk_t
*mp
)
2430 unm_adapter
*adapter
= arg
;
2433 while (mp
!= NULL
) {
2437 if (unm_nic_xmit_frame(adapter
, mp
) != B_TRUE
) {
2442 adapter
->stats
.xmitedframes
++;
/*
 * ntxn_m_ioctl() -- mc_ioctl(9E) entry point. Routes ND_GET/ND_SET to
 * the NDD handler (unm_nd_ioctl) and acts on its ioc_reply: nak on
 * failure, ack on IOC_RESTART_ACK, or rewrite the message type to
 * M_IOCACK/M_IOCNAK for IOC_RESTART_REPLY. Driver-private commands in
 * [UNM_CMD_START, UNM_NIC_NAME] go to unm_nic_ioctl(); everything else
 * is nak'd with EINVAL.
 *
 * NOTE(review): several case labels, qreply/break statements and
 * closing braces are on original lines missing from this extraction.
 */
2449 ntxn_m_ioctl(void *arg
, queue_t
*wq
, mblk_t
*mp
)
2452 struct iocblk
*iocp
= (struct iocblk
*)(uintptr_t)mp
->b_rptr
;
2453 struct unm_adapter_s
*adapter
= (struct unm_adapter_s
*)arg
;
2454 enum ioc_reply status
= IOC_DONE
;
2456 iocp
->ioc_error
= 0;
2457 cmd
= iocp
->ioc_cmd
;
2459 if (cmd
== ND_GET
|| cmd
== ND_SET
) {
2460 status
= unm_nd_ioctl(adapter
, wq
, mp
, iocp
);
/* NDD handler failed: preserve its errno if it set one. */
2464 miocnak(wq
, mp
, 0, iocp
->ioc_error
== 0 ?
2465 EINVAL
: iocp
->ioc_error
);
2471 case IOC_RESTART_ACK
:
2473 miocack(wq
, mp
, 0, 0);
2476 case IOC_RESTART_REPLY
:
2478 mp
->b_datap
->db_type
= iocp
->ioc_error
== 0 ?
2479 M_IOCACK
: M_IOCNAK
;
2483 } else if (cmd
<= UNM_NIC_NAME
&& cmd
>= UNM_CMD_START
) {
2484 unm_nic_ioctl(adapter
, cmd
, wq
, mp
);
2487 miocnak(wq
, mp
, 0, EINVAL
);
/*
 * ntxn_m_getcapab() -- mc_getcapab(9E) entry point: advertise hardware
 * checksum offload (full IPv4 + IP header checksum); other listed
 * capabilities fall through to the unsupported path (return statements
 * are on missing original lines).
 */
2494 ntxn_m_getcapab(void *arg
, mac_capab_t cap
, void *cap_data
)
2497 case MAC_CAPAB_HCKSUM
:
2499 uint32_t *txflags
= cap_data
;
2501 *txflags
= (HCKSUM_ENABLE
|
2502 HCKSUM_INET_FULL_V4
| HCKSUM_IPHDRCKSUM
);
2507 case MAC_CAPAB_ANCHOR_VNIC
:
2508 case MAC_CAPAB_MULTIFACTADDR
:
2510 case MAC_CAPAB_POLL
:
2511 case MAC_CAPAB_MULTIADDRESS
:
/*
 * ntxn_m_callbacks -- mac_callbacks(9S) vector registered with the MAC
 * layer by unm_register_mac(); only MC_IOCTL and MC_GETCAPAB optional
 * callbacks are flagged. Several member initializers (the m_* entry
 * points) are on original lines missing from this extraction.
 */
2520 #define NETXEN_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB)
2522 static mac_callbacks_t ntxn_m_callbacks
= {
2523 NETXEN_M_CALLBACK_FLAGS
,
2531 NULL
, /* mc_reserved */
2535 NULL
, /* mc_close */
2536 NULL
, /* mc_setprop */
2537 NULL
/* mc_getprop */
2541 unm_register_mac(unm_adapter
*adapter
)
2544 mac_register_t
*macp
;
2545 unm_pauseparam_t pause
;
2547 dev_info_t
*dip
= adapter
->dip
;
2549 if ((macp
= mac_alloc(MAC_VERSION
)) == NULL
) {
2550 cmn_err(CE_WARN
, "Memory not available\n");
2551 return (DDI_FAILURE
);
2554 macp
->m_type_ident
= MAC_PLUGIN_IDENT_ETHER
;
2555 macp
->m_driver
= adapter
;
2557 macp
->m_instance
= adapter
->instance
;
2558 macp
->m_src_addr
= adapter
->mac_addr
;
2559 macp
->m_callbacks
= &ntxn_m_callbacks
;
2560 macp
->m_min_sdu
= 0;
2561 macp
->m_max_sdu
= adapter
->mtu
;
2563 macp
->m_margin
= VLAN_TAGSZ
;
2564 #endif /* SOLARIS11 */
2566 ret
= mac_register(macp
, &adapter
->mach
);
2569 cmn_err(CE_WARN
, "mac_register failed for port %d\n",
2571 return (DDI_FAILURE
);
2574 unm_init_kstats(adapter
, adapter
->instance
);
2576 /* Register NDD-tweakable parameters */
2577 if (unm_nd_init(adapter
)) {
2578 cmn_err(CE_WARN
, "unm_nd_init() failed");
2579 return (DDI_FAILURE
);
2582 pause
.rx_pause
= adapter
->nd_params
[PARAM_ADV_PAUSE_CAP
].ndp_val
;
2583 pause
.tx_pause
= adapter
->nd_params
[PARAM_ADV_ASYM_PAUSE_CAP
].ndp_val
;
2585 if (unm_nic_set_pauseparam(adapter
, &pause
)) {
2586 cmn_err(CE_WARN
, "\nBad Pause settings RX %d, Tx %d",
2587 pause
.rx_pause
, pause
.tx_pause
);
2589 adapter
->nd_params
[PARAM_PAUSE_CAP
].ndp_val
= pause
.rx_pause
;
2590 adapter
->nd_params
[PARAM_ASYM_PAUSE_CAP
].ndp_val
= pause
.tx_pause
;
2592 return (DDI_SUCCESS
);