/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2002-2009 Neterion, Inc.
 * All rights reserved.
 *
 * FileName :   xgell.c
 *
 * Description:  Xge Link Layer data path implementation
 */
#include "xgell.h"

#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu +	\
	sizeof (struct ether_vlan_header))

#define	HEADROOM		2	/* for DIX-only packets */
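
/*
 * No-op free routine for header mblks: the backing memory is owned and
 * reclaimed by the driver itself, so there is nothing to free when the
 * mblk wrapping it is released.
 */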
void header_free_func(void *arg) { }
frtn_t header_frtn = {header_free_func, NULL};
/* DMA attributes used for Tx side */
static struct ddi_dma_attr tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0x0ULL,				/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
	0x1000,				/* dma_attr_align */
	0xFC00FC,			/* dma_attr_burstsizes */
	0x1,				/* dma_attr_minxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
	18,				/* dma_attr_sgllen */
	(unsigned int)1,		/* dma_attr_granular */
	0				/* dma_attr_flags */
};
/*
 * DMA attributes used when using ddi_dma_mem_alloc to
 * allocate HAL descriptors and Rx buffers during replenish
 */
static struct ddi_dma_attr hal_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0x0ULL,				/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
	0x1000,				/* dma_attr_align */
	0xFC00FC,			/* dma_attr_burstsizes */
	0x1,				/* dma_attr_minxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
	1,				/* dma_attr_sgllen */
	(unsigned int)1,		/* dma_attr_granular */
	DDI_DMA_RELAXED_ORDERING	/* dma_attr_flags */
};

struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
static int		xgell_m_stat(void *, uint_t, uint64_t *);
static int		xgell_m_start(void *);
static void		xgell_m_stop(void *);
static int		xgell_m_promisc(void *, boolean_t);
static int		xgell_m_multicst(void *, boolean_t, const uint8_t *);
static void		xgell_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	xgell_m_getcapab(void *, mac_capab_t, void *);

#define	XGELL_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

static mac_callbacks_t xgell_m_callbacks = {
	XGELL_M_CALLBACK_FLAGS,
	xgell_m_stat,
	xgell_m_start,
	xgell_m_stop,
	xgell_m_promisc,
	xgell_m_multicst,
	NULL,
	NULL,
	NULL,
	xgell_m_ioctl,
	xgell_m_getcapab
};
/*
 * xge_device_poll
 *
 * Timeout should call me every 1s. xge_callback_event_queued should call me
 * when HAL hope event was rescheduled.
 */
/*ARGSUSED*/
void
xge_device_poll(void *data)
{
	xgelldev_t *lldev = xge_hal_device_private(data);

	mutex_enter(&lldev->genlock);
	if (lldev->is_initialized) {
		xge_hal_device_poll(data);
		lldev->timeout_id = timeout(xge_device_poll, data,
		    XGE_DEV_POLL_TICKS);
	} else if (lldev->in_reset == 1) {
		lldev->timeout_id = timeout(xge_device_poll, data,
		    XGE_DEV_POLL_TICKS);
	} else {
		lldev->timeout_id = 0;
	}
	mutex_exit(&lldev->genlock);
}
/*
 * xge_device_poll_now
 *
 * Will call xge_device_poll() immediately
 */
void
xge_device_poll_now(void *data)
{
	xgelldev_t *lldev = xge_hal_device_private(data);

	mutex_enter(&lldev->genlock);
	if (lldev->is_initialized) {
		xge_hal_device_poll(data);
	}
	mutex_exit(&lldev->genlock);
}
/*
 * xgell_callback_link_up
 *
 * This function is called by HAL to notify HW link up state change.
 */
void
xgell_callback_link_up(void *userdata)
{
	xgelldev_t *lldev = (xgelldev_t *)userdata;

	mac_link_update(lldev->mh, LINK_STATE_UP);
}

/*
 * xgell_callback_link_down
 *
 * This function is called by HAL to notify HW link down state change.
 */
void
xgell_callback_link_down(void *userdata)
{
	xgelldev_t *lldev = (xgelldev_t *)userdata;

	mac_link_update(lldev->mh, LINK_STATE_DOWN);
}
/*
 * xgell_rx_buffer_replenish_all
 *
 * Replenish all freed dtr(s) with buffers from the free pool. It's called by
 * xgell_rx_buffer_recycle() or xgell_rx_1b_callback().
 * Must be called with pool_lock held.
 */
static void
xgell_rx_buffer_replenish_all(xgell_rx_ring_t *ring)
{
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
	xge_hal_dtr_h dtr;
	xgell_rx_buffer_t *rx_buffer;
	xgell_rxd_priv_t *rxd_priv;

	xge_assert(mutex_owned(&bf_pool->pool_lock));

	while ((bf_pool->free > 0) &&
	    (xge_hal_ring_dtr_reserve(ring->channelh, &dtr) == XGE_HAL_OK)) {
		xge_assert(bf_pool->head);

		rx_buffer = bf_pool->head;

		bf_pool->head = rx_buffer->next;
		bf_pool->free--;

		xge_assert(rx_buffer->dma_addr);

		rxd_priv = (xgell_rxd_priv_t *)
		    xge_hal_ring_dtr_private(ring->channelh, dtr);
		xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
		    bf_pool->size);

		rxd_priv->rx_buffer = rx_buffer;
		xge_hal_ring_dtr_post(ring->channelh, dtr);
	}
}
/*
 * xgell_rx_buffer_release
 *
 * The only thing done here is to put the buffer back to the pool.
 * The caller must hold bf_pool.pool_lock.
 */
static void
xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
{
	xgell_rx_ring_t *ring = rx_buffer->ring;
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;

	xge_assert(mutex_owned(&bf_pool->pool_lock));

	/* Put the buffer back to pool */
	rx_buffer->next = bf_pool->head;
	bf_pool->head = rx_buffer;

	bf_pool->free++;
}
/*
 * xgell_rx_buffer_recycle
 *
 * Called by desballoc() to "free" the resource.
 * We will try to replenish all descriptors.
 *
 * Previously there was heavy lock contention between xgell_rx_1b_compl() and
 * xgell_rx_buffer_recycle(), which consumed a lot of CPU resources and hurt
 * rx performance. A separate recycle list is introduced to overcome this.
 * The recycle list records the rx buffers that have been recycled, and these
 * buffers are returned to the free list in bulk instead of one-by-one.
 */
static void
xgell_rx_buffer_recycle(char *arg)
{
	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
	xgell_rx_ring_t *ring = rx_buffer->ring;
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;

	mutex_enter(&bf_pool->recycle_lock);

	rx_buffer->next = bf_pool->recycle_head;
	bf_pool->recycle_head = rx_buffer;
	if (bf_pool->recycle_tail == NULL)
		bf_pool->recycle_tail = rx_buffer;
	bf_pool->recycle++;

	/*
	 * Before finding a good way to set this hiwat, just always call to
	 * replenish_all. *TODO*
	 */
	if ((lldev->is_initialized != 0) && (ring->live) &&
	    (bf_pool->recycle >= XGELL_RX_BUFFER_RECYCLE_CACHE)) {
		mutex_enter(&bf_pool->pool_lock);
		bf_pool->recycle_tail->next = bf_pool->head;
		bf_pool->head = bf_pool->recycle_head;
		bf_pool->recycle_head = bf_pool->recycle_tail = NULL;
		bf_pool->post -= bf_pool->recycle;
		bf_pool->free += bf_pool->recycle;
		bf_pool->recycle = 0;
		xgell_rx_buffer_replenish_all(ring);
		mutex_exit(&bf_pool->pool_lock);
	}

	mutex_exit(&bf_pool->recycle_lock);
}
/*
 * xgell_rx_buffer_alloc
 *
 * Allocate one rx buffer and return a pointer to it, or NULL on failure.
 */
static xgell_rx_buffer_t *
xgell_rx_buffer_alloc(xgell_rx_ring_t *ring)
{
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
	xge_hal_device_t *hldev;
	void *vaddr;
	ddi_dma_handle_t dma_handle;
	ddi_acc_handle_t dma_acch;
	dma_addr_t dma_addr;
	uint_t ncookies;
	ddi_dma_cookie_t dma_cookie;
	size_t real_size;
	extern ddi_device_acc_attr_t *p_xge_dev_attr;
	xgell_rx_buffer_t *rx_buffer;

	hldev = (xge_hal_device_t *)lldev->devh;

	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
	    0, &dma_handle) != DDI_SUCCESS) {
		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
		    XGELL_IFNAME, lldev->instance);
		goto handle_failed;
	}

	/* reserve some space at the end of the buffer for recycling */
	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + bf_pool->size +
	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
	    DDI_SUCCESS) {
		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
		    XGELL_IFNAME, lldev->instance);
		goto mem_failed;
	}

	if (HEADROOM + bf_pool->size + sizeof (xgell_rx_buffer_t) >
	    real_size) {
		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
		    XGELL_IFNAME, lldev->instance);
		goto bind_failed;
	}

	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
	    bf_pool->size, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
		    XGELL_IFNAME, lldev->instance);
		goto bind_failed;
	}

	if (ncookies != 1 || dma_cookie.dmac_size < bf_pool->size) {
		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
		    XGELL_IFNAME, lldev->instance);
		goto check_failed;
	}

	dma_addr = dma_cookie.dmac_laddress;

	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
	    sizeof (xgell_rx_buffer_t));
	rx_buffer->next = NULL;
	rx_buffer->vaddr = vaddr;
	rx_buffer->dma_addr = dma_addr;
	rx_buffer->dma_handle = dma_handle;
	rx_buffer->dma_acch = dma_acch;
	rx_buffer->ring = ring;
	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
	rx_buffer->frtn.free_arg = (void *)rx_buffer;

	return (rx_buffer);

check_failed:
	(void) ddi_dma_unbind_handle(dma_handle);
bind_failed:
	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
	ddi_dma_mem_free(&dma_acch);
mem_failed:
	ddi_dma_free_handle(&dma_handle);
handle_failed:

	return (NULL);
}
/*
 * xgell_rx_destroy_buffer_pool
 *
 * Destroy the buffer pool. If the upper layer still holds any buffers,
 * recorded by bf_pool.post, return B_FALSE to refuse to be unloaded.
 */
static boolean_t
xgell_rx_destroy_buffer_pool(xgell_rx_ring_t *ring)
{
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
	xgell_rx_buffer_t *rx_buffer;
	ddi_dma_handle_t dma_handle;
	ddi_acc_handle_t dma_acch;
	int i;

	/*
	 * If the pool has already been destroyed, just return B_TRUE
	 */
	if (!bf_pool->live)
		return (B_TRUE);

	mutex_enter(&bf_pool->recycle_lock);
	if (bf_pool->recycle > 0) {
		mutex_enter(&bf_pool->pool_lock);
		bf_pool->recycle_tail->next = bf_pool->head;
		bf_pool->head = bf_pool->recycle_head;
		bf_pool->recycle_tail = bf_pool->recycle_head = NULL;
		bf_pool->post -= bf_pool->recycle;
		bf_pool->free += bf_pool->recycle;
		bf_pool->recycle = 0;
		mutex_exit(&bf_pool->pool_lock);
	}
	mutex_exit(&bf_pool->recycle_lock);

	/*
	 * If any buffers are still posted, the driver must refuse to be
	 * detached; the upper layer needs to release them first.
	 */
	if (bf_pool->post != 0) {
		xge_debug_ll(XGE_ERR,
		    "%s%d has some buffers not recycled, try later!",
		    XGELL_IFNAME, lldev->instance);
		return (B_FALSE);
	}

	/*
	 * Release buffers one by one.
	 */
	for (i = bf_pool->total; i > 0; i--) {
		rx_buffer = bf_pool->head;
		xge_assert(rx_buffer != NULL);

		bf_pool->head = rx_buffer->next;

		dma_handle = rx_buffer->dma_handle;
		dma_acch = rx_buffer->dma_acch;

		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
			xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
			bf_pool->head = rx_buffer;
			return (B_FALSE);
		}
		ddi_dma_mem_free(&dma_acch);
		ddi_dma_free_handle(&dma_handle);

		bf_pool->total--;
		bf_pool->free--;
	}

	xge_assert(!mutex_owned(&bf_pool->pool_lock));

	mutex_destroy(&bf_pool->recycle_lock);
	mutex_destroy(&bf_pool->pool_lock);
	bf_pool->live = B_FALSE;

	return (B_TRUE);
}
/*
 * xgell_rx_create_buffer_pool
 *
 * Initialize the RX buffer pool for this RX ring. Refer to rx_buffer_pool_t.
 */
static boolean_t
xgell_rx_create_buffer_pool(xgell_rx_ring_t *ring)
{
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
	xge_hal_device_t *hldev;
	xgell_rx_buffer_t *rx_buffer;
	int i;

	if (bf_pool->live)
		return (B_TRUE);

	hldev = (xge_hal_device_t *)lldev->devh;

	bf_pool->total = 0;
	bf_pool->size = XGELL_MAX_FRAME_SIZE(hldev);
	bf_pool->head = NULL;
	bf_pool->free = 0;
	bf_pool->post = 0;
	bf_pool->post_hiwat = lldev->config.rx_buffer_post_hiwat;
	bf_pool->recycle = 0;
	bf_pool->recycle_head = NULL;
	bf_pool->recycle_tail = NULL;
	bf_pool->live = B_TRUE;

	mutex_init(&bf_pool->pool_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hldev->irqh));
	mutex_init(&bf_pool->recycle_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hldev->irqh));

	/*
	 * Allocate buffers one by one. If any allocation fails, destroy the
	 * whole pool by calling xgell_rx_destroy_buffer_pool().
	 */
	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
		if ((rx_buffer = xgell_rx_buffer_alloc(ring)) == NULL) {
			(void) xgell_rx_destroy_buffer_pool(ring);
			return (B_FALSE);
		}

		rx_buffer->next = bf_pool->head;
		bf_pool->head = rx_buffer;

		bf_pool->total++;
		bf_pool->free++;
	}

	return (B_TRUE);
}
/*
 * xgell_rx_dtr_replenish
 *
 * Replenish a descriptor with an rx_buffer from the RX buffer pool.
 * The dtr should be posted right away.
 */
xge_hal_status_e
xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
    void *userdata, xge_hal_channel_reopen_e reopen)
{
	xgell_rx_ring_t *ring = userdata;
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
	xgell_rx_buffer_t *rx_buffer;
	xgell_rxd_priv_t *rxd_priv;

	mutex_enter(&bf_pool->pool_lock);
	if (bf_pool->head == NULL) {
		xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
		mutex_exit(&bf_pool->pool_lock);
		return (XGE_HAL_FAIL);
	}
	rx_buffer = bf_pool->head;
	xge_assert(rx_buffer);
	xge_assert(rx_buffer->dma_addr);

	bf_pool->head = rx_buffer->next;
	bf_pool->free--;
	mutex_exit(&bf_pool->pool_lock);

	rxd_priv = (xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtr);
	xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, bf_pool->size);

	rxd_priv->rx_buffer = rx_buffer;

	return (XGE_HAL_OK);
}
/*
 * xgell_get_ip_offset
 *
 * Calculate the offset to the IP header.
 */
static inline int
xgell_get_ip_offset(xge_hal_dtr_info_t *ext_info)
{
	int ip_off;

	/* get IP-header offset */
	switch (ext_info->frame) {
	case XGE_HAL_FRAME_TYPE_DIX:
		ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
		break;
	case XGE_HAL_FRAME_TYPE_IPX:
		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
		    XGE_HAL_HEADER_802_2_SIZE +
		    XGE_HAL_HEADER_SNAP_SIZE);
		break;
	case XGE_HAL_FRAME_TYPE_LLC:
		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
		    XGE_HAL_HEADER_802_2_SIZE);
		break;
	case XGE_HAL_FRAME_TYPE_SNAP:
		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
		    XGE_HAL_HEADER_SNAP_SIZE);
		break;
	default:
		ip_off = 0;
		break;
	}

	if ((ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6) &&
	    (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)) {
		ip_off += XGE_HAL_HEADER_VLAN_SIZE;
	}

	return (ip_off);
}
/*
 * xgell_rx_hcksum_assoc
 *
 * Judge the packet type and then call mac_hcksum_set() to associate
 * h/w checksum information.
 */
static inline void
xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
    xge_hal_dtr_info_t *ext_info)
{
	int cksum_flags = 0;

	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
		if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
			if (ext_info->l3_cksum == XGE_HAL_L3_CKSUM_OK) {
				cksum_flags |= HCK_IPV4_HDRCKSUM_OK;
			}
			if (ext_info->l4_cksum == XGE_HAL_L4_CKSUM_OK) {
				cksum_flags |= HCK_FULLCKSUM_OK;
			}
			if (cksum_flags != 0) {
				mac_hcksum_set(mp, 0, 0, 0, 0, cksum_flags);
			}
		}
	} else if (ext_info->proto &
	    (XGE_HAL_FRAME_PROTO_IPV4 | XGE_HAL_FRAME_PROTO_IPV6)) {
		/*
		 * Just pass the partial cksum up to IP.
		 */
		int ip_off = xgell_get_ip_offset(ext_info);
		int start, end = pkt_length - ip_off;

		if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
			struct ip *ip =
			    (struct ip *)(vaddr + ip_off);
			start = ip->ip_hl * 4;
		} else {
			start = 40;
		}
		cksum_flags |= HCK_PARTIALCKSUM;
		mac_hcksum_set(mp, start, 0, end,
		    ntohs(ext_info->l4_cksum), cksum_flags);
	}
}
/*
 * xgell_rx_1b_msg_alloc
 *
 * Allocate a message header for the data buffer, and decide whether to copy
 * the packet into a new data buffer so that the big rx_buffer can be
 * released to save memory.
 *
 * If pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate
 * a new message and copy the payload in.
 */
static mblk_t *
xgell_rx_1b_msg_alloc(xgell_rx_ring_t *ring, xgell_rx_buffer_t *rx_buffer,
    int pkt_length, xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
{
	xgelldev_t *lldev = ring->lldev;
	mblk_t *mp;
	char *vaddr;

	vaddr = (char *)rx_buffer->vaddr + HEADROOM;
	/*
	 * Copy the packet into a newly allocated message buffer, if
	 * pkt_length is less than XGELL_RX_DMA_LOWAT
	 */
	if (*copyit || pkt_length <= lldev->config.rx_dma_lowat) {
		if ((mp = allocb(pkt_length + HEADROOM, 0)) == NULL) {
			return (NULL);
		}
		mp->b_rptr += HEADROOM;
		bcopy(vaddr, mp->b_rptr, pkt_length);
		mp->b_wptr = mp->b_rptr + pkt_length;
		*copyit = B_TRUE;
		return (mp);
	}

	/*
	 * Just allocate an mblk for the current data buffer
	 */
	if ((mp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
	    &rx_buffer->frtn)) == NULL) {
		/* Drop it */
		return (NULL);
	}
	/*
	 * Adjust the b_rptr/b_wptr in the mblk_t structure.
	 */
	mp->b_wptr += pkt_length;

	return (mp);
}
/*
 * xgell_rx_1b_callback
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
static xge_hal_status_e
xgell_rx_1b_callback(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
    void *userdata)
{
	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)userdata;
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_t *rx_buffer;
	mblk_t *mp_head = NULL;
	mblk_t *mp_end = NULL;
	int pkt_burst = 0;

	xge_debug_ll(XGE_TRACE, "xgell_rx_1b_callback on ring %d", ring->index);

	mutex_enter(&ring->bf_pool.pool_lock);
	do {
		int pkt_length;
		dma_addr_t dma_data;
		mblk_t *mp;
		boolean_t copyit = B_FALSE;

		xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
		    xge_hal_ring_dtr_private(channelh, dtr));
		xge_hal_dtr_info_t ext_info;

		rx_buffer = rxd_priv->rx_buffer;

		xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
		xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);

		xge_assert(dma_data == rx_buffer->dma_addr);

		if (t_code != 0) {
			xge_debug_ll(XGE_ERR, "%s%d: rx: dtr 0x%"PRIx64
			    " completed due to error t_code %01x", XGELL_IFNAME,
			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);

			(void) xge_hal_device_handle_tcode(channelh, dtr,
			    t_code);
			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
			xgell_rx_buffer_release(rx_buffer);
			continue;
		}

		/*
		 * Sync the DMA memory
		 */
		if (ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
		    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
			xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
			    XGELL_IFNAME, lldev->instance);
			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
			xgell_rx_buffer_release(rx_buffer);
			continue;
		}

		/*
		 * Allocate message for the packet.
		 */
		if (ring->bf_pool.post > ring->bf_pool.post_hiwat) {
			copyit = B_TRUE;
		} else {
			copyit = B_FALSE;
		}

		mp = xgell_rx_1b_msg_alloc(ring, rx_buffer, pkt_length,
		    &ext_info, &copyit);

		xge_hal_ring_dtr_free(channelh, dtr);

		/*
		 * Release the buffer and recycle it later
		 */
		if ((mp == NULL) || copyit) {
			xgell_rx_buffer_release(rx_buffer);
		} else {
			/*
			 * Count it since the buffer should be loaned up.
			 */
			ring->bf_pool.post++;
		}
		if (mp == NULL) {
			xge_debug_ll(XGE_ERR,
			    "%s%d: rx: can not allocate mp mblk",
			    XGELL_IFNAME, lldev->instance);
			continue;
		}

		/*
		 * Associate cksum_flags per packet type and h/w
		 * cksum flags.
		 */
		xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr + HEADROOM,
		    pkt_length, &ext_info);

		ring->rx_pkts++;
		ring->rx_bytes += pkt_length;

		if (mp_head == NULL) {
			mp_head = mp;
			mp_end = mp;
		} else {
			mp_end->b_next = mp;
			mp_end = mp;
		}

		/*
		 * Inline implementation of the polling function.
		 */
		if ((ring->poll_mp == NULL) && (ring->poll_bytes > 0)) {
			ring->poll_mp = mp_head;
		}
		if (ring->poll_mp != NULL) {
			if ((ring->poll_bytes -= pkt_length) <= 0) {
				/* have polled enough packets. */
				break;
			} else {
				/* continue polling packets. */
				continue;
			}
		}

		/*
		 * We're not in polling mode, so try to chain more messages
		 * or send the chain up according to pkt_burst.
		 */
		if (++pkt_burst < lldev->config.rx_pkt_burst)
			continue;

		if (ring->bf_pool.post > ring->bf_pool.post_hiwat) {
			/* Replenish rx buffers */
			xgell_rx_buffer_replenish_all(ring);
		}
		mutex_exit(&ring->bf_pool.pool_lock);
		if (mp_head != NULL) {
			mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
			    ring->ring_gen_num);
		}
		mp_head = mp_end = NULL;
		pkt_burst = 0;
		mutex_enter(&ring->bf_pool.pool_lock);

	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
	    XGE_HAL_OK);

	/*
	 * Always call replenish_all to recycle rx_buffers.
	 */
	xgell_rx_buffer_replenish_all(ring);
	mutex_exit(&ring->bf_pool.pool_lock);

	/*
	 * If we're not in polling cycle, call mac_rx(), otherwise
	 * just return while leaving packets chained to ring->poll_mp.
	 */
	if ((ring->poll_mp == NULL) && (mp_head != NULL)) {
		mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
		    ring->ring_gen_num);
	}

	return (XGE_HAL_OK);
}
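
/*
 * xgell_rx_poll
 *
 * MAC layer entry point (mri_poll) for polled receive. Let the HAL walk
 * this ring's completed descriptors; xgell_rx_1b_callback() then chains up
 * to bytes_to_pickup worth of packets onto ring->poll_mp instead of calling
 * mac_rx_ring(), and that chain is returned to the caller here.
 */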
mblk_t *
xgell_rx_poll(void *arg, int bytes_to_pickup)
{
	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)arg;
	int got_rx = 0;
	mblk_t *mp;

	xge_debug_ll(XGE_TRACE, "xgell_rx_poll on ring %d", ring->index);

	ring->poll_mp = NULL;
	ring->poll_bytes = bytes_to_pickup;
	(void) xge_hal_device_poll_rx_channel(ring->channelh, &got_rx);

	mp = ring->poll_mp;
	ring->poll_bytes = -1;
	ring->polled_bytes += got_rx;
	ring->poll_mp = NULL;

	return (mp);
}
/*
 * xgell_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all mblks whose data have already been DMA'ed into the
 * NIC's internal memory.
 */
static xge_hal_status_e
xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
    void *userdata)
{
	xgell_tx_ring_t *ring = userdata;
	xgelldev_t *lldev = ring->lldev;

	do {
		xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
		    xge_hal_fifo_dtr_private(dtr));
		int i;

		if (t_code) {
			xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
			    " completed due to error t_code %01x", XGELL_IFNAME,
			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);

			(void) xge_hal_device_handle_tcode(channelh, dtr,
			    t_code);
		}

		for (i = 0; i < txd_priv->handle_cnt; i++) {
			if (txd_priv->dma_handles[i] != NULL) {
				xge_assert(txd_priv->dma_handles[i]);
				(void) ddi_dma_unbind_handle(
				    txd_priv->dma_handles[i]);
				ddi_dma_free_handle(&txd_priv->dma_handles[i]);
				txd_priv->dma_handles[i] = 0;
			}
		}
		txd_priv->handle_cnt = 0;

		xge_hal_fifo_dtr_free(channelh, dtr);

		if (txd_priv->mblk != NULL) {
			freemsg(txd_priv->mblk);
			txd_priv->mblk = NULL;
		}

	} while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
	    XGE_HAL_OK);

	if (ring->need_resched)
		mac_tx_ring_update(lldev->mh, ring->ring_handle);

	return (XGE_HAL_OK);
}
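
/*
 * xgell_ring_tx
 *
 * MAC layer entry point (mri_tx) to transmit one message chain. Small
 * fragments are bcopy()'ed into the descriptor's inline buffer, larger
 * ones are DMA-bound directly; checksum offload and LSO flags from the
 * mblk are then applied before the descriptor is posted. Returns NULL
 * on success, or mp when the ring is out of descriptors (flow control).
 */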
mblk_t *
xgell_ring_tx(void *arg, mblk_t *mp)
{
	xgell_tx_ring_t *ring = (xgell_tx_ring_t *)arg;
	mblk_t *bp;
	xgelldev_t *lldev = ring->lldev;
	xge_hal_device_t *hldev = lldev->devh;
	xge_hal_status_e status;
	xge_hal_dtr_h dtr;
	xgell_txd_priv_t *txd_priv;
	uint32_t hckflags;
	uint32_t lsoflags;
	uint32_t mss;
	int handle_cnt, frag_cnt, ret, i, copied;
	boolean_t used_copy;
	uint64_t sent_bytes;

_begin:
	handle_cnt = frag_cnt = 0;
	sent_bytes = 0;

	if (!lldev->is_initialized || lldev->in_reset)
		return (mp);

	/*
	 * If the free Tx dtr count reaches the lower threshold, inform the
	 * gld to stop sending more packets until the free dtr count exceeds
	 * the higher threshold. The driver informs the gld through a
	 * gld_sched call when the free dtr count exceeds the higher
	 * threshold.
	 */
	if (xge_hal_channel_dtr_count(ring->channelh)
	    <= XGELL_TX_LEVEL_LOW) {
		xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit,"
		    "free descriptors count at low threshold %d",
		    XGELL_IFNAME, lldev->instance,
		    ((xge_hal_channel_t *)ring->channelh)->post_qid,
		    XGELL_TX_LEVEL_LOW);
		goto _exit;
	}

	status = xge_hal_fifo_dtr_reserve(ring->channelh, &dtr);
	if (status != XGE_HAL_OK) {
		switch (status) {
		case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
			xge_debug_ll(XGE_ERR,
			    "%s%d: channel %d is not ready.", XGELL_IFNAME,
			    lldev->instance,
			    ((xge_hal_channel_t *)
			    ring->channelh)->post_qid);
			goto _exit;
		case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
			xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
			    " out of descriptors.", XGELL_IFNAME,
			    lldev->instance,
			    ((xge_hal_channel_t *)
			    ring->channelh)->post_qid);
			goto _exit;
		default:
			return (mp);
		}
	}

	txd_priv = xge_hal_fifo_dtr_private(dtr);
	txd_priv->mblk = mp;

	/*
	 * The VLAN tag should be passed down along with the MAC header, so
	 * h/w needn't do insertion.
	 *
	 * For a NIC driver that has to strip and re-insert the VLAN tag, see
	 * the other implementation for xge. The driver can simply bcopy()
	 * ether_vlan_header to overwrite the VLAN tag and let h/w insert the
	 * tag automatically, since it's impossible that GLD sends down mp(s)
	 * with a split ether_vlan_header.
	 *
	 * struct ether_vlan_header *evhp;
	 * uint16_t tci;
	 *
	 * evhp = (struct ether_vlan_header *)mp->b_rptr;
	 * if (evhp->ether_tpid == htons(VLAN_TPID)) {
	 *	tci = ntohs(evhp->ether_tci);
	 *	(void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
	 *	    2 * ETHERADDRL);
	 *	mp->b_rptr += VLAN_TAGSZ;
	 *
	 *	xge_hal_fifo_dtr_vlan_set(dtr, tci);
	 * }
	 */

	copied = 0;
	used_copy = B_FALSE;
	for (bp = mp; bp != NULL; bp = bp->b_cont) {
		int mblen;
		uint_t ncookies;
		ddi_dma_cookie_t dma_cookie;
		ddi_dma_handle_t dma_handle;

		/* skip zero-length message blocks */
		mblen = MBLKL(bp);
		if (mblen == 0) {
			continue;
		}

		sent_bytes += mblen;

		/*
		 * Check the message length to decide to DMA or bcopy() data
		 * to tx descriptor(s).
		 */
		if (mblen < lldev->config.tx_dma_lowat &&
		    (copied + mblen) < lldev->tx_copied_max) {
			xge_hal_status_e rc;
			rc = xge_hal_fifo_dtr_buffer_append(ring->channelh,
			    dtr, bp->b_rptr, mblen);
			if (rc == XGE_HAL_OK) {
				used_copy = B_TRUE;
				copied += mblen;
				continue;
			} else if (used_copy) {
				xge_hal_fifo_dtr_buffer_finalize(
				    ring->channelh, dtr, frag_cnt++);
				used_copy = B_FALSE;
			}
		} else if (used_copy) {
			xge_hal_fifo_dtr_buffer_finalize(ring->channelh,
			    dtr, frag_cnt++);
			used_copy = B_FALSE;
		}

		ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
		    DDI_DMA_DONTWAIT, 0, &dma_handle);
		if (ret != DDI_SUCCESS) {
			xge_debug_ll(XGE_ERR,
			    "%s%d: can not allocate dma handle", XGELL_IFNAME,
			    lldev->instance);
			goto _exit_cleanup;
		}

		ret = ddi_dma_addr_bind_handle(dma_handle, NULL,
		    (caddr_t)bp->b_rptr, mblen,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
		    &dma_cookie, &ncookies);

		switch (ret) {
		case DDI_DMA_MAPPED:
			/* everything's fine */
			break;

		case DDI_DMA_NORESOURCES:
			xge_debug_ll(XGE_ERR,
			    "%s%d: can not bind dma address",
			    XGELL_IFNAME, lldev->instance);
			ddi_dma_free_handle(&dma_handle);
			goto _exit_cleanup;

		case DDI_DMA_NOMAPPING:
		case DDI_DMA_INUSE:
		case DDI_DMA_TOOBIG:
		default:
			/* drop packet, don't retry */
			xge_debug_ll(XGE_ERR,
			    "%s%d: can not map message buffer",
			    XGELL_IFNAME, lldev->instance);
			ddi_dma_free_handle(&dma_handle);
			goto _exit_cleanup;
		}

		if (ncookies + frag_cnt > hldev->config.fifo.max_frags) {
			xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
			    "requested c:%d+f:%d", XGELL_IFNAME,
			    lldev->instance, ncookies, frag_cnt);
			(void) ddi_dma_unbind_handle(dma_handle);
			ddi_dma_free_handle(&dma_handle);
			goto _exit_cleanup;
		}

		/* setup the descriptors for this data buffer */
		while (ncookies) {
			xge_hal_fifo_dtr_buffer_set(ring->channelh, dtr,
			    frag_cnt++, dma_cookie.dmac_laddress,
			    dma_cookie.dmac_size);
			if (--ncookies) {
				ddi_dma_nextcookie(dma_handle, &dma_cookie);
			}
		}

		txd_priv->dma_handles[handle_cnt++] = dma_handle;

		if (bp->b_cont &&
		    (frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
		    hldev->config.fifo.max_frags)) {
			mblk_t *nmp;

			xge_debug_ll(XGE_TRACE,
			    "too many FRAGs [%d], pull up them", frag_cnt);

			if ((nmp = msgpullup(bp->b_cont, -1)) == NULL) {
				/* Drop packet, don't retry */
				xge_debug_ll(XGE_ERR,
				    "%s%d: can not pullup message buffer",
				    XGELL_IFNAME, lldev->instance);
				goto _exit_cleanup;
			}
			freemsg(bp->b_cont);
			bp->b_cont = nmp;
		}
	}

	/* finalize unfinished copies */
	if (used_copy) {
		xge_hal_fifo_dtr_buffer_finalize(ring->channelh, dtr,
		    frag_cnt++);
	}

	txd_priv->handle_cnt = handle_cnt;

	/*
	 * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
	 * do all necessary work.
	 */
	mac_lso_get(mp, &mss, &lsoflags);

	if (lsoflags & HW_LSO) {
		xge_assert((mss != 0) && (mss <= XGE_HAL_DEFAULT_MTU));
		xge_hal_fifo_dtr_mss_set(dtr, mss);
	}

	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &hckflags);
	if (hckflags & HCK_IPV4_HDRCKSUM) {
		xge_hal_fifo_dtr_cksum_set_bits(dtr,
		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
	}
	if (hckflags & HCK_FULLCKSUM) {
		xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
		    XGE_HAL_TXD_TX_CKO_UDP_EN);
	}

	xge_hal_fifo_dtr_post(ring->channelh, dtr);

	/* Update per-ring tx statistics */
	atomic_inc_64(&ring->tx_pkts);
	atomic_add_64(&ring->tx_bytes, sent_bytes);

	return (NULL);

_exit_cleanup:
	/*
	 * Could not successfully transmit but have changed the message,
	 * so just free it and return NULL
	 */
	for (i = 0; i < handle_cnt; i++) {
		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
		txd_priv->dma_handles[i] = 0;
	}

	xge_hal_fifo_dtr_free(ring->channelh, dtr);

	freemsg(mp);
	return (NULL);

_exit:
	ring->need_resched = B_TRUE;
	return (mp);
}
/*
 * xgell_rx_ring_maddr_init
 */
static void
xgell_rx_ring_maddr_init(xgell_rx_ring_t *ring)
{
	int i;
	xgelldev_t *lldev = ring->lldev;
	xge_hal_device_t *hldev = lldev->devh;
	int slot_start;

	xge_debug_ll(XGE_TRACE, "%s", "xgell_rx_ring_maddr_init");

	ring->mmac.naddr = XGE_RX_MULTI_MAC_ADDRESSES_MAX;
	ring->mmac.naddrfree = ring->mmac.naddr;

	/*
	 * For the default rx ring, the first MAC address is the factory one.
	 * This will be set by the framework, so need to clear it for now.
	 */
	(void) xge_hal_device_macaddr_clear(hldev, 0);

	/*
	 * Read the MAC address Configuration Memory from HAL.
	 * The first slot will hold a factory MAC address, contents in other
	 * slots will be FF:FF:FF:FF:FF:FF.
	 */
	slot_start = ring->index * 32;
	for (i = 0; i < ring->mmac.naddr; i++) {
		(void) xge_hal_device_macaddr_get(hldev, slot_start + i,
		    ring->mmac.mac_addr + i);
		ring->mmac.mac_addr_set[i] = B_FALSE;
	}
}

static int xgell_maddr_set(xgelldev_t *, int, uint8_t *);
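
/*
 * xgell_addmac
 *
 * MAC group entry point (mgi_addmac): program mac_addr into the first
 * free slot of this ring's section of the MAC address Configuration
 * Memory and enable RTS steering for it.
 */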
static int
xgell_addmac(void *arg, const uint8_t *mac_addr)
{
	xgell_rx_ring_t *ring = arg;
	xgelldev_t *lldev = ring->lldev;
	xge_hal_device_t *hldev = lldev->devh;
	int slot;
	int slot_start;

	xge_debug_ll(XGE_TRACE, "%s", "xgell_addmac");

	mutex_enter(&lldev->genlock);

	if (ring->mmac.naddrfree == 0) {
		mutex_exit(&lldev->genlock);
		return (ENOSPC);
	}

	/* First slot is for factory MAC address */
	for (slot = 0; slot < ring->mmac.naddr; slot++) {
		if (ring->mmac.mac_addr_set[slot] == B_FALSE) {
			break;
		}
	}

	ASSERT(slot < ring->mmac.naddr);

	slot_start = ring->index * 32;

	if (xgell_maddr_set(lldev, slot_start + slot, (uint8_t *)mac_addr) !=
	    0) {
		mutex_exit(&lldev->genlock);
		return (EIO);
	}

	/* Simply enable RTS for the whole section. */
	(void) xge_hal_device_rts_section_enable(hldev, slot_start + slot);

	/*
	 * Read back the MAC address from HAL to keep the array up to date.
	 */
	if (xge_hal_device_macaddr_get(hldev, slot_start + slot,
	    ring->mmac.mac_addr + slot) != XGE_HAL_OK) {
		(void) xge_hal_device_macaddr_clear(hldev, slot_start + slot);
		mutex_exit(&lldev->genlock);
		return (EIO);
	}

	ring->mmac.mac_addr_set[slot] = B_TRUE;
	ring->mmac.naddrfree--;

	mutex_exit(&lldev->genlock);

	return (0);
}
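
/*
 * xgell_remmac
 *
 * MAC group entry point (mgi_remmac): locate mac_addr in this ring's
 * section of the MAC address Configuration Memory and clear it.
 */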
static int
xgell_remmac(void *arg, const uint8_t *mac_addr)
{
	xgell_rx_ring_t *ring = arg;
	xgelldev_t *lldev = ring->lldev;
	xge_hal_device_t *hldev = lldev->devh;
	xge_hal_status_e status;
	int slot;
	int slot_start;

	xge_debug_ll(XGE_TRACE, "%s", "xgell_remmac");

	slot = xge_hal_device_macaddr_find(hldev, (uint8_t *)mac_addr);
	if (slot == -1)
		return (EINVAL);

	slot_start = ring->index * 32;

	/*
	 * Adjust slot to the offset in the MAC array of this ring (group).
	 */
	slot -= slot_start;

	/*
	 * Only a pre-set MAC address for this ring (group) can be removed.
	 */
	if (slot < 0 || slot >= ring->mmac.naddr)
		return (EINVAL);

	xge_assert(ring->mmac.mac_addr_set[slot]);

	mutex_enter(&lldev->genlock);
	if (!ring->mmac.mac_addr_set[slot]) {
		mutex_exit(&lldev->genlock);
		/*
		 * Reaching here gives an unexpected result. WARNING!
		 */
		xge_debug_ll(XGE_ERR,
		    "%s%d: caller is trying to remove an unset MAC address",
		    XGELL_IFNAME, lldev->instance);
		return (ENXIO);
	}

	status = xge_hal_device_macaddr_clear(hldev, slot_start + slot);
	if (status != XGE_HAL_OK) {
		mutex_exit(&lldev->genlock);
		return (EIO);
	}

	ring->mmac.mac_addr_set[slot] = B_FALSE;
	ring->mmac.naddrfree++;

	/*
	 * TODO: Disable MAC RTS if all addresses have been cleared.
	 */

	/*
	 * Read back the MAC address from HAL to keep the array up to date.
	 */
	(void) xge_hal_device_macaddr_get(hldev, slot_start + slot,
	    ring->mmac.mac_addr + slot);
	mutex_exit(&lldev->genlock);

	return (0);
}
/*
 * Temporarily calling hal function.
 *
 * With the MSI-X implementation, no lock is needed, so that interrupt
 * handling can be faster.
 */
static int
xgell_rx_ring_intr_enable(mac_intr_handle_t ih)
{
	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)ih;

	mutex_enter(&ring->ring_lock);
	xge_hal_device_rx_channel_disable_polling(ring->channelh);
	mutex_exit(&ring->ring_lock);

	return (0);
}

static int
xgell_rx_ring_intr_disable(mac_intr_handle_t ih)
{
	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)ih;

	mutex_enter(&ring->ring_lock);
	xge_hal_device_rx_channel_enable_polling(ring->channelh);
	mutex_exit(&ring->ring_lock);

	return (0);
}
static int
xgell_rx_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	xgell_rx_ring_t *rx_ring = (xgell_rx_ring_t *)rh;

	rx_ring->ring_gen_num = mr_gen_num;

	return (0);
}

/*ARGSUSED*/
static void
xgell_rx_ring_stop(mac_ring_driver_t rh)
{
}

/*ARGSUSED*/
static int
xgell_tx_ring_start(mac_ring_driver_t rh, uint64_t useless)
{
	return (0);
}

/*ARGSUSED*/
static void
xgell_tx_ring_stop(mac_ring_driver_t rh)
{
}
/*
 * Callback function for MAC layer to register all rings.
 *
 * Xframe hardware doesn't support grouping explicitly, so the driver needs
 * to pretend having resource groups. We may also optionally group all 8 rx
 * rings into a single group for increased scalability on CMT architectures,
 * or group one rx ring per group for maximum virtualization.
 *
 * TX grouping is actually done by framework, so, just register all TX
 * resources without grouping them.
 */
void
xgell_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	xgelldev_t *lldev = (xgelldev_t *)arg;
	mac_intr_t *mintr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		xgell_rx_ring_t *rx_ring;

		xge_assert(index < lldev->init_rx_rings);
		xge_assert(rg_index < lldev->init_rx_groups);

		/*
		 * Performance vs. Virtualization
		 */
		if (lldev->init_rx_rings == lldev->init_rx_groups)
			rx_ring = lldev->rx_ring + rg_index;
		else
			rx_ring = lldev->rx_ring + index;

		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = xgell_rx_ring_start;
		infop->mri_stop = xgell_rx_ring_stop;
		infop->mri_poll = xgell_rx_poll;
		infop->mri_stat = xgell_rx_ring_stat;

		mintr = &infop->mri_intr;
		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = xgell_rx_ring_intr_enable;
		mintr->mi_disable = xgell_rx_ring_intr_disable;

		break;
	}
	case MAC_RING_TYPE_TX: {
		xgell_tx_ring_t *tx_ring;

		xge_assert(rg_index == -1);

		xge_assert((index >= 0) && (index < lldev->init_tx_rings));

		tx_ring = lldev->tx_ring + index;
		tx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)tx_ring;
		infop->mri_start = xgell_tx_ring_start;
		infop->mri_stop = xgell_tx_ring_stop;
		infop->mri_tx = xgell_ring_tx;
		infop->mri_stat = xgell_tx_ring_stat;

		break;
	}
	default:
		break;
	}
}
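
/*
 * Callback function for MAC layer to register all groups.
 */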
void
xgell_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	xgelldev_t *lldev = (xgelldev_t *)arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		xgell_rx_ring_t *rx_ring;

		xge_assert(index < lldev->init_rx_groups);

		rx_ring = lldev->rx_ring + index;

		rx_ring->group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_ring;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = xgell_addmac;
		infop->mgi_remmac = xgell_remmac;
		infop->mgi_count = lldev->init_rx_rings / lldev->init_rx_groups;

		break;
	}
	case MAC_RING_TYPE_TX:
		xge_assert(0);
		break;
	default:
		break;
	}
}
/*
 * xgell_maddr_set
 */
static int
xgell_maddr_set(xgelldev_t *lldev, int index, uint8_t *macaddr)
{
	xge_hal_device_t *hldev = lldev->devh;
	xge_hal_status_e status;

	xge_debug_ll(XGE_TRACE, "%s", "xgell_maddr_set");

	xge_debug_ll(XGE_TRACE,
	    "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
	    macaddr[0], macaddr[1], macaddr[2],
	    macaddr[3], macaddr[4], macaddr[5]);

	status = xge_hal_device_macaddr_set(hldev, index, (uchar_t *)macaddr);

	if (status != XGE_HAL_OK) {
		xge_debug_ll(XGE_ERR, "%s%d: can not set mac address",
		    XGELL_IFNAME, lldev->instance);
		return (EIO);
	}

	return (0);
}
/*
 * xgell_rx_dtr_term
 *
 * Function will be called by HAL to terminate all DTRs for
 * Ring(s) type of channels.
 */
static void
xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
    xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
{
	xgell_rxd_priv_t *rxd_priv =
	    ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
	xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;

	if (state == XGE_HAL_DTR_STATE_POSTED) {
		xgell_rx_ring_t *ring = rx_buffer->ring;

		mutex_enter(&ring->bf_pool.pool_lock);
		xge_hal_ring_dtr_free(channelh, dtrh);
		xgell_rx_buffer_release(rx_buffer);
		mutex_exit(&ring->bf_pool.pool_lock);
	}
}
/*
 * To open a rx ring.
 */
static boolean_t
xgell_rx_ring_open(xgell_rx_ring_t *rx_ring)
{
	xge_hal_status_e status;
	xge_hal_channel_attr_t attr;
	xgelldev_t *lldev = rx_ring->lldev;
	xge_hal_device_t *hldev = lldev->devh;

	if (rx_ring->live)
		return (B_TRUE);

	/* Create the buffer pool first */
	if (!xgell_rx_create_buffer_pool(rx_ring)) {
		xge_debug_ll(XGE_ERR, "can not create buffer pool for ring: %d",
		    rx_ring->index);
		return (B_FALSE);
	}

	/* Default ring initialization */
	attr.post_qid = rx_ring->index;
	attr.compl_qid = 0;
	attr.callback = xgell_rx_1b_callback;
	attr.per_dtr_space = sizeof (xgell_rxd_priv_t);
	attr.flags = 0;
	attr.type = XGE_HAL_CHANNEL_TYPE_RING;
	attr.dtr_init = xgell_rx_dtr_replenish;
	attr.dtr_term = xgell_rx_dtr_term;
	attr.userdata = rx_ring;

	status = xge_hal_channel_open(lldev->devh, &attr, &rx_ring->channelh,
	    XGE_HAL_CHANNEL_OC_NORMAL);
	if (status != XGE_HAL_OK) {
		xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel, status "
		    "code %d", XGELL_IFNAME, lldev->instance, status);
		(void) xgell_rx_destroy_buffer_pool(rx_ring);
		return (B_FALSE);
	}

	xgell_rx_ring_maddr_init(rx_ring);

	mutex_init(&rx_ring->ring_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hldev->irqh));

	rx_ring->poll_bytes = -1;
	rx_ring->polled_bytes = 0;
	rx_ring->poll_mp = NULL;
	rx_ring->live = B_TRUE;

	xge_debug_ll(XGE_TRACE, "RX ring [%d] is opened successfully",
	    rx_ring->index);

	return (B_TRUE);
}
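
/*
 * To close a rx ring: close the HAL channel, then try to destroy the
 * buffer pool. Destruction can fail while buffers are still loaned to
 * the upper layer, in which case the ring stays marked live.
 */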
static void
xgell_rx_ring_close(xgell_rx_ring_t *rx_ring)
{
	if (!rx_ring->live)
		return;
	xge_hal_channel_close(rx_ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
	rx_ring->channelh = NULL;
	/* This may not clean up all used buffers, driver will handle it */
	if (xgell_rx_destroy_buffer_pool(rx_ring))
		rx_ring->live = B_FALSE;

	mutex_destroy(&rx_ring->ring_lock);
}
/*
 * xgell_rx_open
 * @lldev: the link layer object
 *
 * Initialize and open all RX channels.
 */
static boolean_t
xgell_rx_open(xgelldev_t *lldev)
{
	xgell_rx_ring_t *rx_ring;
	int i;

	if (lldev->live_rx_rings != 0)
		return (B_TRUE);

	lldev->live_rx_rings = 0;

	/*
	 * Initialize all rings
	 */
	for (i = 0; i < lldev->init_rx_rings; i++) {
		rx_ring = &lldev->rx_ring[i];
		rx_ring->index = i;
		rx_ring->lldev = lldev;
		rx_ring->live = B_FALSE;

		if (!xgell_rx_ring_open(rx_ring))
			return (B_FALSE);

		lldev->live_rx_rings++;
	}

	return (B_TRUE);
}

static void
xgell_rx_close(xgelldev_t *lldev)
{
	xgell_rx_ring_t *rx_ring;
	int i;

	if (lldev->live_rx_rings == 0)
		return;

	/*
	 * Close all rx rings
	 */
	for (i = 0; i < lldev->init_rx_rings; i++) {
		rx_ring = &lldev->rx_ring[i];

		if (rx_ring->live) {
			xgell_rx_ring_close(rx_ring);
			lldev->live_rx_rings--;
		}
	}

	xge_assert(lldev->live_rx_rings == 0);
}
/*
 * xgell_tx_term
 *
 * Function will be called by HAL to terminate all DTRs for
 * Fifo(s) type of channels.
 */
static void
xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
    xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
{
	xgell_txd_priv_t *txd_priv =
	    ((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
	mblk_t *mp = txd_priv->mblk;
	int i;

	/*
	 * for Tx we must clean up the DTR *only* if it has been
	 * posted!
	 */
	if (state != XGE_HAL_DTR_STATE_POSTED) {
		return;
	}

	for (i = 0; i < txd_priv->handle_cnt; i++) {
		xge_assert(txd_priv->dma_handles[i]);
		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
		txd_priv->dma_handles[i] = 0;
	}

	xge_hal_fifo_dtr_free(channelh, dtrh);

	if (mp) {
		txd_priv->mblk = NULL;
		freemsg(mp);
	}
}
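
/*
 * To open a tx ring: set up the fifo channel attributes and open the
 * HAL channel for this ring.
 */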
static boolean_t
xgell_tx_ring_open(xgell_tx_ring_t *tx_ring)
{
	xge_hal_status_e status;
	xge_hal_channel_attr_t attr;
	xgelldev_t *lldev = tx_ring->lldev;

	if (tx_ring->live)
		return (B_TRUE);

	attr.post_qid = tx_ring->index;
	attr.compl_qid = 0;
	attr.callback = xgell_xmit_compl;
	attr.per_dtr_space = sizeof (xgell_txd_priv_t);
	attr.flags = 0;
	attr.type = XGE_HAL_CHANNEL_TYPE_FIFO;
	attr.dtr_init = NULL;
	attr.dtr_term = xgell_tx_term;
	attr.userdata = tx_ring;

	status = xge_hal_channel_open(lldev->devh, &attr, &tx_ring->channelh,
	    XGE_HAL_CHANNEL_OC_NORMAL);
	if (status != XGE_HAL_OK) {
		xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel, status "
		    "code %d", XGELL_IFNAME, lldev->instance, status);
		return (B_FALSE);
	}

	tx_ring->live = B_TRUE;

	return (B_TRUE);
}

static void
xgell_tx_ring_close(xgell_tx_ring_t *tx_ring)
{
	if (!tx_ring->live)
		return;
	xge_hal_channel_close(tx_ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
	tx_ring->live = B_FALSE;
}
/*
 * xgell_tx_open
 * @lldev: the link layer object
 *
 * Initialize and open all TX channels.
 */
static boolean_t
xgell_tx_open(xgelldev_t *lldev)
{
	xgell_tx_ring_t *tx_ring;
	int i;

	if (lldev->live_tx_rings != 0)
		return (B_TRUE);

	lldev->live_tx_rings = 0;

	/*
	 * Enable rings by reserve sequence to match the h/w sequences.
	 */
	for (i = 0; i < lldev->init_tx_rings; i++) {
		tx_ring = &lldev->tx_ring[i];
		tx_ring->index = i;
		tx_ring->lldev = lldev;
		tx_ring->live = B_FALSE;

		if (!xgell_tx_ring_open(tx_ring))
			return (B_FALSE);

		lldev->live_tx_rings++;
	}

	return (B_TRUE);
}

static void
xgell_tx_close(xgelldev_t *lldev)
{
	xgell_tx_ring_t *tx_ring;
	int i;

	if (lldev->live_tx_rings == 0)
		return;

	/*
	 * Close all tx rings.
	 */
	for (i = 0; i < lldev->init_tx_rings; i++) {
		tx_ring = &lldev->tx_ring[i];
		if (tx_ring->live) {
			xgell_tx_ring_close(tx_ring);
			lldev->live_tx_rings--;
		}
	}
}
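
/*
 * xgell_initiate_start
 *
 * Validate and apply the configured MTU, tune the rx UFC interrupt
 * coalescing counters for normal vs. jumbo frames, enable the device,
 * open all rx/tx rings and finally enable interrupts.
 */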
static int
xgell_initiate_start(xgelldev_t *lldev)
{
	xge_hal_status_e status;
	xge_hal_device_t *hldev = lldev->devh;
	int maxpkt = hldev->config.mtu;

	/* check initial mtu before enabling the device */
	status = xge_hal_device_mtu_check(lldev->devh, maxpkt);
	if (status != XGE_HAL_OK) {
		xge_debug_ll(XGE_ERR, "%s%d: MTU size %d is invalid",
		    XGELL_IFNAME, lldev->instance, maxpkt);
		return (EINVAL);
	}

	/* set initial mtu before enabling the device */
	status = xge_hal_device_mtu_set(lldev->devh, maxpkt);
	if (status != XGE_HAL_OK) {
		xge_debug_ll(XGE_ERR, "%s%d: can not set new MTU %d",
		    XGELL_IFNAME, lldev->instance, maxpkt);
		return (EIO);
	}

	/* tune jumbo/normal frame UFC counters */
	hldev->config.ring.queue[XGELL_RX_RING_MAIN].rti.ufc_b =
	    (maxpkt > XGE_HAL_DEFAULT_MTU) ?
	    XGE_HAL_DEFAULT_RX_UFC_B_J :
	    XGE_HAL_DEFAULT_RX_UFC_B_N;

	hldev->config.ring.queue[XGELL_RX_RING_MAIN].rti.ufc_c =
	    (maxpkt > XGE_HAL_DEFAULT_MTU) ?
	    XGE_HAL_DEFAULT_RX_UFC_C_J :
	    XGE_HAL_DEFAULT_RX_UFC_C_N;

	/* now, enable the device */
	status = xge_hal_device_enable(lldev->devh);
	if (status != XGE_HAL_OK) {
		xge_debug_ll(XGE_ERR, "%s%d: can not enable the device",
		    XGELL_IFNAME, lldev->instance);
		return (EIO);
	}

	if (!xgell_rx_open(lldev)) {
		status = xge_hal_device_disable(lldev->devh);
		if (status != XGE_HAL_OK) {
			u64 adapter_status;
			(void) xge_hal_device_status(lldev->devh,
			    &adapter_status);
			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
			    "the device. adapter status 0x%"PRIx64
			    " returned status %d",
			    XGELL_IFNAME, lldev->instance,
			    (uint64_t)adapter_status, status);
		}
		xgell_rx_close(lldev);
		xge_os_mdelay(1500);
		return (ENOMEM);
	}

	if (!xgell_tx_open(lldev)) {
		status = xge_hal_device_disable(lldev->devh);
		if (status != XGE_HAL_OK) {
			u64 adapter_status;
			(void) xge_hal_device_status(lldev->devh,
			    &adapter_status);
			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
			    "the device. adapter status 0x%"PRIx64
			    " returned status %d",
			    XGELL_IFNAME, lldev->instance,
			    (uint64_t)adapter_status, status);
		}
		xgell_tx_close(lldev);
		xgell_rx_close(lldev);
		xge_os_mdelay(1500);
		return (ENOMEM);
	}

	/* time to enable interrupts */
	(void) xge_enable_intrs(lldev);
	xge_hal_device_intr_enable(lldev->devh);

	lldev->is_initialized = 1;

	return (0);
}
static void
xgell_initiate_stop(xgelldev_t *lldev)
{
	xge_hal_status_e status;

	lldev->is_initialized = 0;

	status = xge_hal_device_disable(lldev->devh);
	if (status != XGE_HAL_OK) {
		u64 adapter_status;
		(void) xge_hal_device_status(lldev->devh, &adapter_status);
		xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
		    "the device. adapter status 0x%"PRIx64" returned status %d",
		    XGELL_IFNAME, lldev->instance,
		    (uint64_t)adapter_status, status);
	}
	xge_hal_device_intr_disable(lldev->devh);
	/* disable OS ISR's */
	xge_disable_intrs(lldev);

	xge_debug_ll(XGE_TRACE, "%s",
	    "waiting for device irq to become quiescent...");
	xge_os_mdelay(1500);

	xge_queue_flush(xge_hal_device_queue(lldev->devh));

	xgell_rx_close(lldev);
	xgell_tx_close(lldev);
}
/*
 * xgell_m_start
 * @arg: pointer to device private structure (hldev)
 *
 * This function is called by MAC Layer to enable the XFRAME
 * firmware to generate interrupts and also prepare the
 * driver to call mac_rx for delivering receive packets
 * to MAC Layer.
 */
static int
xgell_m_start(void *arg)
{
	xgelldev_t *lldev = arg;
	xge_hal_device_t *hldev = lldev->devh;
	int ret;

	xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME,
	    lldev->instance);

	mutex_enter(&lldev->genlock);

	if (lldev->is_initialized) {
		xge_debug_ll(XGE_ERR, "%s%d: device is already initialized",
		    XGELL_IFNAME, lldev->instance);
		mutex_exit(&lldev->genlock);
		return (EINVAL);
	}

	hldev->terminating = 0;
	if ((ret = xgell_initiate_start(lldev)) != 0) {
		mutex_exit(&lldev->genlock);
		return (ret);
	}

	lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);

	mutex_exit(&lldev->genlock);

	return (0);
}
/*
 * xgell_m_stop
 * @arg: pointer to device private data (hldev)
 *
 * This function is called by the MAC Layer to disable
 * the XFRAME firmware for generating any interrupts and
 * also stop the driver from calling mac_rx() for
 * delivering data packets to the MAC Layer.
 */
static void
xgell_m_stop(void *arg)
{
	xgelldev_t *lldev = arg;
	xge_hal_device_t *hldev = lldev->devh;

	xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP");

	mutex_enter(&lldev->genlock);
	if (!lldev->is_initialized) {
		xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
		mutex_exit(&lldev->genlock);
		return;
	}

	xge_hal_device_terminating(hldev);
	xgell_initiate_stop(lldev);

	/* reset device */
	(void) xge_hal_device_reset(lldev->devh);

	mutex_exit(&lldev->genlock);

	if (lldev->timeout_id != 0) {
		(void) untimeout(lldev->timeout_id);
	}

	xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer...");
}
/*
 * xgell_onerr_reset
 * @lldev: pointer to xgelldev_t structure
 *
 * This function is called by the HAL Event framework to reset the HW.
 * It must be called with genlock taken.
 */
int
xgell_onerr_reset(xgelldev_t *lldev)
{
	int rc = 0;

	if (!lldev->is_initialized) {
		xge_debug_ll(XGE_ERR, "%s%d: can not reset",
		    XGELL_IFNAME, lldev->instance);
		return (rc);
	}

	lldev->in_reset = 1;
	xgell_initiate_stop(lldev);

	/* reset device */
	(void) xge_hal_device_reset(lldev->devh);

	rc = xgell_initiate_start(lldev);
	lldev->in_reset = 0;

	return (rc);
}
/*
 * xgell_m_multicst
 * @arg: pointer to device private structure (hldev)
 * @add: B_TRUE to enable reception, B_FALSE to disable it
 * @mc_addr: the multicast address
 *
 * This function is called by MAC Layer to enable or
 * disable device-level reception of specific multicast addresses.
 */
static int
xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
{
	xge_hal_status_e status;
	xgelldev_t *lldev = (xgelldev_t *)arg;
	xge_hal_device_t *hldev = lldev->devh;

	xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add);

	mutex_enter(&lldev->genlock);

	if (!lldev->is_initialized) {
		xge_debug_ll(XGE_ERR, "%s%d: can not set multicast",
		    XGELL_IFNAME, lldev->instance);
		mutex_exit(&lldev->genlock);
		return (EIO);
	}

	/* FIXME: missing HAL functionality: enable_one() */

	status = (add) ?
	    xge_hal_device_mcast_enable(hldev) :
	    xge_hal_device_mcast_disable(hldev);

	if (status != XGE_HAL_OK) {
		xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d",
		    add ? "enable" : "disable", status);
		mutex_exit(&lldev->genlock);
		return (EIO);
	}

	mutex_exit(&lldev->genlock);

	return (0);
}
/*
 * xgell_m_promisc
 * @arg: pointer to device private structure (hldev)
 * @on: B_TRUE to enable promiscuous mode, B_FALSE to disable it
 *
 * This function is called by MAC Layer to enable or
 * disable the reception of all the packets on the medium
 */
static int
xgell_m_promisc(void *arg, boolean_t on)
{
	xgelldev_t *lldev = (xgelldev_t *)arg;
	xge_hal_device_t *hldev = lldev->devh;

	mutex_enter(&lldev->genlock);

	xge_debug_ll(XGE_TRACE, "%s", "MAC_PROMISC_SET");

	if (!lldev->is_initialized) {
		xge_debug_ll(XGE_ERR, "%s%d: can not set promiscuous",
		    XGELL_IFNAME, lldev->instance);
		mutex_exit(&lldev->genlock);
		return (EIO);
	}

	if (on) {
		xge_hal_device_promisc_enable(hldev);
	} else {
		xge_hal_device_promisc_disable(hldev);
	}

	mutex_exit(&lldev->genlock);

	return (0);
}
/*
 * xgell_m_stat
 * @arg: pointer to device private structure (hldev)
 *
 * This function is called by MAC Layer to get network statistics
 * from the driver.
 */
static int
xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	xge_hal_stats_hw_info_t *hw_info;
	xgelldev_t *lldev = (xgelldev_t *)arg;
	xge_hal_device_t *hldev = lldev->devh;

	xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET");

	mutex_enter(&lldev->genlock);

	if (!lldev->is_initialized) {
		mutex_exit(&lldev->genlock);
		return (EAGAIN);
	}

	if (xge_hal_stats_hw(hldev, &hw_info) != XGE_HAL_OK) {
		mutex_exit(&lldev->genlock);
		return (EAGAIN);
	}

	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = 10000000000ull; /* 10G */
		break;

	case MAC_STAT_MULTIRCV:
		*val = ((u64) hw_info->rmac_vld_mcst_frms_oflow << 32) |
		    hw_info->rmac_vld_mcst_frms;
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = ((u64) hw_info->rmac_vld_bcst_frms_oflow << 32) |
		    hw_info->rmac_vld_bcst_frms;
		break;

	case MAC_STAT_MULTIXMT:
		*val = ((u64) hw_info->tmac_mcst_frms_oflow << 32) |
		    hw_info->tmac_mcst_frms;
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = ((u64) hw_info->tmac_bcst_frms_oflow << 32) |
		    hw_info->tmac_bcst_frms;
		break;

	case MAC_STAT_RBYTES:
		*val = ((u64) hw_info->rmac_ttl_octets_oflow << 32) |
		    hw_info->rmac_ttl_octets;
		break;

	case MAC_STAT_NORCVBUF:
		*val = hw_info->rmac_drop_frms;
		break;

	case MAC_STAT_IERRORS:
		*val = ((u64) hw_info->rmac_discarded_frms_oflow << 32) |
		    hw_info->rmac_discarded_frms;
		break;

	case MAC_STAT_OBYTES:
		*val = ((u64) hw_info->tmac_ttl_octets_oflow << 32) |
		    hw_info->tmac_ttl_octets;
		break;

	case MAC_STAT_NOXMTBUF:
		*val = hw_info->tmac_drop_frms;
		break;

	case MAC_STAT_OERRORS:
		*val = ((u64) hw_info->tmac_any_err_frms_oflow << 32) |
		    hw_info->tmac_any_err_frms;
		break;

	case MAC_STAT_IPACKETS:
		*val = ((u64) hw_info->rmac_vld_frms_oflow << 32) |
		    hw_info->rmac_vld_frms;
		break;

	case MAC_STAT_OPACKETS:
		*val = ((u64) hw_info->tmac_frms_oflow << 32) |
		    hw_info->tmac_frms;
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = hw_info->rmac_fcs_err_frms;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		*val = hw_info->rmac_long_frms;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = LINK_DUPLEX_FULL;
		break;

	default:
		mutex_exit(&lldev->genlock);
		return (ENOTSUP);
	}

	mutex_exit(&lldev->genlock);

	return (0);
}
/*
 * Retrieve a value for one of the statistics for a particular rx ring
 */
int
xgell_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
{
	xgell_rx_ring_t *rx_ring = (xgell_rx_ring_t *)rh;

	switch (stat) {
	case MAC_STAT_RBYTES:
		*val = rx_ring->rx_bytes;
		break;

	case MAC_STAT_IPACKETS:
		*val = rx_ring->rx_pkts;
		break;

	default:
		*val = 0;
		return (ENOTSUP);
	}

	return (0);
}
/*
 * Retrieve a value for one of the statistics for a particular tx ring
 */
int
xgell_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
{
	xgell_tx_ring_t *tx_ring = (xgell_tx_ring_t *)rh;

	switch (stat) {
	case MAC_STAT_OBYTES:
		*val = tx_ring->tx_bytes;
		break;

	case MAC_STAT_OPACKETS:
		*val = tx_ring->tx_pkts;
		break;

	default:
		*val = 0;
		return (ENOTSUP);
	}

	return (0);
}
/*
 * xgell_device_alloc - Allocate new LL device
 */
int
xgell_device_alloc(xge_hal_device_h devh,
    dev_info_t *dev_info, xgelldev_t **lldev_out)
{
	xgelldev_t *lldev;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	int instance = ddi_get_instance(dev_info);

	*lldev_out = NULL;

	xge_debug_ll(XGE_TRACE, "trying to register ethernet device %s%d...",
	    XGELL_IFNAME, instance);

	lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP);

	lldev->devh = hldev;
	lldev->instance = instance;
	lldev->dev_info = dev_info;

	*lldev_out = lldev;

	ddi_set_driver_private(dev_info, (caddr_t)hldev);

	return (DDI_SUCCESS);
}
/*
 * xgell_device_free
 */
void
xgell_device_free(xgelldev_t *lldev)
{
	xge_debug_ll(XGE_TRACE, "freeing device %s%d",
	    XGELL_IFNAME, lldev->instance);

	kmem_free(lldev, sizeof (xgelldev_t));
}
/*
 * xgell_m_ioctl
 */
static void
xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	xgelldev_t *lldev = arg;
	struct iocblk *iocp;
	int err = 0;
	int cmd;
	int need_privilege = 1;
	int ret = 0;

	iocp = (struct iocblk *)mp->b_rptr;
	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;
	xge_debug_ll(XGE_TRACE, "MAC_IOCTL cmd 0x%x", cmd);
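	/*
	 * Only the ndd get/set ioctls are supported.  ND_GET may be
	 * issued by any caller, while ND_SET requires network
	 * configuration privilege, checked via secpolicy_net_config().
	 */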
2370 switch (cmd) {
2371 case ND_GET:
2372 need_privilege = 0;
2373 /* FALLTHRU */
2374 case ND_SET:
2375 break;
2376 default:
2377 xge_debug_ll(XGE_TRACE, "unknown cmd 0x%x", cmd);
2378 miocnak(wq, mp, 0, EINVAL);
2379 return;
2382 if (need_privilege) {
2383 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2384 if (err != 0) {
2385 xge_debug_ll(XGE_ERR,
2386 "drv_priv(): rejected cmd 0x%x, err %d",
2387 cmd, err);
2388 miocnak(wq, mp, 0, err);
2389 return;
	switch (cmd) {
	case ND_GET:
		/*
		 * If nd_getset() returns B_FALSE, the command was
		 * not valid (e.g. unknown name), so we just tell the
		 * top-level ioctl code to send a NAK (with code EINVAL).
		 *
		 * Otherwise, nd_getset() will have built the reply to
		 * be sent (but not actually sent it), so we tell the
		 * caller to send the prepared reply.
		 */
		ret = nd_getset(wq, lldev->ndp, mp);
		xge_debug_ll(XGE_TRACE, "%s", "got ndd get ioctl");
		break;

	case ND_SET:
		ret = nd_getset(wq, lldev->ndp, mp);
		xge_debug_ll(XGE_TRACE, "%s", "got ndd set ioctl");
		break;

	default:
		break;
	}

	if (ret == B_FALSE) {
		xge_debug_ll(XGE_ERR,
		    "nd_getset(): rejected cmd 0x%x, err %d",
		    cmd, err);
		miocnak(wq, mp, 0, EINVAL);
	} else {
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
	}
}
static boolean_t
xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	xgelldev_t *lldev = arg;

	xge_debug_ll(XGE_TRACE, "xgell_m_getcapab: %x", cap);

	switch (cap) {
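	/*
	 * Advertise full hardware checksum offload for IPv4 and IPv6
	 * payloads, plus IP header checksumming, on transmit.
	 */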
	case MAC_CAPAB_HCKSUM: {
		uint32_t *hcksum_txflags = cap_data;
		*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 |
		    HCKSUM_IPHDRCKSUM;
		break;
	}
	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *cap_lso = cap_data;

		if (lldev->config.lso_enable) {
			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
			cap_lso->lso_basic_tcp_ipv4.lso_max = XGELL_LSO_MAXLEN;
			break;
		} else {
			return (B_FALSE);
		}
	}
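	/*
	 * Export the statically grouped rx/tx rings to the MAC layer;
	 * the ring and group counts were fixed when the device was
	 * initialized.
	 */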
	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *cap_rings = cap_data;

		switch (cap_rings->mr_type) {
		case MAC_RING_TYPE_RX:
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = lldev->init_rx_rings;
			cap_rings->mr_gnum = lldev->init_rx_groups;
			cap_rings->mr_rget = xgell_fill_ring;
			cap_rings->mr_gget = xgell_fill_group;
			break;
		case MAC_RING_TYPE_TX:
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = lldev->init_tx_rings;
			cap_rings->mr_gnum = 0;
			cap_rings->mr_rget = xgell_fill_ring;
			cap_rings->mr_gget = NULL;
			break;
		default:
			break;
		}
		break;
	}
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}
static int
xgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
{
	xgelldev_t *lldev = (xgelldev_t *)cp;
	xge_hal_status_e status;
	int count = 0, retsize;
	char *buf;

	buf = kmem_alloc(XGELL_STATS_BUFSIZE, KM_SLEEP);
	if (buf == NULL) {
		return (ENOSPC);
	}
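	/*
	 * Concatenate the tmac, rmac, pci, sw_dev and hal statistics
	 * sections into a single buffer for the "stats" ndd parameter.
	 */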
	status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE,
	    buf, &retsize);
	if (status != XGE_HAL_OK) {
		kmem_free(buf, XGELL_STATS_BUFSIZE);
		xge_debug_ll(XGE_ERR, "tmac_read(): status %d", status);
		return (EINVAL);
	}
	count += retsize;

	status = xge_hal_aux_stats_rmac_read(lldev->devh,
	    XGELL_STATS_BUFSIZE - count,
	    buf + count, &retsize);
	if (status != XGE_HAL_OK) {
		kmem_free(buf, XGELL_STATS_BUFSIZE);
		xge_debug_ll(XGE_ERR, "rmac_read(): status %d", status);
		return (EINVAL);
	}
	count += retsize;

	status = xge_hal_aux_stats_pci_read(lldev->devh,
	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
	if (status != XGE_HAL_OK) {
		kmem_free(buf, XGELL_STATS_BUFSIZE);
		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
		return (EINVAL);
	}
	count += retsize;

	status = xge_hal_aux_stats_sw_dev_read(lldev->devh,
	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
	if (status != XGE_HAL_OK) {
		kmem_free(buf, XGELL_STATS_BUFSIZE);
		xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
		return (EINVAL);
	}
	count += retsize;

	status = xge_hal_aux_stats_hal_read(lldev->devh,
	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
	if (status != XGE_HAL_OK) {
		kmem_free(buf, XGELL_STATS_BUFSIZE);
		xge_debug_ll(XGE_ERR, "hal_read(): status %d", status);
		return (EINVAL);
	}
	count += retsize;

	*(buf + count - 1) = '\0'; /* remove last '\n' */
	(void) mi_mpprintf(mp, "%s", buf);
	kmem_free(buf, XGELL_STATS_BUFSIZE);

	return (0);
}
static int
xgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
{
	xgelldev_t *lldev = (xgelldev_t *)cp;
	xge_hal_status_e status;
	int retsize;
	char *buf;

	buf = kmem_alloc(XGELL_PCICONF_BUFSIZE, KM_SLEEP);
	if (buf == NULL) {
		return (ENOSPC);
	}
	status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
	    buf, &retsize);
	if (status != XGE_HAL_OK) {
		kmem_free(buf, XGELL_PCICONF_BUFSIZE);
		xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
		return (EINVAL);
	}
	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
	(void) mi_mpprintf(mp, "%s", buf);
	kmem_free(buf, XGELL_PCICONF_BUFSIZE);

	return (0);
}
static int
xgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
{
	xgelldev_t *lldev = (xgelldev_t *)cp;
	xge_hal_status_e status;
	int retsize;
	char *buf;

	buf = kmem_alloc(XGELL_ABOUT_BUFSIZE, KM_SLEEP);
	if (buf == NULL) {
		return (ENOSPC);
	}
	status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
	    buf, &retsize);
	if (status != XGE_HAL_OK) {
		kmem_free(buf, XGELL_ABOUT_BUFSIZE);
		xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
		return (EINVAL);
	}
	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
	(void) mi_mpprintf(mp, "%s", buf);
	kmem_free(buf, XGELL_ABOUT_BUFSIZE);

	return (0);
}
static unsigned long bar0_offset = 0x110; /* adapter_control */
static int
xgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
{
	xgelldev_t *lldev = (xgelldev_t *)cp;
	xge_hal_status_e status;
	int retsize;
	char *buf;

	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
	if (buf == NULL) {
		return (ENOSPC);
	}
	status = xge_hal_aux_bar0_read(lldev->devh, bar0_offset,
	    XGELL_IOCTL_BUFSIZE, buf, &retsize);
	if (status != XGE_HAL_OK) {
		kmem_free(buf, XGELL_IOCTL_BUFSIZE);
		xge_debug_ll(XGE_ERR, "bar0_read(): status %d", status);
		return (EINVAL);
	}
	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
	(void) mi_mpprintf(mp, "%s", buf);
	kmem_free(buf, XGELL_IOCTL_BUFSIZE);

	return (0);
}
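/*
 * Set a new BAR0 offset for subsequent "bar0" reads; the value is
 * parsed as hex, with an optional leading 0x/0X prefix.
 */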
static int
xgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
{
	unsigned long old_offset = bar0_offset;
	char *end;

	if (value && *value == '0' &&
	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
		value += 2;
	}

	bar0_offset = mi_strtol(value, &end, 16);
	if (end == value) {
		bar0_offset = old_offset;
		return (EINVAL);
	}

	xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset);

	return (0);
}
static int
xgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
{
	char *buf;

	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
	if (buf == NULL) {
		return (ENOSPC);
	}
	(void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level());
	kmem_free(buf, XGELL_IOCTL_BUFSIZE);

	return (0);
}
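/*
 * Validate and apply a new debug level; accepted values range from
 * XGE_NONE to XGE_ERR.
 */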
static int
xgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
    cred_t *credp)
{
	int level;
	char *end;

	level = mi_strtol(value, &end, 10);
	if (level < XGE_NONE || level > XGE_ERR || end == value) {
		return (EINVAL);
	}

	xge_hal_driver_debug_level_set(level);

	return (0);
}
static int
xgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
{
	char *buf;

	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
	if (buf == NULL) {
		return (ENOSPC);
	}
	(void) mi_mpprintf(mp, "debug_module_mask 0x%08x",
	    xge_hal_driver_debug_module_mask());
	kmem_free(buf, XGELL_IOCTL_BUFSIZE);

	return (0);
}
static int
xgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
    cred_t *credp)
{
	u32 mask;
	char *end;

	if (value && *value == '0' &&
	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
		value += 2;
	}

	mask = mi_strtol(value, &end, 16);
	if (end == value) {
		return (EINVAL);
	}

	xge_hal_driver_debug_module_mask_set(mask);

	return (0);
}
static int
xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
{
	xgelldev_t *lldev = (xgelldev_t *)(void *)cp;
	xge_hal_status_e status;
	int retsize;
	char *buf;

	buf = kmem_alloc(XGELL_DEVCONF_BUFSIZE, KM_SLEEP);
	if (buf == NULL) {
		return (ENOSPC);
	}
	status = xge_hal_aux_device_config_read(lldev->devh,
	    XGELL_DEVCONF_BUFSIZE, buf, &retsize);
	if (status != XGE_HAL_OK) {
		kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
		xge_debug_ll(XGE_ERR, "device_config_read(): status %d",
		    status);
		return (EINVAL);
	}
	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
	(void) mi_mpprintf(mp, "%s", buf);
	kmem_free(buf, XGELL_DEVCONF_BUFSIZE);

	return (0);
}
/*
 * xgell_device_register
 * @lldev: pointer to valid LL device.
 * @config: pointer to this network device configuration.
 *
 * This function will register the network device with the MAC layer.
 */
int
xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
{
	mac_register_t *macp = NULL;
	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;

	/*
	 * Initialize the NDD interface used for internal debugging.
	 */
	if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
	    (caddr_t)lldev) == B_FALSE)
		goto xgell_ndd_fail;

	if (nd_load(&lldev->ndp, "about", xgell_about_get, NULL,
	    (caddr_t)lldev) == B_FALSE)
		goto xgell_ndd_fail;

	if (nd_load(&lldev->ndp, "stats", xgell_stats_get, NULL,
	    (caddr_t)lldev) == B_FALSE)
		goto xgell_ndd_fail;

	if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set,
	    (caddr_t)lldev) == B_FALSE)
		goto xgell_ndd_fail;

	if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get,
	    xgell_debug_level_set, (caddr_t)lldev) == B_FALSE)
		goto xgell_ndd_fail;

	if (nd_load(&lldev->ndp, "debug_module_mask",
	    xgell_debug_module_mask_get, xgell_debug_module_mask_set,
	    (caddr_t)lldev) == B_FALSE)
		goto xgell_ndd_fail;

	if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL,
	    (caddr_t)lldev) == B_FALSE)
		goto xgell_ndd_fail;

	bcopy(config, &lldev->config, sizeof (xgell_config_t));

	mutex_init(&lldev->genlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hldev->irqh));
	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
		goto xgell_register_fail;
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = lldev;
	macp->m_dip = lldev->dev_info;
	macp->m_src_addr = hldev->macaddr[0];
	macp->m_callbacks = &xgell_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = hldev->config.mtu;
	macp->m_margin = VLAN_TAGSZ;
	macp->m_v12n = MAC_VIRT_LEVEL1;
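	/*
	 * MAC_VIRT_LEVEL1 advertises level-1 MAC virtualization, which
	 * lets the MAC layer make use of the rings and groups exported
	 * through MAC_CAPAB_RINGS above.
	 */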
	/*
	 * MAC Registration.
	 */
	if (mac_register(macp, &lldev->mh) != 0)
		goto xgell_register_fail;

	/* Always free the macp after register */
	if (macp != NULL)
		mac_free(macp);

	/* Calculate tx_copied_max here ??? */
	lldev->tx_copied_max = hldev->config.fifo.max_frags *
	    hldev->config.fifo.alignment_size *
	    hldev->config.fifo.max_aligned_frags;
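	/*
	 * tx_copied_max appears to bound how many bytes of a packet the
	 * Tx path may bcopy into the pre-allocated aligned fifo buffers;
	 * larger packets would be DMA-bound instead of copied.
	 */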
	xge_debug_ll(XGE_TRACE, "ethernet device %s%d registered",
	    XGELL_IFNAME, lldev->instance);

	return (DDI_SUCCESS);

xgell_ndd_fail:
	nd_free(&lldev->ndp);
	xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
	return (DDI_FAILURE);

xgell_register_fail:
	if (macp != NULL)
		mac_free(macp);
	nd_free(&lldev->ndp);
	mutex_destroy(&lldev->genlock);
	xge_debug_ll(XGE_ERR, "%s", "unable to register networking device");
	return (DDI_FAILURE);
}
/*
 * xgell_device_unregister
 * @lldev: pointer to valid LL device.
 *
 * This function will unregister the network device and free its
 * NDD parameters.
 */
int
xgell_device_unregister(xgelldev_t *lldev)
{
	if (mac_unregister(lldev->mh) != 0) {
		xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
		    XGELL_IFNAME, lldev->instance);
		return (DDI_FAILURE);
	}

	mutex_destroy(&lldev->genlock);

	nd_free(&lldev->ndp);

	xge_debug_ll(XGE_TRACE, "ethernet device %s%d unregistered",
	    XGELL_IFNAME, lldev->instance);

	return (DDI_SUCCESS);
}