kernel/drivers/net/e1000g/e1000g_alloc.c
/*
 * This file is provided under a CDDLv1 license. When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * **********************************************************************
 * Module Name:                                                         *
 *   e1000g_alloc.c                                                     *
 *                                                                      *
 * Abstract:                                                            *
 *   This file contains some routines that take care of                 *
 *   memory allocation for descriptors and buffers.                     *
 *                                                                      *
 * **********************************************************************
 */

#include "e1000g_sw.h"
#include "e1000g_debug.h"

#define	TX_SW_PKT_AREA_SZ \
	(sizeof (tx_sw_packet_t) * Adapter->tx_freelist_num)
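
/*
 * Note that the macro above expands to an expression that reads the
 * local variable "Adapter" at each site where it is used.
 */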

static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_descriptors(e1000g_rx_data_t *);
static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *);
static void e1000g_free_rx_descriptors(e1000g_rx_data_t *);
static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_packets(e1000g_rx_data_t *);
static void e1000g_free_tx_packets(e1000g_tx_ring_t *);
static void e1000g_free_rx_packets(e1000g_rx_data_t *, boolean_t);
static int e1000g_alloc_dma_buffer(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);

/*
 * To avoid address errors from crossing a 64KB boundary during PCI-X
 * packet reception, e1000g_alloc_dma_buffer_82546 is used for the
 * adapter types that require it.
 */
static int e1000g_alloc_dma_buffer_82546(struct e1000g *,
    dma_buffer_t *, size_t, ddi_dma_attr_t *p_dma_attr);
static int e1000g_dma_mem_alloc_82546(dma_buffer_t *buf,
    size_t size, size_t *len);
static boolean_t e1000g_cross_64k_bound(void *, uintptr_t);

static void e1000g_free_dma_buffer(dma_buffer_t *);
static int e1000g_alloc_descriptors(struct e1000g *Adapter);
static void e1000g_free_descriptors(struct e1000g *Adapter);
static int e1000g_alloc_packets(struct e1000g *Adapter);
static void e1000g_free_packets(struct e1000g *Adapter);
static p_rx_sw_packet_t e1000g_alloc_rx_sw_packet(e1000g_rx_data_t *,
    ddi_dma_attr_t *p_dma_attr);

/* DMA access attributes for descriptors <Little Endian> */
static ddi_device_acc_attr_t e1000g_desc_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* DMA access attributes for DMA buffers */
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/* DMA attributes for tx mblk buffers */
static ddi_dma_attr_t e1000g_tx_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	MAX_COOKIES,		/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

/* DMA attributes for pre-allocated rx/tx buffers */
static ddi_dma_attr_t e1000g_buf_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	1,			/* alignment in bytes */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

/* DMA attributes for rx/tx descriptors */
static ddi_dma_attr_t e1000g_desc_dma_attr = {
	DMA_ATTR_V0,		/* version of this structure */
	0,			/* lowest usable address */
	0xffffffffffffffffULL,	/* highest usable address */
	0x7fffffff,		/* maximum DMAable byte count */
	E1000_MDALIGN,		/* default alignment is 4k but can be changed */
	0x7ff,			/* burst sizes (any?) */
	1,			/* minimum transfer */
	0xffffffffU,		/* maximum transfer */
	0xffffffffffffffffULL,	/* maximum segment length */
	1,			/* maximum number of segments */
	1,			/* granularity */
	DDI_DMA_FLAGERR,	/* dma_attr_flags */
};

static dma_type_t e1000g_dma_type = USE_DMA;

extern krwlock_t e1000g_dma_type_lock;

int
e1000g_alloc_dma_resources(struct e1000g *Adapter)
{
	int result;

	result = DDI_FAILURE;

	while ((result != DDI_SUCCESS) &&
	    (Adapter->tx_desc_num >= MIN_NUM_TX_DESCRIPTOR) &&
	    (Adapter->rx_desc_num >= MIN_NUM_RX_DESCRIPTOR) &&
	    (Adapter->tx_freelist_num >= MIN_NUM_TX_FREELIST)) {

		result = e1000g_alloc_descriptors(Adapter);

		if (result == DDI_SUCCESS) {
			result = e1000g_alloc_packets(Adapter);

			if (result != DDI_SUCCESS)
				e1000g_free_descriptors(Adapter);
		}

		/*
		 * If the allocation fails due to resource shortage,
		 * we'll reduce the numbers of descriptors/buffers by
		 * half, and try the allocation again.
		 */
		if (result != DDI_SUCCESS) {
			/*
			 * We must ensure the number of descriptors
			 * is always a multiple of 8.
			 */
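			/*
			 * Illustration with assumed values: (n >> 4) << 3
			 * halves n while truncating the result to a multiple
			 * of 8, e.g. 4096 becomes 2048 and 520 becomes 256.
			 */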
			Adapter->tx_desc_num =
			    (Adapter->tx_desc_num >> 4) << 3;
			Adapter->rx_desc_num =
			    (Adapter->rx_desc_num >> 4) << 3;

			Adapter->tx_freelist_num >>= 1;
		}
	}

	return (result);
}

/*
 * e1000g_alloc_descriptors - allocate DMA buffers for descriptors
 *
 * This routine allocates the necessary DMA buffers for the
 *	Transmit Descriptor Area
 *	Receive Descriptor Area
 */
static int
e1000g_alloc_descriptors(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	if (Adapter->mem_workaround_82546 &&
	    ((Adapter->shared.mac.type == e1000_82545) ||
	    (Adapter->shared.mac.type == e1000_82546) ||
	    (Adapter->shared.mac.type == e1000_82546_rev_3))) {
		/* Align on a 64k boundary for these adapter types */
		Adapter->desc_align = E1000_MDALIGN_82546;
	} else {
		/* Align on a 4k boundary for all other adapter types */
		Adapter->desc_align = E1000_MDALIGN;
	}

	tx_ring = Adapter->tx_ring;

	result = e1000g_alloc_tx_descriptors(tx_ring);
	if (result != DDI_SUCCESS)
		return (DDI_FAILURE);

	rx_data = Adapter->rx_ring->rx_data;

	result = e1000g_alloc_rx_descriptors(rx_data);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void
e1000g_free_descriptors(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	tx_ring = Adapter->tx_ring;
	rx_data = Adapter->rx_ring->rx_data;

	e1000g_free_tx_descriptors(tx_ring);
	e1000g_free_rx_descriptors(rx_data);
}

static int
e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Solaris 7 has a problem with allocating physically contiguous
	 * memory that is aligned on a 4K boundary. The transmit and rx
	 * descriptors need to be aligned on a 4Kbyte boundary. We first try
	 * to allocate the memory with DMA attributes set to 4K alignment
	 * and no scatter/gather mechanism specified. In most cases, this
	 * does not allocate memory aligned at a 4Kbyte boundary. We then
	 * try asking for memory aligned on a 4K boundary with scatter/gather
	 * set to 2. This works when the amount of memory is less than 4K,
	 * i.e. a page size. If neither of these options works, or if the
	 * descriptor area is larger than 4K (i.e. more than 256 descriptors),
	 * we allocate 4K of extra memory and then align the memory at a 4K
	 * boundary ourselves.
	 */
	size = sizeof (struct e1000_tx_desc) * Adapter->tx_desc_num;

	/*
	 * Memory allocation for the transmit buffer descriptors.
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = Adapter->desc_align;

	/*
	 * Allocate a new DMA handle for the transmit descriptor
	 * memory area.
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &tx_ring->tbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate tbd dma handle: %d", mystat);
		tx_ring->tbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&tx_ring->tbd_area,
	    &len, &tx_ring->tbd_acc_handle);

	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)tx_ring->tbd_area & (Adapter->desc_align - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	if (alloc_flag)
		bzero(tx_ring->tbd_area, len);

	/*
	 * If the previous DMA attributes setting could not give us contiguous
	 * memory or the number of descriptors is greater than the page size,
	 * we allocate extra memory and then align it at the appropriate
	 * boundary.
	 */
	if (!alloc_flag) {
		size = size + Adapter->desc_align;

		/*
		 * DMA attributes set to no scatter/gather and byte (1-byte)
		 * alignment.
		 */
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;

		/*
		 * Allocate a new DMA handle for the transmit descriptor
		 * memory area.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &tx_ring->tbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate tbd dma handle: %d",
			    mystat);
			tx_ring->tbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		/*
		 * Allocate memory to DMA data to and from the transmit
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(tx_ring->tbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&tx_ring->tbd_area,
		    &len, &tx_ring->tbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tbd dma memory: %d", mystat);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
			if (tx_ring->tbd_dma_handle != NULL) {
				ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
				tx_ring->tbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the entire transmit buffer descriptor area to
		 * zero.
		 */
		bzero(tx_ring->tbd_area, len);
		/*
		 * Memory has been allocated with the ddi_dma_mem_alloc call,
		 * but has not been aligned.
		 * We now align it on the appropriate boundary.
		 */
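		/*
		 * Illustration with assumed values: if desc_align is 0x1000
		 * and tbd_area starts at 0x1234, P2NPHASE() returns 0xdcc,
		 * so the area pointer is advanced to 0x2000 and len shrinks
		 * by the 0xdcc bytes of leading padding.
		 */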
		templong = P2NPHASE((uintptr_t)tx_ring->tbd_area,
		    Adapter->desc_align);
		len = size - templong;
		templong += (uintptr_t)tx_ring->tbd_area;
		tx_ring->tbd_area = (struct e1000_tx_desc *)templong;
	} /* alignment workaround */

	/*
	 * Transmit buffer descriptor memory allocation succeeded.
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources then get bound to
	 * the memory address.
	 */
	mystat = ddi_dma_addr_bind_handle(tx_ring->tbd_dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource: %d", mystat);
		if (tx_ring->tbd_acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
			tx_ring->tbd_acc_handle = NULL;
			tx_ring->tbd_area = NULL;
		}
		if (tx_ring->tbd_dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
			tx_ring->tbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);	/* 1 cookie */

	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind tbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_tx_descriptors(tx_ring);
		return (DDI_FAILURE);
	}

	tx_ring->tbd_dma_addr = cookie.dmac_laddress;
	tx_ring->tbd_first = tx_ring->tbd_area;
	tx_ring->tbd_last = tx_ring->tbd_first +
	    (Adapter->tx_desc_num - 1);

	return (DDI_SUCCESS);
}

static int
e1000g_alloc_rx_descriptors(e1000g_rx_data_t *rx_data)
{
	int mystat;
	boolean_t alloc_flag;
	size_t size;
	size_t len;
	uintptr_t templong;
	uint_t cookie_count;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_data->rx_ring->adapter;
	devinfo = Adapter->dip;

	alloc_flag = B_FALSE;
	dma_attr = e1000g_desc_dma_attr;

	/*
	 * Memory allocation for the receive buffer descriptors.
	 */
	size = (sizeof (struct e1000_rx_desc)) * Adapter->rx_desc_num;

	/*
	 * Ask for aligned memory with the DMA attributes set to a suitable
	 * value.
	 */
	dma_attr.dma_attr_sgllen = 1;
	dma_attr.dma_attr_align = Adapter->desc_align;

	/*
	 * Allocate a new DMA handle for the receive descriptors.
	 */
	mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &rx_data->rbd_dma_handle);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate rbd dma handle: %d", mystat);
		rx_data->rbd_dma_handle = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	mystat = ddi_dma_mem_alloc(rx_data->rbd_dma_handle,
	    size,
	    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0,
	    (caddr_t *)&rx_data->rbd_area,
	    &len, &rx_data->rbd_acc_handle);

	/*
	 * Check if memory allocation succeeded and also if the
	 * allocated memory is aligned correctly.
	 */
	if ((mystat != DDI_SUCCESS) ||
	    ((uintptr_t)rx_data->rbd_area & (Adapter->desc_align - 1))) {
		if (mystat == DDI_SUCCESS) {
			ddi_dma_mem_free(&rx_data->rbd_acc_handle);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
		}
		if (rx_data->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_dma_handle);
			rx_data->rbd_dma_handle = NULL;
		}
		alloc_flag = B_FALSE;
	} else
		alloc_flag = B_TRUE;

	/*
	 * Initialize the allocated receive descriptor memory to zero.
	 */
	if (alloc_flag)
		bzero((caddr_t)rx_data->rbd_area, len);

	/*
	 * If memory allocation did not succeed, do the alignment ourselves.
	 */
	if (!alloc_flag) {
		dma_attr.dma_attr_align = 1;
		dma_attr.dma_attr_sgllen = 1;
		size = size + Adapter->desc_align;
		/*
		 * Allocate a new DMA handle for the receive descriptor.
		 */
		mystat = ddi_dma_alloc_handle(devinfo, &dma_attr,
		    DDI_DMA_DONTWAIT, 0,
		    &rx_data->rbd_dma_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not re-allocate rbd dma handle: %d",
			    mystat);
			rx_data->rbd_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		/*
		 * Allocate memory to DMA data to and from the receive
		 * descriptors.
		 */
		mystat = ddi_dma_mem_alloc(rx_data->rbd_dma_handle,
		    size,
		    &e1000g_desc_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, 0,
		    (caddr_t *)&rx_data->rbd_area,
		    &len, &rx_data->rbd_acc_handle);

		if (mystat != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate rbd dma memory: %d", mystat);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
			if (rx_data->rbd_dma_handle != NULL) {
				ddi_dma_free_handle(&rx_data->rbd_dma_handle);
				rx_data->rbd_dma_handle = NULL;
			}
			return (DDI_FAILURE);
		} else
			alloc_flag = B_TRUE;

		/*
		 * Initialize the allocated receive descriptor memory to zero.
		 */
		bzero((caddr_t)rx_data->rbd_area, len);
		templong = P2NPHASE((uintptr_t)rx_data->rbd_area,
		    Adapter->desc_align);
		len = size - templong;
		templong += (uintptr_t)rx_data->rbd_area;
		rx_data->rbd_area = (struct e1000_rx_desc *)templong;
	} /* alignment workaround */

	/*
	 * The memory allocation of the receive descriptors succeeded.
	 */
	ASSERT(alloc_flag);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	mystat = ddi_dma_addr_bind_handle(rx_data->rbd_dma_handle,
	    NULL, (caddr_t)rx_data->rbd_area,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, 0, &cookie, &cookie_count);

	if (mystat != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource: %d", mystat);
		if (rx_data->rbd_acc_handle != NULL) {
			ddi_dma_mem_free(&rx_data->rbd_acc_handle);
			rx_data->rbd_acc_handle = NULL;
			rx_data->rbd_area = NULL;
		}
		if (rx_data->rbd_dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_dma_handle);
			rx_data->rbd_dma_handle = NULL;
		}
		return (DDI_FAILURE);
	}

	ASSERT(cookie_count == 1);
	if (cookie_count != 1) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind rbd dma resource in a single frag. "
		    "Count - %d Len - %d", cookie_count, len);
		e1000g_free_rx_descriptors(rx_data);
		return (DDI_FAILURE);
	}

	rx_data->rbd_dma_addr = cookie.dmac_laddress;
	rx_data->rbd_first = rx_data->rbd_area;
	rx_data->rbd_last = rx_data->rbd_first +
	    (Adapter->rx_desc_num - 1);

	return (DDI_SUCCESS);
}

static void
e1000g_free_rx_descriptors(e1000g_rx_data_t *rx_data)
{
	if (rx_data->rbd_dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(rx_data->rbd_dma_handle);
	}
	if (rx_data->rbd_acc_handle != NULL) {
		ddi_dma_mem_free(&rx_data->rbd_acc_handle);
		rx_data->rbd_acc_handle = NULL;
		rx_data->rbd_area = NULL;
	}
	if (rx_data->rbd_dma_handle != NULL) {
		ddi_dma_free_handle(&rx_data->rbd_dma_handle);
		rx_data->rbd_dma_handle = NULL;
	}
	rx_data->rbd_dma_addr = (uintptr_t)NULL;
	rx_data->rbd_first = NULL;
	rx_data->rbd_last = NULL;
}

static void
e1000g_free_tx_descriptors(e1000g_tx_ring_t *tx_ring)
{
	if (tx_ring->tbd_dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(tx_ring->tbd_dma_handle);
	}
	if (tx_ring->tbd_acc_handle != NULL) {
		ddi_dma_mem_free(&tx_ring->tbd_acc_handle);
		tx_ring->tbd_acc_handle = NULL;
		tx_ring->tbd_area = NULL;
	}
	if (tx_ring->tbd_dma_handle != NULL) {
		ddi_dma_free_handle(&tx_ring->tbd_dma_handle);
		tx_ring->tbd_dma_handle = NULL;
	}
	tx_ring->tbd_dma_addr = (uintptr_t)NULL;
	tx_ring->tbd_first = NULL;
	tx_ring->tbd_last = NULL;
}

/*
 * e1000g_alloc_packets - allocate DMA buffers for rx/tx
 *
 * This routine allocates the necessary buffers for the
 *	Transmit sw packet structure
 *	DMA handle for Transmit
 *	DMA buffer for Transmit
 *	Receive sw packet structure
 *	DMA buffer for Receive
 */
static int
e1000g_alloc_packets(struct e1000g *Adapter)
{
	int result;
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	tx_ring = Adapter->tx_ring;
	rx_data = Adapter->rx_ring->rx_data;

again:
	rw_enter(&e1000g_dma_type_lock, RW_READER);

	result = e1000g_alloc_tx_packets(tx_ring);
	if (result != DDI_SUCCESS) {
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resources for Tx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Tx packets\n");
		return (DDI_FAILURE);
	}

	result = e1000g_alloc_rx_packets(rx_data);
	if (result != DDI_SUCCESS) {
		e1000g_free_tx_packets(tx_ring);
		if (e1000g_dma_type == USE_DVMA) {
			rw_exit(&e1000g_dma_type_lock);

			rw_enter(&e1000g_dma_type_lock, RW_WRITER);
			e1000g_dma_type = USE_DMA;
			rw_exit(&e1000g_dma_type_lock);

			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Not enough dvma resources for Rx packets, "
			    "trying to allocate dma buffers...\n");
			goto again;
		}
		rw_exit(&e1000g_dma_type_lock);

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate dma buffers for Rx packets\n");
		return (DDI_FAILURE);
	}

	rw_exit(&e1000g_dma_type_lock);

	return (DDI_SUCCESS);
}

static void
e1000g_free_packets(struct e1000g *Adapter)
{
	e1000g_tx_ring_t *tx_ring;
	e1000g_rx_data_t *rx_data;

	tx_ring = Adapter->tx_ring;
	rx_data = Adapter->rx_ring->rx_data;

	e1000g_free_tx_packets(tx_ring);
	e1000g_free_rx_packets(rx_data, B_FALSE);
}
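
/*
 * e1000g_alloc_dma_buffer - allocate a single DMA buffer
 *
 * The allocation follows the usual three-step DDI sequence:
 * ddi_dma_alloc_handle(), ddi_dma_mem_alloc(), ddi_dma_addr_bind_handle().
 * On failure, the steps already completed are unwound in reverse order.
 * The buffer must bind to exactly one cookie, since the hardware is handed
 * a single base address for it.
 */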
static int
e1000g_alloc_dma_buffer(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    p_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &e1000g_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &buf->address,
	    &len, &buf->acc_handle);

	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    NULL, buf->address, len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			(void) ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}

/*
 * e1000g_alloc_dma_buffer_82546 - allocate a dma buffer along with all
 * necessary handles. Same as e1000g_alloc_dma_buffer() except that it
 * ensures the buffer doesn't cross a 64k boundary.
 */
static int
e1000g_alloc_dma_buffer_82546(struct e1000g *Adapter,
    dma_buffer_t *buf, size_t size, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t count;

	if (e1000g_force_detach)
		devinfo = Adapter->priv_dip;
	else
		devinfo = Adapter->dip;

	mystat = ddi_dma_alloc_handle(devinfo,
	    p_dma_attr,
	    DDI_DMA_DONTWAIT, 0,
	    &buf->dma_handle);

	if (mystat != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = e1000g_dma_mem_alloc_82546(buf, size, &len);
	if (mystat != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate dma buffer memory: %d\n", mystat);
		return (DDI_FAILURE);
	}

	mystat = ddi_dma_addr_bind_handle(buf->dma_handle,
	    NULL, buf->address, len, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, 0, &cookie, &count);

	if (mystat != DDI_SUCCESS) {
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer dma handle: %d\n", mystat);
		return (DDI_FAILURE);
	}

	ASSERT(count == 1);
	if (count != 1) {
		if (buf->dma_handle != NULL) {
			(void) ddi_dma_unbind_handle(buf->dma_handle);
		}
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Could not bind buffer as a single frag. "
		    "Count = %d\n", count);
		return (DDI_FAILURE);
	}

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (DDI_SUCCESS);
}

/*
 * e1000g_dma_mem_alloc_82546 - allocate a dma buffer, making up to
 * ALLOC_RETRY attempts to get a buffer that doesn't cross a 64k boundary.
 */
static int
e1000g_dma_mem_alloc_82546(dma_buffer_t *buf, size_t size, size_t *len)
{
#define	ALLOC_RETRY	10
	int stat;
	int cnt = 0;
	ddi_acc_handle_t hold[ALLOC_RETRY];

	while (cnt < ALLOC_RETRY) {
		hold[cnt] = NULL;

		/* allocate memory */
		stat = ddi_dma_mem_alloc(buf->dma_handle, size,
		    &e1000g_buf_acc_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
		    0, &buf->address, len, &buf->acc_handle);

		if (stat != DDI_SUCCESS) {
			break;
		}

		/*
		 * Check the 64k boundary:
		 * if it is bad, hold it and retry;
		 * if it is good, exit the loop.
		 */
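		/*
		 * A buffer that crosses the boundary is deliberately held
		 * (rather than freed right away) until the loop ends, so
		 * that the next ddi_dma_mem_alloc() call cannot hand back
		 * the same crossing region again.
		 */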
		if (e1000g_cross_64k_bound(buf->address, *len)) {
			hold[cnt] = buf->acc_handle;
			stat = DDI_FAILURE;
		} else {
			break;
		}

		cnt++;
	}

	/* Release any held buffers crossing the 64k boundary */
	for (--cnt; cnt >= 0; cnt--) {
		if (hold[cnt])
			ddi_dma_mem_free(&hold[cnt]);
	}

	return (stat);
}

/*
 * e1000g_cross_64k_bound - return true if the starting and ending addresses
 * cross a 64k boundary; otherwise return false
 */
static boolean_t
e1000g_cross_64k_bound(void *addr, uintptr_t len)
{
	uintptr_t start = (uintptr_t)addr;
	uintptr_t end = start + len - 1;
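
	/*
	 * Illustration with assumed addresses: start = 0xfff0 and
	 * len = 0x20 give end = 0x1000f; start ^ end has bit 16 set, so
	 * the range crosses a 64KB boundary. With start = 0x10000 and
	 * len = 0x10000, end is 0x1ffff and (start ^ end) >> 16 is 0,
	 * so it does not.
	 */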
	return (((start ^ end) >> 16) == 0 ? B_FALSE : B_TRUE);
}

static void
e1000g_free_dma_buffer(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
	} else {
		return;
	}

	buf->dma_address = (uintptr_t)NULL;

	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}

	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}

	buf->size = 0;
	buf->len = 0;
}

static int
e1000g_alloc_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	p_tx_sw_packet_t packet;
	int mystat;
	dma_buffer_t *tx_buf;
	struct e1000g *Adapter;
	dev_info_t *devinfo;
	ddi_dma_attr_t dma_attr;

	Adapter = tx_ring->adapter;
	devinfo = Adapter->dip;
	dma_attr = e1000g_buf_dma_attr;

	/*
	 * Memory allocation for the Transmit software structure, the
	 * transmit software packet. This structure stores all the relevant
	 * information for transmitting a single packet.
	 */
	tx_ring->packet_area =
	    kmem_zalloc(TX_SW_PKT_AREA_SZ, KM_NOSLEEP);

	if (tx_ring->packet_area == NULL)
		return (DDI_FAILURE);

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		ASSERT(packet != NULL);

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting. The
		 * dynamic binding only applies to packets that are larger
		 * than tx_bcopy_thresh.
		 */
		switch (e1000g_dma_type) {
		case USE_DMA:
			mystat = ddi_dma_alloc_handle(devinfo,
			    &e1000g_tx_dma_attr,
			    DDI_DMA_DONTWAIT, 0,
			    &packet->tx_dma_handle);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Could not allocate tx dma handle: %d\n", mystat);
			goto tx_pkt_fail;
		}

		/*
		 * Pre-allocate transmit buffers for small packets whose size
		 * is less than tx_bcopy_thresh. The data of those small
		 * packets will be bcopy()'d into the transmit buffers instead
		 * of using dynamic DMA binding. For small packets, bcopy
		 * brings better performance than DMA binding.
		 */
		tx_buf = packet->tx_buf;

		switch (e1000g_dma_type) {
		case USE_DMA:
			mystat = e1000g_alloc_dma_buffer(Adapter,
			    tx_buf, Adapter->tx_buffer_size, &dma_attr);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}
		if (mystat != DDI_SUCCESS) {
			ASSERT(packet->tx_dma_handle != NULL);
			switch (e1000g_dma_type) {
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
			    "Failed to allocate Tx buffer\n");
			goto tx_pkt_fail;
		}

		packet->dma_type = e1000g_dma_type;
	} /* for */

	return (DDI_SUCCESS);

tx_pkt_fail:
	e1000g_free_tx_packets(tx_ring);

	return (DDI_FAILURE);
}
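
/*
 * e1000g_increase_rx_packets - grow the receive packet freelist
 *
 * Allocates up to RX_FREELIST_INCREASE_SIZE additional rx_sw_packet
 * structures, pushes them onto the free list, and links them into
 * packet_area so that they can be found again at teardown time.
 */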
int
e1000g_increase_rx_packets(e1000g_rx_data_t *rx_data)
{
	int i;
	p_rx_sw_packet_t packet;
	p_rx_sw_packet_t cur, next;
	struct e1000g *Adapter;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_data->rx_ring->adapter;
	dma_attr = e1000g_buf_dma_attr;
	dma_attr.dma_attr_align = Adapter->rx_buf_align;
	cur = NULL;

	for (i = 0; i < RX_FREELIST_INCREASE_SIZE; i++) {
		packet = e1000g_alloc_rx_sw_packet(rx_data, &dma_attr);
		if (packet == NULL)
			break;
		packet->next = cur;
		cur = packet;
	}
	Adapter->rx_freelist_num += i;
	rx_data->avail_freepkt += i;

	while (cur != NULL) {
		QUEUE_PUSH_TAIL(&rx_data->free_list, &cur->Link);
		next = cur->next;
		cur->next = rx_data->packet_area;
		rx_data->packet_area = cur;

		cur = next;
	}

	return (DDI_SUCCESS);
}

static int
e1000g_alloc_rx_packets(e1000g_rx_data_t *rx_data)
{
	int i;
	p_rx_sw_packet_t packet;
	struct e1000g *Adapter;
	uint32_t packet_num;
	ddi_dma_attr_t dma_attr;

	Adapter = rx_data->rx_ring->adapter;
	dma_attr = e1000g_buf_dma_attr;
	dma_attr.dma_attr_align = Adapter->rx_buf_align;

	/*
	 * Allocate memory for the rx_sw_packet structures. Each one of these
	 * structures will contain a virtual and physical address to an actual
	 * receive buffer in host memory. Since we use one rx_sw_packet per
	 * received packet, the maximum number of rx_sw_packet that we'll
	 * need is equal to the number of receive descriptors plus the
	 * freelist size.
	 */
	packet_num = Adapter->rx_desc_num + RX_FREELIST_INCREASE_SIZE;
	rx_data->packet_area = NULL;

	for (i = 0; i < packet_num; i++) {
		packet = e1000g_alloc_rx_sw_packet(rx_data, &dma_attr);
		if (packet == NULL)
			goto rx_pkt_fail;

		packet->next = rx_data->packet_area;
		rx_data->packet_area = packet;
	}

	Adapter->rx_freelist_num = RX_FREELIST_INCREASE_SIZE;
	return (DDI_SUCCESS);

rx_pkt_fail:
	e1000g_free_rx_packets(rx_data, B_TRUE);
	return (DDI_FAILURE);
}

static p_rx_sw_packet_t
e1000g_alloc_rx_sw_packet(e1000g_rx_data_t *rx_data, ddi_dma_attr_t *p_dma_attr)
{
	int mystat;
	p_rx_sw_packet_t packet;
	dma_buffer_t *rx_buf;
	struct e1000g *Adapter;

	Adapter = rx_data->rx_ring->adapter;

	packet = kmem_zalloc(sizeof (rx_sw_packet_t), KM_NOSLEEP);
	if (packet == NULL) {
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Could not allocate memory for Rx SwPacket\n");
		return (NULL);
	}

	rx_buf = packet->rx_buf;

	switch (e1000g_dma_type) {
	case USE_DMA:
		if (Adapter->mem_workaround_82546 &&
		    ((Adapter->shared.mac.type == e1000_82545) ||
		    (Adapter->shared.mac.type == e1000_82546) ||
		    (Adapter->shared.mac.type == e1000_82546_rev_3))) {
			mystat = e1000g_alloc_dma_buffer_82546(Adapter,
			    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
		} else {
			mystat = e1000g_alloc_dma_buffer(Adapter,
			    rx_buf, Adapter->rx_buffer_size, p_dma_attr);
		}
		break;
	default:
		ASSERT(B_FALSE);
		break;
	}

	if (mystat != DDI_SUCCESS) {
		if (packet != NULL)
			kmem_free(packet, sizeof (rx_sw_packet_t));

		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "Failed to allocate Rx buffer\n");
		return (NULL);
	}
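
	/*
	 * Reserve E1000G_IPALIGNROOM bytes at the front of the buffer so
	 * that the IP header of a received frame ends up on a 4-byte
	 * boundary (the 14-byte Ethernet header would otherwise leave it
	 * misaligned).
	 */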
	rx_buf->size -= E1000G_IPALIGNROOM;
	rx_buf->address += E1000G_IPALIGNROOM;
	rx_buf->dma_address += E1000G_IPALIGNROOM;

	packet->rx_data = (caddr_t)rx_data;
	packet->free_rtn.free_func = e1000g_rxfree_func;
	packet->free_rtn.free_arg = (char *)packet;
	/*
	 * esballoc was changed to desballoc, which is an undocumented
	 * call but, according to Sun, is safe to use. It gives better
	 * efficiency.
	 */
	packet->mp = desballoc((unsigned char *)
	    rx_buf->address,
	    rx_buf->size,
	    BPRI_MED, &packet->free_rtn);

	packet->dma_type = e1000g_dma_type;
	packet->ref_cnt = 1;

	return (packet);
}

void
e1000g_free_rx_sw_packet(p_rx_sw_packet_t packet, boolean_t full_release)
{
	dma_buffer_t *rx_buf;

	if (packet->mp != NULL) {
		freemsg(packet->mp);
		packet->mp = NULL;
	}

	rx_buf = packet->rx_buf;

	switch (packet->dma_type) {
	case USE_DMA:
		e1000g_free_dma_buffer(rx_buf);
		break;
	default:
		break;
	}

	packet->dma_type = USE_NONE;

	if (!full_release)
		return;

	kmem_free(packet, sizeof (rx_sw_packet_t));
}

static void
e1000g_free_rx_packets(e1000g_rx_data_t *rx_data, boolean_t full_release)
{
	p_rx_sw_packet_t packet, next_packet;
	uint32_t ref_cnt;

	mutex_enter(&e1000g_rx_detach_lock);

	packet = rx_data->packet_area;
	while (packet != NULL) {
		next_packet = packet->next;
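
		/*
		 * If the mblk built on this packet's buffer is still loaned
		 * out to the upper stack, the decrement below leaves
		 * ref_cnt above zero; the packet is then only counted as
		 * pending, and its actual release is deferred to the
		 * free-routine callback that runs when the mblk is freed
		 * upstream.
		 */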
		ref_cnt = atomic_dec_32_nv(&packet->ref_cnt);
		if (ref_cnt > 0) {
			atomic_inc_32(&rx_data->pending_count);
			atomic_inc_32(&e1000g_mblks_pending);
		} else {
			e1000g_free_rx_sw_packet(packet, full_release);
		}

		packet = next_packet;
	}

	if (full_release)
		rx_data->packet_area = NULL;

	mutex_exit(&e1000g_rx_detach_lock);
}

static void
e1000g_free_tx_packets(e1000g_tx_ring_t *tx_ring)
{
	int j;
	struct e1000g *Adapter;
	p_tx_sw_packet_t packet;
	dma_buffer_t *tx_buf;

	Adapter = tx_ring->adapter;

	for (j = 0, packet = tx_ring->packet_area;
	    j < Adapter->tx_freelist_num; j++, packet++) {

		if (packet == NULL)
			break;

		/* Free the Tx DMA handle used for dynamic binding */
		if (packet->tx_dma_handle != NULL) {
			switch (packet->dma_type) {
			case USE_DMA:
				ddi_dma_free_handle(&packet->tx_dma_handle);
				break;
			default:
				ASSERT(B_FALSE);
				break;
			}
			packet->tx_dma_handle = NULL;
		} else {
			/*
			 * If the dma handle is NULL, we don't need to check
			 * the remaining packets: they have either not been
			 * initialized or have already been freed.
			 */
			break;
		}

		tx_buf = packet->tx_buf;

		switch (packet->dma_type) {
		case USE_DMA:
			e1000g_free_dma_buffer(tx_buf);
			break;
		default:
			ASSERT(B_FALSE);
			break;
		}

		packet->dma_type = USE_NONE;
	}
	if (tx_ring->packet_area != NULL) {
		kmem_free(tx_ring->packet_area, TX_SW_PKT_AREA_SZ);
		tx_ring->packet_area = NULL;
	}
}

/*
 * e1000g_release_dma_resources - release allocated DMA resources
 *
 * This function releases any pending buffers that have been
 * previously allocated.
 */
void
e1000g_release_dma_resources(struct e1000g *Adapter)
{
	e1000g_free_descriptors(Adapter);
	e1000g_free_packets(Adapter);
}

/* ARGSUSED */
void
e1000g_set_fma_flags(int dma_flag)
{
	if (dma_flag) {
		e1000g_tx_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		e1000g_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
		e1000g_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
	} else {
		e1000g_tx_dma_attr.dma_attr_flags = 0;
		e1000g_buf_dma_attr.dma_attr_flags = 0;
		e1000g_desc_dma_attr.dma_attr_flags = 0;
	}
}