/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
26 #ifndef _SYS_NXGE_NXGE_TXDMA_H
27 #define _SYS_NXGE_NXGE_TXDMA_H
33 #include <sys/taskq.h>
34 #include <sys/nxge/nxge_txdma_hw.h>
35 #include <npi_txdma.h>
/* Bitmap of the transmit DMA channels assigned to this port. */
#define	TXDMA_PORT_BITMAP(nxgep)	((nxgep)->pt_config.tx_dma_map)

/* Default number of pending descriptors before a reclaim pass. */
#define	TXDMA_RECLAIM_PENDING_DEFAULT	64
#define	TX_FULL_MARK			3

/*
 * Transmit load balancing definitions.
 */
#define	NXGE_TX_LB_TCPUDP	0	/* default policy */
#define	NXGE_TX_LB_HASH		1	/* from the hint data */
#define	NXGE_TX_LB_DEST_MAC	2	/* Dest. MAC */
/*
 * Descriptor ring empty:
 *	(1) head index is equal to tail index.
 *	(2) wrapped around bits are the same.
 * Descriptor ring full:
 *	(1) head index is equal to tail index.
 *	(2) wrapped around bits are different.
 */
#define	TXDMA_RING_EMPTY(head, head_wrap, tail, tail_wrap)	\
	(((head) == (tail) && (head_wrap) == (tail_wrap)) ?	\
	B_TRUE : B_FALSE)

#define	TXDMA_RING_FULL(head, head_wrap, tail, tail_wrap)	\
	(((head) == (tail) && (head_wrap) != (tail_wrap)) ?	\
	B_TRUE : B_FALSE)

/*
 * Advance a descriptor index by `entries`, wrapping at the ring size.
 * All arguments are parenthesized so callers may pass expressions
 * without precedence surprises (CERT PRE01-C).
 */
#define	TXDMA_DESC_NEXT_INDEX(index, entries, wrap_mask)	\
	(((index) + (entries)) & (wrap_mask))

#define	TXDMA_DRR_WEIGHT_DEFAULT	0x001f
74 typedef struct _tx_msg_t
{
75 nxge_os_block_mv_t flags
; /* DMA, BCOPY, DVMA (?) */
76 nxge_os_dma_common_t buf_dma
; /* premapped buffer blocks */
77 nxge_os_dma_handle_t buf_dma_handle
; /* premapped buffer handle */
78 nxge_os_dma_handle_t dma_handle
; /* DMA handle for normal send */
79 nxge_os_dma_handle_t dvma_handle
; /* Fast DVMA handle */
80 struct _tx_msg_t
*nextp
;
87 } tx_msg_t
, *p_tx_msg_t
;
92 typedef struct _nxge_tx_ring_stats_t
{
101 uint32_t pkt_size_err
;
102 uint32_t tx_ring_oflow
;
103 uint32_t pre_buf_par_err
;
105 uint32_t nack_pkt_rd
;
106 uint32_t conf_part_err
;
107 uint32_t pkt_part_err
;
109 uint32_t tx_nocanput
;
110 uint32_t tx_msgdup_fail
;
111 uint32_t tx_allocb_fail
;
113 uint32_t tx_dma_bind_fail
;
116 uint32_t tx_hdr_pkts
;
117 uint32_t tx_ddi_pkts
;
118 uint32_t tx_dvma_pkts
;
120 uint32_t tx_max_pend
;
121 uint32_t tx_jumbo_pkts
;
123 txdma_ring_errlog_t errlog
;
124 } nxge_tx_ring_stats_t
, *p_nxge_tx_ring_stats_t
;
126 typedef struct _tx_ring_t
{
127 nxge_os_dma_common_t tdc_desc
;
128 struct _nxge_t
*nxgep
;
129 p_tx_msg_t tx_msg_ring
;
131 tx_rng_cfig_t tx_ring_cfig
;
132 tx_ring_hdl_t tx_ring_hdl
;
133 tx_ring_kick_t tx_ring_kick
;
135 tx_dma_ent_msk_t tx_evmask
;
136 txdma_mbh_t tx_mbox_mbh
;
137 txdma_mbl_t tx_mbox_mbl
;
138 log_page_vld_t page_valid
;
139 log_page_mask_t page_mask_1
;
140 log_page_mask_t page_mask_2
;
141 log_page_value_t page_value_1
;
142 log_page_value_t page_value_2
;
143 log_page_relo_t page_reloc_1
;
144 log_page_relo_t page_reloc_2
;
145 log_page_hdl_t page_hdl
;
146 txc_dma_max_burst_t max_burst
;
148 #define NXGE_TX_RING_ONLINE 0x00
149 #define NXGE_TX_RING_OFFLINING 0x01
150 #define NXGE_TX_RING_OFFLINED 0x02
151 uint32_t tx_ring_offline
;
152 boolean_t tx_ring_busy
;
154 nxge_os_mutex_t lock
;
155 mac_ring_handle_t tx_ring_handle
;
159 struct nxge_tdc_cfg
*tdc_p
;
166 boolean_t wr_index_wrap
;
167 tx_ring_hdl_t ring_head
;
168 tx_ring_kick_t ring_kick_tail
;
169 txdma_mailbox_t tx_mbox
;
174 nxge_os_mutex_t sq_lock
;
178 uint16_t ldg_group_id
;
179 p_nxge_tx_ring_stats_t tdc_stats
;
181 nxge_os_mutex_t dvma_lock
;
182 uint_t dvma_wr_index
;
183 uint_t dvma_rd_index
;
185 uint_t dvma_available
;
186 uint_t dvma_wrap_mask
;
188 nxge_os_dma_handle_t
*dvma_ring
;
190 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
191 uint64_t hv_tx_buf_base_ioaddr_pp
;
192 uint64_t hv_tx_buf_ioaddr_size
;
193 uint64_t hv_tx_cntl_base_ioaddr_pp
;
194 uint64_t hv_tx_cntl_ioaddr_size
;
197 } tx_ring_t
, *p_tx_ring_t
;
200 /* Transmit Mailbox */
201 typedef struct _tx_mbox_t
{
202 nxge_os_mutex_t lock
;
204 struct _nxge_t
*nxgep
;
206 nxge_os_dma_common_t tx_mbox
;
207 txdma_mbl_t tx_mbox_l
;
208 txdma_mbh_t tx_mbox_h
;
209 } tx_mbox_t
, *p_tx_mbox_t
;
211 typedef struct _tx_rings_t
{
213 boolean_t txdesc_allocated
;
215 nxge_os_dma_common_t tdc_dma
;
216 nxge_os_dma_common_t tdc_mbox
;
217 } tx_rings_t
, *p_tx_rings_t
;
220 typedef struct _tx_mbox_areas_t
{
221 p_tx_mbox_t
*txmbox_areas_p
;
222 boolean_t txmbox_allocated
;
223 } tx_mbox_areas_t
, *p_tx_mbox_areas_t
;
226 * Transmit prototypes.
228 nxge_status_t
nxge_init_txdma_channels(p_nxge_t
);
229 void nxge_uninit_txdma_channels(p_nxge_t
);
231 nxge_status_t
nxge_init_txdma_channel(p_nxge_t
, int);
232 void nxge_uninit_txdma_channel(p_nxge_t
, int);
234 void nxge_setup_dma_common(p_nxge_dma_common_t
, p_nxge_dma_common_t
,
236 nxge_status_t
nxge_reset_txdma_channel(p_nxge_t
, uint16_t,
238 nxge_status_t
nxge_init_txdma_channel_event_mask(p_nxge_t
,
239 uint16_t, p_tx_dma_ent_msk_t
);
240 nxge_status_t
nxge_init_txdma_channel_cntl_stat(p_nxge_t
,
242 nxge_status_t
nxge_enable_txdma_channel(p_nxge_t
, uint16_t,
243 p_tx_ring_t
, p_tx_mbox_t
);
245 p_mblk_t
nxge_tx_pkt_header_reserve(p_mblk_t
, uint8_t *);
246 int nxge_tx_pkt_nmblocks(p_mblk_t
, int *);
247 boolean_t
nxge_txdma_reclaim(p_nxge_t
, p_tx_ring_t
, int);
249 void nxge_fill_tx_hdr(p_mblk_t
, boolean_t
, boolean_t
,
250 int, uint8_t, p_tx_pkt_hdr_all_t
, t_uscalar_t
, t_uscalar_t
);
252 nxge_status_t
nxge_txdma_hw_mode(p_nxge_t
, boolean_t
);
253 void nxge_hw_start_tx(p_nxge_t
);
254 void nxge_txdma_stop(p_nxge_t
);
255 void nxge_txdma_stop_start(p_nxge_t
);
256 void nxge_fixup_txdma_rings(p_nxge_t
);
257 void nxge_txdma_hw_kick(p_nxge_t
);
258 void nxge_txdma_fix_channel(p_nxge_t
, uint16_t);
259 void nxge_txdma_fixup_channel(p_nxge_t
, p_tx_ring_t
,
261 void nxge_txdma_hw_kick_channel(p_nxge_t
, p_tx_ring_t
,
264 void nxge_txdma_regs_dump(p_nxge_t
, int);
265 void nxge_txdma_regs_dump_channels(p_nxge_t
);
267 void nxge_check_tx_hang(p_nxge_t
);
268 void nxge_fixup_hung_txdma_rings(p_nxge_t
);
270 void nxge_reclaim_rings(p_nxge_t
);
271 int nxge_txdma_channel_hung(p_nxge_t
,
272 p_tx_ring_t tx_ring_p
, uint16_t);
273 int nxge_txdma_hung(p_nxge_t
);
274 int nxge_txdma_stop_inj_err(p_nxge_t
, int);
275 void nxge_txdma_inject_err(p_nxge_t
, uint32_t, uint8_t);
277 extern nxge_status_t
nxge_alloc_tx_mem_pool(p_nxge_t
);
278 extern nxge_status_t
nxge_alloc_txb(p_nxge_t nxgep
, int channel
);
279 extern void nxge_free_txb(p_nxge_t nxgep
, int channel
);
285 #endif /* _SYS_NXGE_NXGE_TXDMA_H */