4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
21 * Copyright (c) 2002-2006 Neterion, Inc.
25 #include "xgehal-fifo.h"
28 __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_fifo_txdl_priv_t
*
29 __hal_fifo_txdl_priv(xge_hal_dtr_h dtrh
)
31 xge_hal_fifo_txd_t
*txdp
= (xge_hal_fifo_txd_t
*)dtrh
;
32 xge_hal_fifo_txdl_priv_t
*txdl_priv
;
35 txdl_priv
= (xge_hal_fifo_txdl_priv_t
*)
36 (ulong_t
)txdp
->host_control
;
38 xge_assert(txdl_priv
);
39 xge_assert(txdl_priv
->dma_object
);
40 xge_assert(txdl_priv
->dma_addr
);
42 xge_assert(txdl_priv
->dma_object
->handle
== txdl_priv
->dma_handle
);
47 __HAL_STATIC_FIFO __HAL_INLINE_FIFO
void
48 __hal_fifo_dtr_post_single(xge_hal_channel_h channelh
, xge_hal_dtr_h dtrh
,
51 xge_hal_fifo_t
*fifo
= (xge_hal_fifo_t
*)channelh
;
52 xge_hal_fifo_hw_pair_t
*hw_pair
= fifo
->hw_pair
;
53 xge_hal_fifo_txd_t
*txdp
= (xge_hal_fifo_txd_t
*)dtrh
;
54 xge_hal_fifo_txdl_priv_t
*txdl_priv
;
57 txdp
->control_1
|= XGE_HAL_TXD_LIST_OWN_XENA
;
59 #ifdef XGE_DEBUG_ASSERT
60 /* make sure Xena overwrites the (illegal) t_code value on completion */
61 XGE_HAL_SET_TXD_T_CODE(txdp
->control_1
, XGE_HAL_TXD_T_CODE_UNUSED_5
);
64 txdl_priv
= __hal_fifo_txdl_priv(dtrh
);
66 #if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
67 /* sync the TxDL to device */
68 xge_os_dma_sync(fifo
->channel
.pdev
,
69 txdl_priv
->dma_handle
,
71 txdl_priv
->dma_offset
,
72 txdl_priv
->frags
<< 5 /* sizeof(xge_hal_fifo_txd_t) */,
73 XGE_OS_DMA_DIR_TODEVICE
);
75 /* write the pointer first */
76 xge_os_pio_mem_write64(fifo
->channel
.pdev
,
79 &hw_pair
->txdl_pointer
);
81 /* spec: 0x00 = 1 TxD in the list */
82 ctrl
= XGE_HAL_TX_FIFO_LAST_TXD_NUM(txdl_priv
->frags
- 1);
84 ctrl
|= fifo
->no_snoop_bits
;
86 if (txdp
->control_1
& XGE_HAL_TXD_LSO_COF_CTRL(XGE_HAL_TXD_TCP_LSO
)) {
87 ctrl
|= XGE_HAL_TX_FIFO_SPECIAL_FUNC
;
91 * according to the XENA spec:
93 * It is important to note that pointers and list control words are
94 * always written in pairs: in the first write, the host must write a
95 * pointer, and in the second write, it must write the list control
96 * word. Any other access will result in an error. Also, all 16 bytes
97 * of the pointer/control structure must be written, including any
103 * we want touch work_arr in order with ownership bit set to HW
105 __hal_channel_dtr_post(channelh
, dtrh
);
107 xge_os_pio_mem_write64(fifo
->channel
.pdev
, fifo
->channel
.regh1
,
108 ctrl
, &hw_pair
->list_control
);
110 xge_debug_fifo(XGE_TRACE
, "posted txdl 0x"XGE_OS_LLXFMT
" ctrl 0x"XGE_OS_LLXFMT
" "
111 "into 0x"XGE_OS_LLXFMT
"", (unsigned long long)txdl_priv
->dma_addr
,
112 (unsigned long long)ctrl
,
113 (unsigned long long)(ulong_t
)&hw_pair
->txdl_pointer
);
115 #ifdef XGE_HAL_FIFO_DUMP_TXD
116 xge_os_printf(""XGE_OS_LLXFMT
":"XGE_OS_LLXFMT
":"XGE_OS_LLXFMT
":"
117 XGE_OS_LLXFMT
" dma "XGE_OS_LLXFMT
,
118 txdp
->control_1
, txdp
->control_2
, txdp
->buffer_pointer
,
119 txdp
->host_control
, txdl_priv
->dma_addr
);
122 fifo
->channel
.stats
.total_posts
++;
123 fifo
->channel
.usage_cnt
++;
124 if (fifo
->channel
.stats
.usage_max
< fifo
->channel
.usage_cnt
)
125 fifo
->channel
.stats
.usage_max
= fifo
->channel
.usage_cnt
;
128 __HAL_STATIC_FIFO __HAL_INLINE_FIFO
void
129 __hal_fifo_txdl_free_many(xge_hal_channel_h channelh
,
130 xge_hal_fifo_txd_t
*txdp
, int list_size
, int frags
)
132 xge_hal_fifo_txdl_priv_t
*current_txdl_priv
;
133 xge_hal_fifo_txdl_priv_t
*next_txdl_priv
;
134 int invalid_frags
= frags
% list_size
;
136 xge_debug_fifo(XGE_ERR
,
137 "freeing corrupt dtrh %p, fragments %d list size %d",
138 txdp
, frags
, list_size
);
139 xge_assert(invalid_frags
== 0);
142 xge_debug_fifo(XGE_TRACE
,
143 "freeing linked dtrh %p, fragments %d list size %d",
144 txdp
, frags
, list_size
);
145 current_txdl_priv
= __hal_fifo_txdl_priv(txdp
);
146 #if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
147 current_txdl_priv
->allocated
= 0;
149 __hal_channel_dtr_free(channelh
, txdp
);
150 next_txdl_priv
= current_txdl_priv
->next_txdl_priv
;
153 if (next_txdl_priv
) {
154 current_txdl_priv
->next_txdl_priv
= NULL
;
155 txdp
= next_txdl_priv
->first_txdp
;
158 xge_debug_fifo(XGE_TRACE
,
159 "freed linked dtrh fragments %d list size %d",
164 xge_assert(frags
== 0)
167 __HAL_STATIC_FIFO __HAL_INLINE_FIFO
void
168 __hal_fifo_txdl_restore_many(xge_hal_channel_h channelh
,
169 xge_hal_fifo_txd_t
*txdp
, int txdl_count
)
171 xge_hal_fifo_txdl_priv_t
*current_txdl_priv
;
172 xge_hal_fifo_txdl_priv_t
*next_txdl_priv
;
175 xge_assert(((xge_hal_channel_t
*)channelh
)->reserve_length
+
176 txdl_count
<= ((xge_hal_channel_t
*)channelh
)->reserve_initial
);
178 current_txdl_priv
= __hal_fifo_txdl_priv(txdp
);
181 #if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
182 current_txdl_priv
->allocated
= 0;
184 next_txdl_priv
= current_txdl_priv
->next_txdl_priv
;
185 txdp
= current_txdl_priv
->first_txdp
;
186 current_txdl_priv
->next_txdl_priv
= NULL
;
187 __hal_channel_dtr_restore(channelh
, (xge_hal_dtr_h
)txdp
, --i
);
188 xge_debug_fifo(XGE_TRACE
,
189 "dtrh %p restored at offset %d", txdp
, i
);
190 current_txdl_priv
= next_txdl_priv
;
191 } while(current_txdl_priv
);
192 __hal_channel_dtr_restore(channelh
, NULL
, txdl_count
);
195 * xge_hal_fifo_dtr_private - Retrieve per-descriptor private data.
196 * @channelh: Channel handle.
197 * @dtrh: Descriptor handle.
199 * Retrieve per-descriptor private data.
200 * Note that ULD requests per-descriptor space via
201 * xge_hal_channel_open().
203 * Returns: private ULD data associated with the descriptor.
204 * Usage: See ex_xmit{} and ex_tx_compl{}.
206 __HAL_STATIC_FIFO __HAL_INLINE_FIFO
void*
207 xge_hal_fifo_dtr_private(xge_hal_dtr_h dtrh
)
209 xge_hal_fifo_txd_t
*txdp
= (xge_hal_fifo_txd_t
*)dtrh
;
211 return ((char *)(ulong_t
)txdp
->host_control
) +
212 sizeof(xge_hal_fifo_txdl_priv_t
);
216 * xge_hal_fifo_dtr_buffer_cnt - Get number of buffers carried by the
218 * @dtrh: Descriptor handle.
220 * Returns: Number of buffers stored in the given descriptor. Can be used
221 * _after_ the descriptor is set up for posting (see
222 * xge_hal_fifo_dtr_post()) and _before_ it is deallocated (see
223 * xge_hal_fifo_dtr_free()).
226 __HAL_STATIC_FIFO __HAL_INLINE_FIFO
int
227 xge_hal_fifo_dtr_buffer_cnt(xge_hal_dtr_h dtrh
)
229 xge_hal_fifo_txdl_priv_t
*txdl_priv
;
231 txdl_priv
= __hal_fifo_txdl_priv(dtrh
);
233 return txdl_priv
->frags
;
236 * xge_hal_fifo_dtr_reserve_many- Reserve fifo descriptors which span more
238 * @channelh: Channel handle.
239 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
240 * with a valid handle.
241 * @frags: minimum number of fragments to be reserved.
243 * Reserve TxDL(s) (that is, fifo descriptor)
244 * for the subsequent filling-in by the upper-layer driver (ULD)
245 * and posting on the corresponding channel (@channelh)
246 * via xge_hal_fifo_dtr_post().
248 * Returns: XGE_HAL_OK - success;
249 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
251 * See also: xge_hal_fifo_dtr_reserve_sp(), xge_hal_fifo_dtr_free(),
252 * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
253 * Usage: See ex_xmit{}.
255 __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
256 xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh
,
257 xge_hal_dtr_h
*dtrh
, const int frags
)
259 xge_hal_status_e status
= XGE_HAL_OK
;
260 int alloc_frags
= 0, dang_frags
= 0;
261 xge_hal_fifo_txd_t
*curr_txdp
= NULL
;
262 xge_hal_fifo_txd_t
*next_txdp
;
263 xge_hal_fifo_txdl_priv_t
*next_txdl_priv
, *curr_txdl_priv
= NULL
;
264 xge_hal_fifo_t
*fifo
= (xge_hal_fifo_t
*)channelh
;
265 int max_frags
= fifo
->config
->max_frags
;
266 xge_hal_dtr_h dang_dtrh
= NULL
;
267 #if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
268 unsigned long flags
=0;
270 xge_debug_fifo(XGE_TRACE
, "dtr_reserve_many called for frags %d",
272 xge_assert(frags
< (fifo
->txdl_per_memblock
* max_frags
));
273 #if defined(XGE_HAL_TX_MULTI_RESERVE)
274 xge_os_spin_lock(&fifo
->channel
.reserve_lock
);
275 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
276 xge_os_spin_lock_irq(&fifo
->channel
.reserve_lock
, flags
);
278 while(alloc_frags
< frags
) {
279 status
= __hal_channel_dtr_alloc(channelh
,
280 (xge_hal_dtr_h
*)(void*)&next_txdp
);
281 if (status
!= XGE_HAL_OK
){
282 xge_debug_fifo(XGE_ERR
,
283 "failed to allocate linked fragments rc %d",
285 xge_assert(status
== XGE_HAL_INF_OUT_OF_DESCRIPTORS
);
287 xge_assert(alloc_frags
/max_frags
);
288 __hal_fifo_txdl_restore_many(channelh
,
289 (xge_hal_fifo_txd_t
*) *dtrh
, alloc_frags
/max_frags
);
292 xge_assert(dang_frags
/max_frags
);
293 __hal_fifo_txdl_restore_many(channelh
,
294 (xge_hal_fifo_txd_t
*) dang_dtrh
, dang_frags
/max_frags
);
298 xge_debug_fifo(XGE_TRACE
, "allocated linked dtrh %p"
299 " for frags %d", next_txdp
, frags
);
300 next_txdl_priv
= __hal_fifo_txdl_priv(next_txdp
);
301 xge_assert(next_txdl_priv
);
302 xge_assert(next_txdl_priv
->first_txdp
== next_txdp
);
303 next_txdl_priv
->dang_txdl
= NULL
;
304 next_txdl_priv
->dang_frags
= 0;
305 next_txdl_priv
->next_txdl_priv
= NULL
;
306 #if defined(XGE_OS_MEMORY_CHECK)
307 next_txdl_priv
->allocated
= 1;
309 if (!curr_txdp
|| !curr_txdl_priv
) {
310 curr_txdp
= next_txdp
;
311 curr_txdl_priv
= next_txdl_priv
;
312 *dtrh
= (xge_hal_dtr_h
)next_txdp
;
313 alloc_frags
= max_frags
;
316 if (curr_txdl_priv
->memblock
==
317 next_txdl_priv
->memblock
) {
318 xge_debug_fifo(XGE_TRACE
,
319 "linking dtrh %p, with %p",
321 xge_assert (next_txdp
==
322 curr_txdp
+ max_frags
);
323 alloc_frags
+= max_frags
;
324 curr_txdl_priv
->next_txdl_priv
= next_txdl_priv
;
328 xge_assert(dang_dtrh
== NULL
);
330 dang_frags
= alloc_frags
;
331 xge_debug_fifo(XGE_TRACE
,
332 "dangling dtrh %p, linked with dtrh %p",
334 next_txdl_priv
->dang_txdl
= (xge_hal_fifo_txd_t
*) *dtrh
;
335 next_txdl_priv
->dang_frags
= alloc_frags
;
336 alloc_frags
= max_frags
;
339 curr_txdp
= next_txdp
;
340 curr_txdl_priv
= next_txdl_priv
;
343 #if defined(XGE_HAL_TX_MULTI_RESERVE)
344 xge_os_spin_unlock(&fifo
->channel
.reserve_lock
);
345 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
346 xge_os_spin_unlock_irq(&fifo
->channel
.reserve_lock
, flags
);
349 if (status
== XGE_HAL_OK
) {
350 xge_hal_fifo_txdl_priv_t
* txdl_priv
;
351 xge_hal_fifo_txd_t
*txdp
= (xge_hal_fifo_txd_t
*)*dtrh
;
352 xge_hal_stats_channel_info_t
*statsp
= &fifo
->channel
.stats
;
353 txdl_priv
= __hal_fifo_txdl_priv(txdp
);
354 /* reset the TxDL's private */
355 txdl_priv
->align_dma_offset
= 0;
356 txdl_priv
->align_vaddr_start
= txdl_priv
->align_vaddr
;
357 txdl_priv
->align_used_frags
= 0;
358 txdl_priv
->frags
= 0;
359 txdl_priv
->bytes_sent
= 0;
360 txdl_priv
->alloc_frags
= alloc_frags
;
362 txdp
->control_1
= txdp
->control_2
= 0;
364 #if defined(XGE_OS_MEMORY_CHECK)
365 txdl_priv
->allocated
= 1;
367 /* update statistics */
368 statsp
->total_posts_dtrs_many
++;
369 statsp
->total_posts_frags_many
+= txdl_priv
->alloc_frags
;
370 if (txdl_priv
->dang_frags
){
371 statsp
->total_posts_dang_dtrs
++;
372 statsp
->total_posts_dang_frags
+= txdl_priv
->dang_frags
;
380 * xge_hal_fifo_dtr_reserve - Reserve fifo descriptor.
381 * @channelh: Channel handle.
382 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
383 * with a valid handle.
385 * Reserve a single TxDL (that is, fifo descriptor)
386 * for the subsequent filling-in by the upper-layer driver (ULD)
387 * and posting on the corresponding channel (@channelh)
388 * via xge_hal_fifo_dtr_post().
390 * Note: it is the responsibility of ULD to reserve multiple descriptors
391 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
392 * carries up to configured number (fifo.max_frags) of contiguous buffers.
394 * Returns: XGE_HAL_OK - success;
395 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
397 * See also: xge_hal_fifo_dtr_reserve_sp(), xge_hal_fifo_dtr_free(),
398 * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
399 * Usage: See ex_xmit{}.
401 __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
402 xge_hal_fifo_dtr_reserve(xge_hal_channel_h channelh
, xge_hal_dtr_h
*dtrh
)
404 xge_hal_status_e status
;
405 #if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
406 unsigned long flags
=0;
409 #if defined(XGE_HAL_TX_MULTI_RESERVE)
410 xge_os_spin_lock(&((xge_hal_channel_t
*)channelh
)->reserve_lock
);
411 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
412 xge_os_spin_lock_irq(&((xge_hal_channel_t
*)channelh
)->reserve_lock
,
416 status
= __hal_channel_dtr_alloc(channelh
, dtrh
);
418 #if defined(XGE_HAL_TX_MULTI_RESERVE)
419 xge_os_spin_unlock(&((xge_hal_channel_t
*)channelh
)->reserve_lock
);
420 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
421 xge_os_spin_unlock_irq(&((xge_hal_channel_t
*)channelh
)->reserve_lock
,
425 if (status
== XGE_HAL_OK
) {
426 xge_hal_fifo_txd_t
*txdp
= (xge_hal_fifo_txd_t
*)*dtrh
;
427 xge_hal_fifo_txdl_priv_t
*txdl_priv
;
429 txdl_priv
= __hal_fifo_txdl_priv(txdp
);
431 /* reset the TxDL's private */
432 txdl_priv
->align_dma_offset
= 0;
433 txdl_priv
->align_vaddr_start
= txdl_priv
->align_vaddr
;
434 txdl_priv
->align_used_frags
= 0;
435 txdl_priv
->frags
= 0;
436 txdl_priv
->alloc_frags
=
437 ((xge_hal_fifo_t
*)channelh
)->config
->max_frags
;
438 txdl_priv
->dang_txdl
= NULL
;
439 txdl_priv
->dang_frags
= 0;
440 txdl_priv
->next_txdl_priv
= NULL
;
441 txdl_priv
->bytes_sent
= 0;
444 txdp
->control_1
= txdp
->control_2
= 0;
446 #if defined(XGE_OS_MEMORY_CHECK)
447 txdl_priv
->allocated
= 1;
455 * xge_hal_fifo_dtr_reserve_sp - Reserve fifo descriptor and store it in
456 * the ULD-provided "scratch" memory.
457 * @channelh: Channel handle.
458 * @dtr_sp_size: Size of the %dtr_sp "scratch pad" that HAL can use for TxDL.
459 * @dtr_sp: "Scratch pad" supplied by upper-layer driver (ULD).
461 * Reserve TxDL and fill-in ULD supplied "scratch pad". The difference
462 * between this API and xge_hal_fifo_dtr_reserve() is (possibly) -
465 * If upper-layer uses ULP-defined commands, and if those commands have enough
466 * space for HAL/Xframe descriptors - then it is better (read: faster) to fit
467 * all the per-command information into one command, which is typically
468 * one contiguous block.
470 * Note: Unlike xge_hal_fifo_dtr_reserve(), this function can be used to
471 * allocate a single descriptor for transmit operation.
473 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_free(),
474 * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
476 __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
477 xge_hal_fifo_dtr_reserve_sp(xge_hal_channel_h channelh
, int dtr_sp_size
,
478 xge_hal_dtr_h dtr_sp
)
480 /* FIXME: implement */
485 * xge_hal_fifo_dtr_post - Post descriptor on the fifo channel.
486 * @channelh: Channel handle.
487 * @dtrh: Descriptor obtained via xge_hal_fifo_dtr_reserve() or
488 * xge_hal_fifo_dtr_reserve_sp()
489 * @frags: Number of contiguous buffers that are part of a single
490 * transmit operation.
492 * Post descriptor on the 'fifo' type channel for transmission.
493 * Prior to posting the descriptor should be filled in accordance with
494 * Host/Xframe interface specification for a given service (LL, etc.).
496 * See also: xge_hal_fifo_dtr_post_many(), xge_hal_ring_dtr_post().
497 * Usage: See ex_xmit{}.
499 __HAL_STATIC_FIFO __HAL_INLINE_FIFO
void
500 xge_hal_fifo_dtr_post(xge_hal_channel_h channelh
, xge_hal_dtr_h dtrh
)
502 xge_hal_fifo_t
*fifo
= (xge_hal_fifo_t
*)channelh
;
503 xge_hal_fifo_txdl_priv_t
*txdl_priv
;
504 xge_hal_fifo_txd_t
*txdp_last
;
505 xge_hal_fifo_txd_t
*txdp_first
;
506 #if defined(XGE_HAL_TX_MULTI_POST_IRQ)
507 unsigned long flags
= 0;
510 txdl_priv
= __hal_fifo_txdl_priv(dtrh
);
512 txdp_first
= (xge_hal_fifo_txd_t
*)dtrh
;
513 txdp_first
->control_1
|= XGE_HAL_TXD_GATHER_CODE_FIRST
;
514 txdp_first
->control_2
|= fifo
->interrupt_type
;
516 txdp_last
= (xge_hal_fifo_txd_t
*)dtrh
+ (txdl_priv
->frags
- 1);
517 txdp_last
->control_1
|= XGE_HAL_TXD_GATHER_CODE_LAST
;
519 #if defined(XGE_HAL_TX_MULTI_POST)
520 xge_os_spin_lock(fifo
->post_lock_ptr
);
521 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
522 xge_os_spin_lock_irq(fifo
->post_lock_ptr
, flags
);
525 __hal_fifo_dtr_post_single(channelh
, dtrh
,
526 (u64
)(XGE_HAL_TX_FIFO_FIRST_LIST
| XGE_HAL_TX_FIFO_LAST_LIST
));
528 #if defined(XGE_HAL_TX_MULTI_POST)
529 xge_os_spin_unlock(fifo
->post_lock_ptr
);
530 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
531 xge_os_spin_unlock_irq(fifo
->post_lock_ptr
, flags
);
536 * xge_hal_fifo_dtr_post_many - Post multiple descriptors on fifo
538 * @channelh: Channel to post descriptor.
539 * @num: Number of descriptors (i.e., fifo TxDLs) in the %dtrs[].
540 * @dtrs: Descriptors obtained via xge_hal_fifo_dtr_reserve().
541 * @frags_arr: Number of fragments carried @dtrs descriptors.
542 * Note that frag_arr[i] corresponds to descriptor dtrs[i].
544 * Post multi-descriptor on the fifo channel. The operation is atomic:
545 * all descriptors are posted on the channel "back-to-back" without
546 * letting other posts (possibly driven by multiple transmitting threads)
549 * See also: xge_hal_fifo_dtr_post(), xge_hal_ring_dtr_post().
551 __HAL_STATIC_FIFO __HAL_INLINE_FIFO
void
552 xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh
, int num
,
553 xge_hal_dtr_h dtrs
[])
556 xge_hal_fifo_t
*fifo
= (xge_hal_fifo_t
*)channelh
;
557 xge_hal_fifo_txd_t
*txdp_last
;
558 xge_hal_fifo_txd_t
*txdp_first
;
559 xge_hal_fifo_txdl_priv_t
*txdl_priv_last
;
560 #if defined(XGE_HAL_TX_MULTI_POST_IRQ)
561 unsigned long flags
= 0;
566 txdp_first
= (xge_hal_fifo_txd_t
*)dtrs
[0];
567 txdp_first
->control_1
|= XGE_HAL_TXD_GATHER_CODE_FIRST
;
568 txdp_first
->control_2
|= fifo
->interrupt_type
;
570 txdl_priv_last
= __hal_fifo_txdl_priv(dtrs
[num
-1]);
571 txdp_last
= (xge_hal_fifo_txd_t
*)dtrs
[num
-1] +
572 (txdl_priv_last
->frags
- 1);
573 txdp_last
->control_1
|= XGE_HAL_TXD_GATHER_CODE_LAST
;
575 #if defined(XGE_HAL_TX_MULTI_POST)
576 xge_os_spin_lock(&((xge_hal_channel_t
*)channelh
)->post_lock
);
577 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
578 xge_os_spin_lock_irq(&((xge_hal_channel_t
*)channelh
)->post_lock
,
582 for (i
=0; i
<num
; i
++) {
583 xge_hal_fifo_txdl_priv_t
*txdl_priv
;
585 xge_hal_dtr_h dtrh
= dtrs
[i
];
587 txdl_priv
= __hal_fifo_txdl_priv(dtrh
);
588 txdl_priv
= txdl_priv
; /* Cheat lint */
592 val64
|= XGE_HAL_TX_FIFO_FIRST_LIST
;
593 } else if (i
== num
-1) {
594 val64
|= XGE_HAL_TX_FIFO_LAST_LIST
;
597 val64
|= XGE_HAL_TX_FIFO_SPECIAL_FUNC
;
598 __hal_fifo_dtr_post_single(channelh
, dtrh
, val64
);
601 #if defined(XGE_HAL_TX_MULTI_POST)
602 xge_os_spin_unlock(&((xge_hal_channel_t
*)channelh
)->post_lock
);
603 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
604 xge_os_spin_unlock_irq(&((xge_hal_channel_t
*)channelh
)->post_lock
,
608 fifo
->channel
.stats
.total_posts_many
++;
612 * xge_hal_fifo_dtr_next_completed - Retrieve next completed descriptor.
613 * @channelh: Channel handle.
614 * @dtrh: Descriptor handle. Returned by HAL.
615 * @t_code: Transfer code, as per Xframe User Guide,
616 * Transmit Descriptor Format.
619 * Retrieve the _next_ completed descriptor.
620 * HAL uses channel callback (*xge_hal_channel_callback_f) to notify
621 * upper-layer driver (ULD) of new completed descriptors. After that
622 * the ULD can use xge_hal_fifo_dtr_next_completed to retrieve the rest
623 * completions (the very first completion is passed by HAL via
624 * xge_hal_channel_callback_f).
626 * Implementation-wise, the upper-layer driver is free to call
627 * xge_hal_fifo_dtr_next_completed either immediately from inside the
628 * channel callback, or in a deferred fashion and separate (from HAL)
631 * Non-zero @t_code means failure to process the descriptor.
632 * The failure could happen, for instance, when the link is
633 * down, in which case Xframe completes the descriptor because it
634 * is not able to send the data out.
636 * For details please refer to Xframe User Guide.
638 * Returns: XGE_HAL_OK - success.
639 * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
640 * are currently available for processing.
642 * See also: xge_hal_channel_callback_f{},
643 * xge_hal_ring_dtr_next_completed().
644 * Usage: See ex_tx_compl{}.
646 __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
647 xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh
,
648 xge_hal_dtr_h
*dtrh
, u8
*t_code
)
650 xge_hal_fifo_txd_t
*txdp
;
651 xge_hal_fifo_t
*fifo
= (xge_hal_fifo_t
*)channelh
;
652 #if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
653 xge_hal_fifo_txdl_priv_t
*txdl_priv
;
656 __hal_channel_dtr_try_complete(channelh
, dtrh
);
657 txdp
= (xge_hal_fifo_txd_t
*)*dtrh
;
659 return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS
;
662 #if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
663 txdl_priv
= __hal_fifo_txdl_priv(txdp
);
665 /* sync TxDL to read the ownership
667 * Note: 16bytes means Control_1 & Control_2 */
668 xge_os_dma_sync(fifo
->channel
.pdev
,
669 txdl_priv
->dma_handle
,
671 txdl_priv
->dma_offset
,
673 XGE_OS_DMA_DIR_FROMDEVICE
);
676 /* check whether host owns it */
677 if ( !(txdp
->control_1
& XGE_HAL_TXD_LIST_OWN_XENA
) ) {
679 xge_assert(txdp
->host_control
!=0);
681 __hal_channel_dtr_complete(channelh
);
683 *t_code
= (u8
)XGE_HAL_GET_TXD_T_CODE(txdp
->control_1
);
685 /* see XGE_HAL_SET_TXD_T_CODE() above.. */
686 xge_assert(*t_code
!= XGE_HAL_TXD_T_CODE_UNUSED_5
);
688 if (fifo
->channel
.usage_cnt
> 0)
689 fifo
->channel
.usage_cnt
--;
694 /* no more completions */
696 return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS
;
700 * xge_hal_fifo_dtr_free - Free descriptor.
701 * @channelh: Channel handle.
702 * @dtr: Descriptor handle.
704 * Free the reserved descriptor. This operation is "symmetrical" to
705 * xge_hal_fifo_dtr_reserve or xge_hal_fifo_dtr_reserve_sp.
706 * The "free-ing" completes the descriptor's lifecycle.
708 * After free-ing (see xge_hal_fifo_dtr_free()) the descriptor again can
711 * - reserved (xge_hal_fifo_dtr_reserve);
713 * - posted (xge_hal_fifo_dtr_post);
715 * - completed (xge_hal_fifo_dtr_next_completed);
717 * - and recycled again (xge_hal_fifo_dtr_free).
719 * For alternative state transitions and more details please refer to
722 * See also: xge_hal_ring_dtr_free(), xge_hal_fifo_dtr_reserve().
723 * Usage: See ex_tx_compl{}.
725 __HAL_STATIC_FIFO __HAL_INLINE_FIFO
void
726 xge_hal_fifo_dtr_free(xge_hal_channel_h channelh
, xge_hal_dtr_h dtr
)
728 #if defined(XGE_HAL_TX_MULTI_FREE_IRQ)
729 unsigned long flags
= 0;
731 xge_hal_fifo_txdl_priv_t
*txdl_priv
= __hal_fifo_txdl_priv(
732 (xge_hal_fifo_txd_t
*)dtr
);
733 int max_frags
= ((xge_hal_fifo_t
*)channelh
)->config
->max_frags
;
734 #if defined(XGE_HAL_TX_MULTI_FREE)
735 xge_os_spin_lock(&((xge_hal_channel_t
*)channelh
)->free_lock
);
736 #elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
737 xge_os_spin_lock_irq(&((xge_hal_channel_t
*)channelh
)->free_lock
,
741 if (txdl_priv
->alloc_frags
> max_frags
) {
742 xge_hal_fifo_txd_t
*dang_txdp
= (xge_hal_fifo_txd_t
*)
743 txdl_priv
->dang_txdl
;
744 int dang_frags
= txdl_priv
->dang_frags
;
745 int alloc_frags
= txdl_priv
->alloc_frags
;
746 txdl_priv
->dang_txdl
= NULL
;
747 txdl_priv
->dang_frags
= 0;
748 txdl_priv
->alloc_frags
= 0;
749 /* dtrh must have a linked list of dtrh */
750 xge_assert(txdl_priv
->next_txdl_priv
);
752 /* free any dangling dtrh first */
754 xge_debug_fifo(XGE_TRACE
,
755 "freeing dangled dtrh %p for %d fragments",
756 dang_txdp
, dang_frags
);
757 __hal_fifo_txdl_free_many(channelh
, dang_txdp
,
758 max_frags
, dang_frags
);
761 /* now free the reserved dtrh list */
762 xge_debug_fifo(XGE_TRACE
,
763 "freeing dtrh %p list of %d fragments", dtr
,
765 __hal_fifo_txdl_free_many(channelh
,
766 (xge_hal_fifo_txd_t
*)dtr
, max_frags
,
770 __hal_channel_dtr_free(channelh
, dtr
);
772 ((xge_hal_channel_t
*)channelh
)->poll_bytes
+= txdl_priv
->bytes_sent
;
774 #if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
775 __hal_fifo_txdl_priv(dtr
)->allocated
= 0;
778 #if defined(XGE_HAL_TX_MULTI_FREE)
779 xge_os_spin_unlock(&((xge_hal_channel_t
*)channelh
)->free_lock
);
780 #elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
781 xge_os_spin_unlock_irq(&((xge_hal_channel_t
*)channelh
)->free_lock
,
788 * xge_hal_fifo_dtr_buffer_set_aligned - Align transmit buffer and fill
789 * in fifo descriptor.
790 * @channelh: Channel handle.
791 * @dtrh: Descriptor handle.
792 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
794 * @vaddr: Virtual address of the data buffer.
795 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
796 * @size: Size of the data buffer (in bytes).
797 * @misaligned_size: Size (in bytes) of the misaligned portion of the
798 * data buffer. Calculated by the caller, based on the platform/OS/other
799 * specific criteria, which is outside of HAL's domain. See notes below.
801 * This API is part of the transmit descriptor preparation for posting
802 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
803 * xge_hal_fifo_dtr_mss_set() and xge_hal_fifo_dtr_cksum_set_bits().
804 * All three APIs fill in the fields of the fifo descriptor,
805 * in accordance with the Xframe specification.
806 * On the PCI-X based systems aligning transmit data typically provides better
807 * transmit performance. The typical alignment granularity: L2 cacheline size.
808 * However, HAL does not make assumptions in terms of the alignment granularity;
809 * this is specified via additional @misaligned_size parameter described above.
810 * Prior to calling xge_hal_fifo_dtr_buffer_set_aligned(),
811 * ULD is supposed to check alignment of a given fragment/buffer. For this HAL
812 * provides a separate xge_hal_check_alignment() API sufficient to cover
813 * most (but not all) possible alignment criteria.
814 * If the buffer appears to be aligned, the ULD calls
815 * xge_hal_fifo_dtr_buffer_set().
816 * Otherwise, ULD calls xge_hal_fifo_dtr_buffer_set_aligned().
818 * Note; This API is a "superset" of xge_hal_fifo_dtr_buffer_set(). In
819 * addition to filling in the specified descriptor it aligns transmit data on
820 * the specified boundary.
821 * Note: Decision on whether to align or not to align a given contiguous
822 * transmit buffer is outside of HAL's domain. To this end ULD can use any
823 * programmable criteria, which can help to 1) boost transmit performance,
824 * and/or 2) provide a workaround for PCI bridge bugs, if any.
826 * See also: xge_hal_fifo_dtr_buffer_set(),
827 * xge_hal_check_alignment().
829 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_post(),
830 * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_cksum_set_bits()
832 __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
833 xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh
,
834 xge_hal_dtr_h dtrh
, int frag_idx
, void *vaddr
,
835 dma_addr_t dma_pointer
, int size
, int misaligned_size
)
837 xge_hal_fifo_t
*fifo
= (xge_hal_fifo_t
*)channelh
;
838 xge_hal_fifo_txdl_priv_t
*txdl_priv
;
839 xge_hal_fifo_txd_t
*txdp
;
843 txdl_priv
= __hal_fifo_txdl_priv(dtrh
);
844 txdp
= (xge_hal_fifo_txd_t
*)dtrh
+ txdl_priv
->frags
;
847 txdp
->control_1
= txdp
->control_2
= 0;
850 /* On some systems buffer size could be zero.
851 * It is the responsibility of ULD and *not HAL* to
852 * detect it and skip it. */
853 xge_assert(size
> 0);
854 xge_assert(frag_idx
< txdl_priv
->alloc_frags
);
855 xge_assert(misaligned_size
!= 0 &&
856 misaligned_size
<= fifo
->config
->alignment_size
);
858 remaining_size
= size
- misaligned_size
;
859 xge_assert(remaining_size
>= 0);
861 xge_os_memcpy((char*)txdl_priv
->align_vaddr_start
,
862 vaddr
, misaligned_size
);
864 if (txdl_priv
->align_used_frags
>= fifo
->config
->max_aligned_frags
) {
865 return XGE_HAL_ERR_OUT_ALIGNED_FRAGS
;
868 /* setup new buffer */
869 prev_boff
= txdl_priv
->align_vaddr_start
- txdl_priv
->align_vaddr
;
870 txdp
->buffer_pointer
= (u64
)txdl_priv
->align_dma_addr
+ prev_boff
;
871 txdp
->control_1
|= XGE_HAL_TXD_BUFFER0_SIZE(misaligned_size
);
872 txdl_priv
->bytes_sent
+= misaligned_size
;
873 fifo
->channel
.stats
.total_buffers
++;
875 txdl_priv
->align_used_frags
++;
876 txdl_priv
->align_vaddr_start
+= fifo
->config
->alignment_size
;
877 txdl_priv
->align_dma_offset
= 0;
879 #if defined(XGE_OS_DMA_REQUIRES_SYNC)
880 /* sync new buffer */
881 xge_os_dma_sync(fifo
->channel
.pdev
,
882 txdl_priv
->align_dma_handle
,
883 txdp
->buffer_pointer
,
886 XGE_OS_DMA_DIR_TODEVICE
);
889 if (remaining_size
) {
890 xge_assert(frag_idx
< txdl_priv
->alloc_frags
);
892 txdp
->buffer_pointer
= (u64
)dma_pointer
+
895 XGE_HAL_TXD_BUFFER0_SIZE(remaining_size
);
896 txdl_priv
->bytes_sent
+= remaining_size
;
898 fifo
->channel
.stats
.total_buffers
++;
906 * xge_hal_fifo_dtr_buffer_append - Append the contents of virtually
907 * contiguous data buffer to a single physically contiguous buffer.
908 * @channelh: Channel handle.
909 * @dtrh: Descriptor handle.
910 * @vaddr: Virtual address of the data buffer.
911 * @size: Size of the data buffer (in bytes).
913 * This API is part of the transmit descriptor preparation for posting
914 * (via xge_hal_fifo_dtr_post()).
915 * The main difference of this API wrt to the APIs
916 * xge_hal_fifo_dtr_buffer_set_aligned() is that this API appends the
917 * contents of virtually contiguous data buffers received from
918 * upper layer into a single physically contiguous data buffer and the
919 * device will do a DMA from this buffer.
921 * See Also: xge_hal_fifo_dtr_buffer_finalize(), xge_hal_fifo_dtr_buffer_set(),
922 * xge_hal_fifo_dtr_buffer_set_aligned().
924 __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
925 xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh
, xge_hal_dtr_h dtrh
,
926 void *vaddr
, int size
)
928 xge_hal_fifo_t
*fifo
= (xge_hal_fifo_t
*)channelh
;
929 xge_hal_fifo_txdl_priv_t
*txdl_priv
;
932 xge_assert(size
> 0);
934 txdl_priv
= __hal_fifo_txdl_priv(dtrh
);
936 used
= txdl_priv
->align_vaddr_start
- txdl_priv
->align_vaddr
;
937 used
+= txdl_priv
->align_dma_offset
;
938 if (used
+ (unsigned int)size
> (unsigned int)fifo
->align_size
)
939 return XGE_HAL_ERR_OUT_ALIGNED_FRAGS
;
941 xge_os_memcpy((char*)txdl_priv
->align_vaddr_start
+
942 txdl_priv
->align_dma_offset
, vaddr
, size
);
944 fifo
->channel
.stats
.copied_frags
++;
946 txdl_priv
->align_dma_offset
+= size
;
951 * xge_hal_fifo_dtr_buffer_finalize - Prepares a descriptor that contains the
952 * single physically contiguous buffer.
954 * @channelh: Channel handle.
955 * @dtrh: Descriptor handle.
956 * @frag_idx: Index of the data buffer in the Txdl list.
958 * This API in conjunction with xge_hal_fifo_dtr_buffer_append() prepares
959 * a descriptor that consists of a single physically contiguous buffer
960 * which in turn contains the contents of one or more virtually contiguous
961 * buffers received from the upper layer.
963 * See Also: xge_hal_fifo_dtr_buffer_append().
965 __HAL_STATIC_FIFO __HAL_INLINE_FIFO
void
966 xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh
, xge_hal_dtr_h dtrh
,
969 xge_hal_fifo_t
*fifo
= (xge_hal_fifo_t
*)channelh
;
970 xge_hal_fifo_txdl_priv_t
*txdl_priv
;
971 xge_hal_fifo_txd_t
*txdp
;
974 xge_assert(frag_idx
< fifo
->config
->max_frags
);
976 txdl_priv
= __hal_fifo_txdl_priv(dtrh
);
977 txdp
= (xge_hal_fifo_txd_t
*)dtrh
+ txdl_priv
->frags
;
980 txdp
->control_1
= txdp
->control_2
= 0;
983 prev_boff
= txdl_priv
->align_vaddr_start
- txdl_priv
->align_vaddr
;
984 txdp
->buffer_pointer
= (u64
)txdl_priv
->align_dma_addr
+ prev_boff
;
986 XGE_HAL_TXD_BUFFER0_SIZE(txdl_priv
->align_dma_offset
);
987 txdl_priv
->bytes_sent
+= (unsigned int)txdl_priv
->align_dma_offset
;
988 fifo
->channel
.stats
.total_buffers
++;
989 fifo
->channel
.stats
.copied_buffers
++;
991 txdl_priv
->align_used_frags
++;
993 #if defined(XGE_OS_DMA_REQUIRES_SYNC)
994 /* sync pre-mapped buffer */
995 xge_os_dma_sync(fifo
->channel
.pdev
,
996 txdl_priv
->align_dma_handle
,
997 txdp
->buffer_pointer
,
999 txdl_priv
->align_dma_offset
,
1000 XGE_OS_DMA_DIR_TODEVICE
);
1003 /* increment vaddr_start for the next buffer_append() iteration */
1004 txdl_priv
->align_vaddr_start
+= txdl_priv
->align_dma_offset
;
1005 txdl_priv
->align_dma_offset
= 0;
1009 * xge_hal_fifo_dtr_buffer_set - Set transmit buffer pointer in the
1011 * @channelh: Channel handle.
1012 * @dtrh: Descriptor handle.
1013 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1015 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1016 * @size: Size of the data buffer (in bytes).
1018 * This API is part of the preparation of the transmit descriptor for posting
1019 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
1020 * xge_hal_fifo_dtr_mss_set() and xge_hal_fifo_dtr_cksum_set_bits().
1021 * All three APIs fill in the fields of the fifo descriptor,
1022 * in accordance with the Xframe specification.
1024 * See also: xge_hal_fifo_dtr_buffer_set_aligned(),
1025 * xge_hal_check_alignment().
1027 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_post(),
1028 * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_cksum_set_bits()
1029 * Prepare transmit descriptor for transmission (via
1030 * xge_hal_fifo_dtr_post()).
1031 * See also: xge_hal_fifo_dtr_vlan_set().
1032 * Note: Compare with xge_hal_fifo_dtr_buffer_set_aligned().
1034 * Usage: See ex_xmit{}.
1036 __HAL_STATIC_FIFO __HAL_INLINE_FIFO
void
1037 xge_hal_fifo_dtr_buffer_set(xge_hal_channel_h channelh
, xge_hal_dtr_h dtrh
,
1038 int frag_idx
, dma_addr_t dma_pointer
, int size
)
1040 xge_hal_fifo_t
*fifo
= (xge_hal_fifo_t
*)channelh
;
1041 xge_hal_fifo_txdl_priv_t
*txdl_priv
;
1042 xge_hal_fifo_txd_t
*txdp
;
1044 txdl_priv
= __hal_fifo_txdl_priv(dtrh
);
1045 txdp
= (xge_hal_fifo_txd_t
*)dtrh
+ txdl_priv
->frags
;
1047 if (frag_idx
!= 0) {
1048 txdp
->control_1
= txdp
->control_2
= 0;
1052 * it is the responsibility of upper layers and not HAL
1053 * detect it and skip zero-size fragment
1055 xge_assert(size
> 0);
1056 xge_assert(frag_idx
< txdl_priv
->alloc_frags
);
1058 txdp
->buffer_pointer
= (u64
)dma_pointer
;
1059 txdp
->control_1
|= XGE_HAL_TXD_BUFFER0_SIZE(size
);
1060 txdl_priv
->bytes_sent
+= size
;
1061 fifo
->channel
.stats
.total_buffers
++;
1066 * xge_hal_fifo_dtr_mss_set - Set MSS.
1067 * @dtrh: Descriptor handle.
1068 * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
1069 * ULD, which in turn inserts the MSS into the @dtrh.
1071 * This API is part of the preparation of the transmit descriptor for posting
1072 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
1073 * xge_hal_fifo_dtr_buffer_set(), xge_hal_fifo_dtr_buffer_set_aligned(),
1074 * and xge_hal_fifo_dtr_cksum_set_bits().
1075 * All these APIs fill in the fields of the fifo descriptor,
1076 * in accordance with the Xframe specification.
1078 * See also: xge_hal_fifo_dtr_reserve(),
1079 * xge_hal_fifo_dtr_post(), xge_hal_fifo_dtr_vlan_set().
1080 * Usage: See ex_xmit{}.
1082 __HAL_STATIC_FIFO __HAL_INLINE_FIFO
void
1083 xge_hal_fifo_dtr_mss_set(xge_hal_dtr_h dtrh
, int mss
)
1085 xge_hal_fifo_txd_t
*txdp
= (xge_hal_fifo_txd_t
*)dtrh
;
1087 txdp
->control_1
|= XGE_HAL_TXD_LSO_COF_CTRL(XGE_HAL_TXD_TCP_LSO
);
1088 txdp
->control_1
|= XGE_HAL_TXD_TCP_LSO_MSS(mss
);
1092 * xge_hal_fifo_dtr_cksum_set_bits - Offload checksum.
1093 * @dtrh: Descriptor handle.
1094 * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
1095 * and/or TCP and/or UDP.
1097 * Ask Xframe to calculate IPv4 & transport checksums for _this_ transmit
1099 * This API is part of the preparation of the transmit descriptor for posting
1100 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
1101 * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_buffer_set_aligned(),
1102 * and xge_hal_fifo_dtr_buffer_set().
1103 * All these APIs fill in the fields of the fifo descriptor,
1104 * in accordance with the Xframe specification.
1106 * See also: xge_hal_fifo_dtr_reserve(),
1107 * xge_hal_fifo_dtr_post(), XGE_HAL_TXD_TX_CKO_IPV4_EN,
1108 * XGE_HAL_TXD_TX_CKO_TCP_EN.
1109 * Usage: See ex_xmit{}.
1111 __HAL_STATIC_FIFO __HAL_INLINE_FIFO
void
1112 xge_hal_fifo_dtr_cksum_set_bits(xge_hal_dtr_h dtrh
, u64 cksum_bits
)
1114 xge_hal_fifo_txd_t
*txdp
= (xge_hal_fifo_txd_t
*)dtrh
;
1116 txdp
->control_2
|= cksum_bits
;
1121 * xge_hal_fifo_dtr_vlan_set - Set VLAN tag.
1122 * @dtrh: Descriptor handle.
1123 * @vlan_tag: 16bit VLAN tag.
1125 * Insert VLAN tag into specified transmit descriptor.
1126 * The actual insertion of the tag into outgoing frame is done by the hardware.
1127 * See also: xge_hal_fifo_dtr_buffer_set(), xge_hal_fifo_dtr_mss_set().
1129 __HAL_STATIC_FIFO __HAL_INLINE_FIFO
void
1130 xge_hal_fifo_dtr_vlan_set(xge_hal_dtr_h dtrh
, u16 vlan_tag
)
1132 xge_hal_fifo_txd_t
*txdp
= (xge_hal_fifo_txd_t
*)dtrh
;
1134 txdp
->control_2
|= XGE_HAL_TXD_VLAN_ENABLE
;
1135 txdp
->control_2
|= XGE_HAL_TXD_VLAN_TAG(vlan_tag
);
1139 * xge_hal_fifo_is_next_dtr_completed - Checks if the next dtr is completed
1140 * @channelh: Channel handle.
1142 __HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
1143 xge_hal_fifo_is_next_dtr_completed(xge_hal_channel_h channelh
)
1145 xge_hal_fifo_txd_t
*txdp
;
1148 __hal_channel_dtr_try_complete(channelh
, &dtrh
);
1149 txdp
= (xge_hal_fifo_txd_t
*)dtrh
;
1151 return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS
;
1154 /* check whether host owns it */
1155 if ( !(txdp
->control_1
& XGE_HAL_TXD_LIST_OWN_XENA
) ) {
1156 xge_assert(txdp
->host_control
!=0);
1160 /* no more completions */
1161 return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS
;