// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "ena_eth_com.h"

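/* Peek at the completion descriptor at the current CQ head. The descriptor is
 * returned only when its phase bit matches the expected phase, i.e. when the
 * device has actually written it; otherwise NULL is returned.
 */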
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u32 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) &
		      ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

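/* Return a pointer to the next free submission descriptor in a host-memory
 * (regular placement) SQ, based on the current tail position.
 */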
static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

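/* Copy one completed bounce-buffer line into the device-memory (LLQ) SQ.
 * Honors the TX burst budget when the device advertises one, orders the
 * writes before the copy and advances the SQ tail, flipping the phase bit
 * on wrap-around.
 */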
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
				   "Error: trying to send more packets than tx burst allows\n");
			return -ENOSPC;
		}

		io_sq->entries_in_tx_burst_left--;
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
			   io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

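/* Copy the packet header into the current bounce buffer, right after the
 * descriptors that precede the header. Only meaningful for LLQ SQs; for host
 * placement this is a no-op.
 */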
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src, u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) >
		     llq_info->desc_list_entry_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Trying to write header larger than llq entry can accommodate\n");
		return -EFAULT;
	}

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Bounce buffer is NULL\n");
		return -EFAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

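/* Return the next descriptor slot inside the current LLQ bounce buffer and
 * account for it in the per-packet control state.
 */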
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

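/* Flush the current bounce buffer to the device if it holds anything, pick up
 * a fresh one and reset the per-packet descriptor accounting. No-op for host
 * placement.
 */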
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	/* bounce buffer was used, so write it and get a new one */
	if (likely(pkt_ctrl->idx)) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return 0;
}

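/* Dispatch to the LLQ or host-memory descriptor getter according to the SQ
 * placement policy.
 */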
static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

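/* Advance an LLQ SQ after a descriptor was filled: once the current bounce
 * buffer line is full, write it to the device and start a new line.
 */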
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return 0;
}

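/* Advance the SQ tail after a descriptor was written. LLQ SQs defer to the
 * bounce-buffer logic; host-placement SQs bump the tail and flip the phase
 * bit on wrap-around.
 */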
static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

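/* Translate an RX completion descriptor index into a pointer inside the CQ
 * descriptor ring (the index is masked to the queue depth).
 */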
static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
			idx * io_cq->cdesc_entry_size_in_bytes);
}

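/* Walk the CQ from the current head and count the completion descriptors that
 * belong to the next received packet. Once the 'last' descriptor is seen,
 * return the descriptor count and the index of the first descriptor; if the
 * packet is not fully completed yet, keep the partial count in the io_cq
 * state and return 0.
 */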
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) &
			ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
			   io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

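/* Build a TX metadata descriptor (MSS, L3/L4 header lengths and offsets) in
 * the next SQ slot and advance the SQ tail.
 */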
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return -EFAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

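/* Create a TX metadata descriptor when needed. With meta caching disabled a
 * descriptor is always created (the metadata must be valid); otherwise one is
 * created, and cached, only when the metadata differs from the cached copy.
 * *have_meta reports whether a descriptor was emitted.
 */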
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When disable meta caching is set, don't bother to save the meta and
	 * compare it to the stored version, just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		if (unlikely(!ena_tx_ctx->meta_valid))
			return -EINVAL;

		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return 0;
}

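/* Extract the L3/L4 protocol, checksum, hash and fragmentation flags from the
 * RX completion descriptor status into the RX context.
 */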
static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
				 struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
		   ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
		   ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

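/* Prepare a TX packet for transmission: validate the request, push the header
 * (for LLQ), emit an optional metadata descriptor and one data descriptor per
 * buffer, then report how many HW descriptors were consumed via nb_hw_desc.
 */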
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

	/* num_bufs +1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Header size is too large %d max header: %d\n",
			   header_len, io_sq->tx_max_header_size);
		return -EINVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
		     !buffer_to_push))
		return -EINVAL;

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
			   "Failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc share the same desc as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc))
				return rc;

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return -EFAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc))
		return rc;

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

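/* Retrieve one received packet from the CQ: collect its completion
 * descriptors into ena_rx_ctx->ena_bufs, validate the req_ids, update the
 * paired SQ head and extract the RX flags from the last descriptor.
 */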
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
		   nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
			   ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return -EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
		   io_sq->qid, io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;

	return 0;
}

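/* Post a single RX buffer to the RX SQ: fill a descriptor with the buffer
 * address, length and req_id, then advance the SQ tail.
 */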
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);

	desc->req_id = req_id;

	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
		   __func__, io_sq->qid, req_id);

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

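/* Return true when no new completion descriptor is pending on the CQ. */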
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);
	if (cdesc)
		return false;
	else
		return true;
}