/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ENA_ETH_COM_H_
#define ENA_ETH_COM_H_

#include "ena_com.h"
/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4
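/* For example, with a 1024-entry completion queue the head pointer is written
 * back to the device only after more than 1024 / ENA_COMP_HEAD_THRESH = 256
 * completions have gone unreported (see ena_com_update_dev_comp_head()).
 */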

struct ena_com_tx_ctx {
	struct ena_com_tx_meta ena_meta;
	struct ena_com_buf *ena_bufs;
	/* For LLQ, header buffer - pushed to the device mem space */
	void *push_header;

	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	u16 num_bufs;
	u16 req_id;
	/* For regular queue, indicate the size of the header
	 * For LLQ, indicate the size of the pushed buffer
	 */
	u16 header_len;

	u8 meta_valid;
	u8 tso_enable;
	u8 l3_csum_enable;
	u8 l4_csum_enable;
	u8 l4_csum_partial;
	u8 df; /* Don't fragment */
};

struct ena_com_rx_ctx {
	struct ena_com_rx_buf_info *ena_bufs;
	enum ena_eth_io_l3_proto_index l3_proto;
	enum ena_eth_io_l4_proto_index l4_proto;
	bool l3_csum_err;
	bool l4_csum_err;
	u8 l4_csum_checked;
	/* fragmented packet */
	bool frag;
	u32 hash;
	u16 descs;
	int max_bufs;
};

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc);

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx);

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id);

bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
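
/*
 * Rough shape of the transmit path built on the helpers in this file
 * (a minimal sketch, not the driver's actual code - the real flow lives in
 * ena_netdev.c and adds DMA mapping, locking and error handling; names such
 * as needed_descs, bufs and num_frags are illustrative only):
 *
 *	struct ena_com_tx_ctx ctx = {};
 *	int nb_hw_desc;
 *
 *	if (!ena_com_sq_have_enough_space(io_sq, needed_descs))
 *		return NETDEV_TX_BUSY;
 *
 *	ctx.ena_bufs = bufs;
 *	ctx.num_bufs = num_frags;
 *	... fill meta / checksum fields ...
 *
 *	if (ena_com_is_doorbell_needed(io_sq, &ctx))
 *		ena_com_write_sq_doorbell(io_sq);	(flush the current LLQ burst)
 *
 *	ena_com_prepare_tx(io_sq, &ctx, &nb_hw_desc);
 *	ena_com_write_sq_doorbell(io_sq);
 */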

static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
				       struct ena_eth_io_intr_reg *intr_reg)
{
	writel(intr_reg->intr_control, io_cq->unmask_reg);
}

static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
{
	u16 tail, next_to_comp, cnt;

	next_to_comp = io_sq->next_to_comp;
	tail = io_sq->tail;
	cnt = tail - next_to_comp;

	return io_sq->q_depth - 1 - cnt;
}
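
/* Note: the count is deliberately reported as q_depth - 1 - cnt, i.e. one
 * descriptor short of the physical capacity, so callers never fill the ring
 * completely.
 */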

/* Check if the submission queue has enough space to hold required_buffers */
static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
						u16 required_buffers)
{
	int temp;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return ena_com_free_desc(io_sq) >= required_buffers;

	/* This calculation doesn't need to be 100% accurate. To reduce the
	 * calculation overhead, just add 2 lines to the required descs:
	 * one for the header line and one to compensate for the division
	 * rounding down.
	 */
	temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;

	return ena_com_free_desc(io_sq) > temp;
}
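
/* Example: with descs_per_entry = 2, a request for 15 buffers gives
 * temp = 15 / 2 + 2 = 9, so the LLQ is reported to have room only while
 * ena_com_free_desc() returns more than 9.
 */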

static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	if (!ena_tx_ctx->meta_valid)
		return false;

	return !!memcmp(&io_sq->cached_tx_meta,
			&ena_tx_ctx->ena_meta,
			sizeof(struct ena_com_tx_meta));
}

static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
{
	return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
	       io_sq->llq_info.max_entries_in_tx_burst > 0;
}

static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
					      struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_com_llq_info *llq_info;
	int descs_after_first_entry;
	int num_entries_needed = 1;
	int num_descs;

	if (!is_llq_max_tx_burst_exists(io_sq))
		return false;

	llq_info = &io_sq->llq_info;
	num_descs = ena_tx_ctx->num_bufs;

	/* A changed TX meta descriptor consumes one extra descriptor */
	if (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
		++num_descs;

	if (num_descs > llq_info->descs_num_before_header) {
		descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
		num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
						   llq_info->descs_per_entry);
	}

	pr_debug("queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
		 num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
}
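
/* Example: num_descs = 5, descs_num_before_header = 2 and descs_per_entry = 2
 * give num_entries_needed = 1 + DIV_ROUND_UP(3, 2) = 3, so a doorbell is
 * needed whenever fewer than 3 entries remain in the current TX burst.
 */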

static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
		 io_sq->qid, tail);

	writel(tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		pr_debug("reset available entries in tx burst for queue %d to %d\n",
			 io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}

	return 0;
}
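
/* Each doorbell also replenishes the LLQ burst budget, so
 * ena_com_is_doorbell_needed() effectively asks whether the next packet would
 * overrun what is left of the budget since the previous doorbell.
 */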

static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
{
	u16 unreported_comp, head;
	bool need_update;

	if (unlikely(io_cq->cq_head_db_reg)) {
		head = io_cq->head;
		unreported_comp = head - io_cq->last_head_update;
		need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);

		if (unlikely(need_update)) {
			pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
				 io_cq->qid, head);
			writel(head, io_cq->cq_head_db_reg);
			io_cq->last_head_update = head;
		}
	}

	return 0;
}

static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
					    u8 numa_node)
{
	struct ena_eth_io_numa_node_cfg_reg numa_cfg;

	if (!io_cq->numa_node_cfg_reg)
		return;

	numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
		| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;

	writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}

static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
{
	io_sq->next_to_comp += elem;
}

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}
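
/* With a power-of-two q_depth of 256, incrementing head from 255 to 256 makes
 * (head & 255) == 0, so the expected phase bit flips exactly once per full
 * pass over the completion ring.
 */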

static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
					     u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

	/* When the current completion descriptor phase isn't the same as the
	 * expected one, it means the device hasn't updated this completion yet.
	 */
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return -EAGAIN;

	dma_rmb();

	*req_id = READ_ONCE(cdesc->req_id);
	if (unlikely(*req_id >= io_cq->q_depth)) {
		pr_err("Invalid req id %d\n", cdesc->req_id);
		return -EINVAL;
	}

	ena_com_cq_inc_head(io_cq);

	return 0;
}
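
/*
 * Rough shape of the TX completion path built on the helpers above (again a
 * sketch only; the in-kernel cleanup runs under NAPI in ena_netdev.c - the
 * descs_used[] bookkeeping below is illustrative):
 *
 *	u16 req_id;
 *
 *	while (ena_com_tx_comp_req_id_get(io_cq, &req_id) == 0) {
 *		... unmap and free the buffers recorded for req_id ...
 *		ena_com_comp_ack(io_sq, descs_used[req_id]);
 *	}
 *
 *	ena_com_update_dev_comp_head(io_cq);
 */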

#endif /* ENA_ETH_COM_H_ */