/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_com.h"
/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1
#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))
#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_POLL_MS	5

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	struct completion wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};
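/* Helper for filling a device-visible ena_common_mem_addr descriptor:
 * the 64-bit DMA address is split into low/high parts after checking
 * that it fits within the DMA mask the device reported.
 */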
static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				struct ena_common_mem_addr *ena_addr,
				dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		pr_err("dma address has more bits than the device supports\n");
		return -EINVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
					 GFP_KERNEL);
	if (!sq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	return 0;
}
static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
					 GFP_KERNEL);
	if (!cq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	return 0;
}
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
					   GFP_KERNEL);
	if (!aenq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	aenq->head = aenq->q_depth;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		pr_err("aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}
static void comp_ctxt_release(struct ena_com_admin_queue *queue,
			      struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	atomic_dec(&queue->outstanding_cmds);
}
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		pr_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		atomic_inc(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}
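/* Submit a command to the admin submission queue without taking the queue
 * lock; ena_com_submit_admin_cmd() below is the locked wrapper. A completion
 * context is reserved so the completion handler can match the device's
 * answer back to this command.
 */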
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		pr_debug("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(-EINVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}
static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!queue->comp_ctx)) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}

	return 0;
}
static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags = 0;
	struct ena_comp_ctx *comp_ctx;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		return ERR_PTR(-ENODEV);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (IS_ERR(comp_ctx))
		admin_queue->running_state = false;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);

	return comp_ctx;
}
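/* Allocate the descriptor area of an IO submission queue. Host-memory queues
 * get DMA-coherent descriptor rings; device-memory (LLQ) queues only get
 * bounce buffers, since their descriptors are pushed through the device
 * memory BAR.
 */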
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			dma_alloc_coherent(ena_dev->dmadev, size,
					   &io_sq->desc_addr.phys_addr,
					   GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				dma_alloc_coherent(ena_dev->dmadev, size,
						   &io_sq->desc_addr.phys_addr,
						   GFP_KERNEL);
		}

		if (!io_sq->desc_addr.virt_addr) {
			pr_err("memory allocation failed\n");
			return -ENOMEM;
		}
	}

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Allocate bounce buffers */
		io_sq->bounce_buf_ctrl.buffer_size =
			ena_dev->llq_info.desc_list_entry_size;
		io_sq->bounce_buf_ctrl.buffers_num =
			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		io_sq->bounce_buf_ctrl.next_to_use = 0;

		size = io_sq->bounce_buf_ctrl.buffer_size *
			io_sq->bounce_buf_ctrl.buffers_num;

		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->bounce_buf_ctrl.base_buffer =
			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer =
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			pr_err("bounce buffer memory allocation failed\n");
			return -ENOMEM;
		}

		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
		       sizeof(io_sq->llq_info));

		/* Initiate the first bounce buffer */
		io_sq->llq_buf_ctrl.curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;

		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
			io_sq->entries_in_tx_burst_left =
				io_sq->llq_info.max_entries_in_tx_burst;
	}

	io_sq->next_to_comp = 0;

	return 0;
}
static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
		dma_alloc_coherent(ena_dev->dmadev, size,
				   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
			dma_alloc_coherent(ena_dev->dmadev, size,
					   &io_cq->cdesc_addr.phys_addr,
					   GFP_KERNEL);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	return 0;
}
static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		complete(&comp_ctx->wait_event);
}
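/* Walk the admin completion queue, handle every entry whose phase bit matches
 * the current phase and then publish the new head/phase back to the queue
 * state.
 */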
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}
static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		pr_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return -EINVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	}

	return -EINVAL;
}
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	unsigned long timeout;
	int ret;

	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);

	while (1) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			pr_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			spin_lock_irqsave(&admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			spin_unlock_irqrestore(&admin_queue->q_lock, flags);

			ret = -ETIME;
			goto err;
		}

		msleep(ENA_POLL_MS);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		pr_err("Command was aborted\n");
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		ret = -ENODEV;
		goto err;
	}

	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
	     comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
/*
 * Set the LLQ configurations of the firmware
 *
 * The driver provides only the enabled feature values to the device,
 * which in turn, checks if they are supported.
 */
static int ena_com_set_llq(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int ret;

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set LLQ configurations: %d\n", ret);

	return ret;
}
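/* Pick the LLQ parameters to use: prefer the driver's default configuration
 * when the device supports it, otherwise fall back to a supported value and
 * log the substitution.
 */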
static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
				   struct ena_admin_feature_llq_desc *llq_features,
				   struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	u16 supported_feat;
	int rc;

	memset(llq_info, 0, sizeof(*llq_info));

	supported_feat = llq_features->header_location_ctrl_supported;

	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
		llq_info->header_location_ctrl =
			llq_default_cfg->llq_header_location;
	} else {
		pr_err("Invalid header location control, supported: 0x%x\n",
		       supported_feat);
		return -EINVAL;
	}

	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
		supported_feat = llq_features->descriptors_stride_ctrl_supported;
		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
		} else {
			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
			} else {
				pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
				       supported_feat);
				return -EINVAL;
			}

			pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			       llq_default_cfg->llq_stride_ctrl, supported_feat,
			       llq_info->desc_stride_ctrl);
		}
	} else {
		llq_info->desc_stride_ctrl = 0;
	}

	supported_feat = llq_features->entry_size_ctrl_supported;
	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
	} else {
		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
			llq_info->desc_list_entry_size = 128;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
			llq_info->desc_list_entry_size = 192;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
			llq_info->desc_list_entry_size = 256;
		} else {
			pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
			       supported_feat);
			return -EINVAL;
		}

		pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
		       llq_default_cfg->llq_ring_entry_size, supported_feat,
		       llq_info->desc_list_entry_size);
	}

	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be whole multiply of 8
		 * This requirement comes from __iowrite64_copy()
		 */
		pr_err("illegal entry size %d\n",
		       llq_info->desc_list_entry_size);
		return -EINVAL;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	supported_feat = llq_features->desc_num_before_header_supported;
	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
	} else {
		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
		} else {
			pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
			       supported_feat);
			return -EINVAL;
		}

		pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
		       llq_default_cfg->llq_num_decs_before_header,
		       supported_feat, llq_info->descs_num_before_header);
	}

	llq_info->max_entries_in_tx_burst =
		(u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);

	rc = ena_com_set_llq(ena_dev);
	if (rc)
		pr_err("Cannot set LLQ configuration: %d\n", rc);

	return rc;
}
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							 struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(
					    admin_queue->completion_timeout));

	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the device didn't get any msi-x interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED) {
			pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
			       comp_ctx->cmd_opcode,
			       admin_queue->auto_polling ? "ON" : "OFF");
			/* Check if fallback to polling is enabled */
			if (admin_queue->auto_polling)
				admin_queue->polling = true;
		} else {
			pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);
		}
		/* Check if shifted to polling mode.
		 * This will happen if there is a completion without an interrupt
		 * and autopolling mode is enabled. Continuing normal execution in such case
		 */
		if (!admin_queue->polling) {
			admin_queue->running_state = false;
			ret = -ETIME;
			goto err;
		}
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
/* This method reads the hardware device register through posting writes
 * and waiting for response
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags = 0;
	u32 timeout = mmio_read->reg_read_to;

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return readl(ena_dev->reg_bar + offset);

	spin_lock_irqsave(&mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
			break;

		udelay(1);
	}

	if (unlikely(i == timeout)) {
		pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
		       mmio_read->seq_num, offset, read_resp->req_id,
		       read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		pr_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	spin_unlock_irqrestore(&mmio_read->lock, flags);

	return ret;
}
/* There are two types to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}
static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}
static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_sq->desc_addr.virt_addr,
				  io_sq->desc_addr.phys_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	/* Convert timeout from resolution of 100ms to ENA_POLL_MS */
	timeout = (timeout * 100) / ENA_POLL_MS;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			pr_err("Reg read timeout occurred\n");
			return -ETIME;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		msleep(ENA_POLL_MS);
	}

	return -ETIME;
}
static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes is always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}
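/* Issue an ENA_ADMIN_GET_FEATURE admin command, optionally pointing the
 * device at an indirect control buffer for features whose response does not
 * fit in the completion entry.
 */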
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size,
				  u8 feature_ver)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		pr_debug("Feature %d isn't supported\n", feature_id);
		return -EOPNOTSUPP;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;
	get_cmd.feat_common.feature_version = feature_ver;
	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to submit get_feature command %d error: %d\n",
		       feature_id, ret);

	return ret;
}
static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id,
			       u8 feature_ver)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0,
				      feature_ver);
}
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_key =
		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				   &rss->hash_key_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_key))
		return -ENOMEM;

	return 0;
}
static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				  rss->hash_key, rss->hash_key_dma_addr);
	rss->hash_key = NULL;
}
static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_ctrl =
		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				   &rss->hash_ctrl_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_ctrl))
		return -ENOMEM;

	return 0;
}
static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
	rss->hash_ctrl = NULL;
}
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
		       1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rss->rss_ind_tbl =
		dma_alloc_coherent(ena_dev->dmadev, tbl_size,
				   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
			  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return -ENOMEM;
}
static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
				  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			pr_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}
static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return -EINVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}
static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	/* Initial value of intr_delay_resolution might be 0 */
	u16 prev_intr_delay_resolution =
		ena_dev->intr_delay_resolution ?
		ena_dev->intr_delay_resolution :
		ENA_DEFAULT_INTR_DELAY_RESOLUTION;

	if (!intr_delay_resolution) {
		pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
	}

	ena_dev->intr_moder_rx_interval =
		ena_dev->intr_moder_rx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	ena_dev->intr_moder_tx_interval =
		ena_dev->intr_moder_tx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	ena_dev->intr_delay_resolution = intr_delay_resolution;
}
/*****************************************************************************/
/*******************************      API       *****************************/
/*****************************************************************************/

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		if (comp_ctx == ERR_PTR(-ENODEV))
			pr_debug("Failed to submit command [%ld]\n",
				 PTR_ERR(comp_ctx));
		else
			pr_err("Failed to submit command [%ld]\n",
			       PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			pr_err("Failed to process command. ret = %d\n", ret);
		else
			pr_debug("Failed to process command. ret = %d\n", ret);
	}
	return ret;
}
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Invalid queue number %d but the max is %d\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		complete(&comp_ctx->wait_event);
	}
}
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		msleep(ENA_POLL_MS);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}
bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}
void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
	if (ret) {
		pr_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
			get_resp.u.aenq.supported_groups, groups_flag);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	pr_debug("ENA dma width: %d\n", width);

	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
		pr_err("DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}
int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	pr_info("ena device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx)
		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
				  sq->dma_addr);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
				  cq->dma_addr);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
				  aenq->dma_addr);
	aenq->entries = NULL;
}
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}
void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
					 bool polling)
{
	ena_dev->admin_queue.auto_polling = polling;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_alloc_coherent(ena_dev->dmadev,
				   sizeof(*mmio_read->read_resp),
				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		return -ENOMEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}
void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}
void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
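/* Bring up the admin queue: allocate the SQ/CQ/AENQ rings, program their base
 * addresses and capabilities into the device registers and mark the queue as
 * running.
 */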
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			min_t(u32, ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_EXT,
					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
		if (rc)
			return rc;

		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
			return -EINVAL;

		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
		       sizeof(get_resp.u.max_queue_ext));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
	} else {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
		       sizeof(get_resp.u.max_queue));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue.max_header_size;

		if (rc)
			return rc;
	}

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't mandatory admin command. So in case the
	 * command isn't supported set driver hints to 0
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->hw_hints, 0x0,
		       sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
	if (!rc)
		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
		       sizeof(get_resp.u.llq));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
	else
		return rc;

	return 0;
}
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}
/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}
/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	unsigned long long timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp =
			(unsigned long long)aenq_common->timestamp_low |
			((unsigned long long)aenq_common->timestamp_high << 32);
		pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom, timestamp);

		/* Handle specific event*/
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler*/

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel_relaxed((u32)aenq->head,
		       dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
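/* ena_com_dev_reset:
 * Trigger a device reset: verify the device is ready, latch the reset
 * timeout from the capabilities register, assert DEV_RESET with the given
 * reason, wait for the reset-in-progress indication to turn on and then off,
 * and finally refresh the admin completion timeout from the device caps.
 */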
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read32 timeout occurred\n");
		return -ETIME;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		pr_err("Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		pr_err("Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		pr_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		pr_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}
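/* ena_get_dev_stats:
 * Helper that submits an ENA_ADMIN_GET_STATS admin command of the requested
 * type and leaves the device response in ctx->get_resp.
 */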
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to get stats. error: %d\n", ret);

	return ret;
}
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}
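/* ena_com_set_dev_mtu:
 * Push the new MTU to the device via the ENA_ADMIN_MTU set-feature command
 * (bails out early if the device doesn't advertise the feature).
 */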
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (unlikely(ret)) {
		pr_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}
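/* ena_com_set_hash_function:
 * Program the RSS hash function currently stored in ena_dev->rss into the
 * device. The requested function is validated against the device's
 * supported_func bitmap before the set-feature command is issued; the hash
 * key itself is passed indirectly through the control buffer.
 */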
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EOPNOTSUPP;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		pr_err("Failed to set hash function %d. error: %d\n",
		       rss->hash_func, ret);
		return -EINVAL;
	}

	return 0;
}
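/* ena_com_fill_hash_function:
 * Configure the RSS hash function and (for Toeplitz) the hash key. The key
 * length must be a multiple of 4 bytes. On failure the previous device
 * configuration is re-read so the host copy stays consistent.
 */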
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a mult of DWs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		pr_err("Flow hash function %d isn't supported\n", func);
		return -EOPNOTSUPP;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
			       key_len, sizeof(hash_key->key));
			return -EINVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		pr_err("Invalid hash function (%d)\n", func);
		return -EINVAL;
	}

	rss->hash_func = func;
	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}
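/* ena_com_get_hash_function:
 * Read the currently selected hash function (and optionally the hash key)
 * back from the device into the caller's buffers.
 */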
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl), 0);
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}
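/* ena_com_set_hash_ctrl:
 * Write the host copy of the RSS flow-hash input control table to the
 * device, enabling L3/L4 input sorting. The table is passed indirectly via
 * the control buffer.
 */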
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_INPUT);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}
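/* ena_com_set_default_hash_ctrl:
 * Fill the hash input table with the default field selection per protocol
 * (L3 + L4 for TCP/UDP, L3 only for plain IP, L2 for non-IP) and push it to
 * the device, failing if any selected field isn't supported.
 */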
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EOPNOTSUPP;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}
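/* ena_com_fill_hash_ctrl:
 * Update the hash input fields for a single protocol and write the table to
 * the device; on failure the device copy is re-read to restore the host
 * state.
 */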
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		pr_err("Invalid proto num (%u)\n", proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}
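/* ena_com_indirect_table_fill_entry:
 * Set a single entry of the host RSS indirection table after bounds-checking
 * the index and the queue value.
 */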
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}
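/* ena_com_indirect_table_set:
 * Convert the host indirection table to the device representation and push
 * it to the device with a set-feature command; the table itself is passed
 * indirectly via the control buffer.
 */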
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return -EOPNOTSUPP;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		pr_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}
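/* ena_com_indirect_table_get:
 * Read the indirection table back from the device and copy it, converted to
 * host queue ids, into the caller's array.
 */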
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size, 0);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}
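/* ena_com_rss_init:
 * Allocate all RSS resources (indirection table, hash key, hash control),
 * unwinding the allocations in reverse order on failure.
 */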
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:

	return rc;
}
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
				   &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
		(ENA_COMMON_SPEC_VERSION_MINOR));

	return 0;
}
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
				   &host_attr->debug_area_dma_addr,
				   GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}
void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}
void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}
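/* ena_com_set_host_attributes:
 * Hand the host-info page and debug area base addresses to the device via
 * the ENA_ADMIN_HOST_ATTR_CONFIG set-feature command.
 */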
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set host attributes: %d\n", ret);

	return ret;
}
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}
static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
							   u32 intr_delay_resolution,
							   u32 *intr_moder_interval)
{
	if (!intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;

	return 0;
}
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_tx_interval);
}
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_rx_interval);
}
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION, 0);

	if (rc) {
		if (rc == -EOPNOTSUPP) {
			pr_debug("Feature %d isn't supported\n",
				 ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
			       rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);

	/* Disable adaptive moderation by default - can be enabled later */
	ena_com_disable_adaptive_moderation(ena_dev);

	return 0;
}
unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}
unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_rx_interval;
}
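/* ena_com_config_dev_mode:
 * Select the TX placement policy: fall back to host memory when the device
 * reports no LLQs, otherwise configure the LLQ parameters and derive the
 * maximum pushable header size from the descriptor list entry size.
 */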
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int rc;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
	if (rc)
		return rc;

	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

	if (unlikely(ena_dev->tx_max_header_size == 0)) {
		pr_err("the size of the LLQ entry is smaller than needed\n");
		return -EINVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;