/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
		| (ENA_COMMON_SPEC_VERSION_MINOR))

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_POLL_MS	5

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	struct completion wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};
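
/* Program a DMA address into the split low/high fields the device expects,
 * after verifying that the address fits within the DMA address width the
 * device advertised.
 */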
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		pr_err("dma address has more bits than the device supports\n");
		return -EINVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
					  GFP_KERNEL);

	if (!sq->entries) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
					  GFP_KERNEL);

	if (!cq->entries) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
					    GFP_KERNEL);

	if (!aenq->entries) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		pr_err("aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	atomic_dec(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		pr_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		atomic_inc(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}
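
/* Copy the command into the next free admin SQ slot, tag it with the current
 * phase bit and command id, and ring the SQ doorbell. Must be called with the
 * admin queue q_lock held.
 */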
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = atomic_read(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		pr_debug("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(-EINVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}

static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!queue->comp_ctx)) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}

	return 0;
}
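
/* Lock-protected wrapper around __ena_com_submit_admin_cmd(); refuses new
 * commands once the admin queue has been marked as not running.
 */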
static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags;
	struct ena_comp_ctx *comp_ctx;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		return ERR_PTR(-ENODEV);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (IS_ERR(comp_ctx))
		admin_queue->running_state = false;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);

	return comp_ctx;
}

static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_sq->desc_addr.phys_addr,
					    GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				dma_zalloc_coherent(ena_dev->dmadev, size,
						    &io_sq->desc_addr.phys_addr,
						    GFP_KERNEL);
		}
	} else {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		}
	}

	if (!io_sq->desc_addr.virt_addr) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, size,
				    &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_cq->cdesc_addr.phys_addr,
					    GFP_KERNEL);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}
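
/* Match a completion entry to its ena_comp_ctx by command id, record the
 * completion status, copy the entry to the caller's buffer and, in interrupt
 * mode, wake up the waiter.
 */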
static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		complete(&comp_ctx->wait_event);
}
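
/* Walk the admin CQ and consume every entry whose phase bit matches the
 * current phase; the expected phase flips each time the head wraps around
 * the queue.
 */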
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((cqe->acq_common_descriptor.flags &
			ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}

static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		pr_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return -EINVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	}

	return 0;
}

static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags, timeout;
	int ret;

	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);

	while (1) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			pr_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			spin_lock_irqsave(&admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			spin_unlock_irqrestore(&admin_queue->q_lock, flags);

			ret = -ETIME;
			goto err;
		}

		msleep(ENA_POLL_MS);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		pr_err("Command was aborted\n");
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ret = -ENODEV;
		goto err;
	}

	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
	     comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							 struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(
					    admin_queue->completion_timeout));

	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the device didn't get any msi-x interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			pr_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
			       comp_ctx->cmd_opcode);
		else
			pr_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = -ETIME;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/* This method reads the hardware device register through posting writes
 * and waiting for response
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags;
	u32 timeout = mmio_read->reg_read_to;

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return readl(ena_dev->reg_bar + offset);

	spin_lock_irqsave(&mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	/* make sure read_resp->req_id gets updated before the hw can write
	 * there
	 */
	wmb();

	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (read_resp->req_id == mmio_read->seq_num)
			break;

		udelay(1);
	}

	if (unlikely(i == timeout)) {
		pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
		       mmio_read->seq_num, offset, read_resp->req_id,
		       read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		pr_err("Read failure: wrong offset provided");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	spin_unlock_irqrestore(&mmio_read->lock, flags);

	return ret;
}

/* There are two types to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
			dma_free_coherent(ena_dev->dmadev, size,
					  io_sq->desc_addr.virt_addr,
					  io_sq->desc_addr.phys_addr);
		else
			devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}
}
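
/* Poll the device status register until the reset-in-progress bit matches
 * exp_state; the timeout argument is given in units of 100 ms.
 */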
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	/* Convert timeout from resolution of 100ms to ENA_POLL_MS */
	timeout = (timeout * 100) / ENA_POLL_MS;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			pr_err("Reg read timeout occurred\n");
			return -ETIME;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		msleep(ENA_POLL_MS);
	}

	return -ETIME;
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes is always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}

static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		pr_debug("Feature %d isn't supported\n", feature_id);
		return -EOPNOTSUPP;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;

	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to submit get_feature command %d error: %d\n",
		       feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0);
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_key =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				    &rss->hash_key_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_key))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				  rss->hash_key, rss->hash_key_dma_addr);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_ctrl =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				    &rss->hash_ctrl_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_ctrl))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
	rss->hash_ctrl = NULL;
}

static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
		       1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rss->rss_ind_tbl =
		dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
				    &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
			  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return -ENOMEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
				  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}
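
/* Build and execute an ENA_ADMIN_CREATE_SQ command from the io_sq attributes.
 * For host-memory queues the descriptor ring address is handed to the device;
 * for device-memory (LLQ) queues the device returns the header and descriptor
 * offsets inside the memory BAR.
 */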
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			pr_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return -EINVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}

static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	size_t size;

	size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

	ena_dev->intr_moder_tbl =
		devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
	if (!ena_dev->intr_moder_tbl)
		return -ENOMEM;

	ena_com_config_default_interrupt_moderation_table(ena_dev);

	return 0;
}

static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int i;

	if (!intr_delay_resolution) {
		pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = 1;
	}
	ena_dev->intr_delay_resolution = intr_delay_resolution;

	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}

/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		if (comp_ctx == ERR_PTR(-ENODEV))
			pr_debug("Failed to submit command [%ld]\n",
				 PTR_ERR(comp_ctx));
		else
			pr_err("Failed to submit command [%ld]\n",
			       PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			pr_err("Failed to process command. ret = %d\n", ret);
		else
			pr_debug("Failed to process command. ret = %d\n", ret);
	}
	return ret;
}

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Invalid queue number %d but the max is %d\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		complete(&comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		msleep(ENA_POLL_MS);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
	if (ret) {
		pr_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
			get_resp.u.aenq.supported_groups, groups_flag);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	pr_debug("ENA dma width: %d\n", width);

	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
		pr_err("DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as new as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	pr_info("ena device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_ENA_VER) {
		pr_err("ENA version is lower than the minimal version the driver supports\n");
		return -1;
	}

	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx)
		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
				  sq->dma_addr);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
				  cq->dma_addr);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
				  aenq->dma_addr);
	aenq->entries = NULL;
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_zalloc_coherent(ena_dev->dmadev,
				    sizeof(*mmio_read->read_resp),
				    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		return -ENOMEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
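
/* Bring up the admin submission/completion queues and the AENQ: allocate the
 * rings, program their base addresses and capability registers, and mark the
 * admin queue as running.
 */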
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}
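
/* Create an IO queue pair: initialize the SQ/CQ bookkeeping, then issue
 * CREATE_CQ followed by CREATE_SQ (the SQ command needs the CQ index returned
 * by the device).
 */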
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			min_t(u32, ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't a mandatory admin command. So in case the
	 * command isn't supported, set driver hints to 0
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->hw_hints, 0x0,
		       sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
	       phase) {
		pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom,
			 (u64)aenq_common->timestamp_low +
				 ((u64)aenq_common->timestamp_high << 32));

		/* Handle specific event*/
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler*/

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
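
/* Trigger a device reset, wait for the reset-in-progress bit to assert and
 * then clear, and derive the admin completion timeout from the capabilities
 * register.
 */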
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read32 timeout occurred\n");
		return -ETIME;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		pr_err("Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
			ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		pr_err("Invalid timeout value\n");
		return -EINVAL;
	}

	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		pr_err("Reset indication didn't turn on\n");
		return rc;
	}

	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		pr_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}

int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		pr_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EOPNOTSUPP;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		pr_err("Failed to set hash function %d. error: %d\n",
		       rss->hash_func, ret);
		return -EINVAL;
	}

	return 0;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a mult of DWs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		pr_err("Flow hash function %d isn't supported\n", func);
		return -EOPNOTSUPP;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
			       key_len, sizeof(hash_key->key));
			return -EINVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		pr_err("Invalid hash function (%d)\n", func);
		return -EINVAL;
	}

	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}
*ena_dev
)
2152 struct ena_com_admin_queue
*admin_queue
= &ena_dev
->admin_queue
;
2153 struct ena_rss
*rss
= &ena_dev
->rss
;
2154 struct ena_admin_feature_rss_hash_control
*hash_ctrl
= rss
->hash_ctrl
;
2155 struct ena_admin_set_feat_cmd cmd
;
2156 struct ena_admin_set_feat_resp resp
;
2159 if (!ena_com_check_supported_feature_id(ena_dev
,
2160 ENA_ADMIN_RSS_HASH_INPUT
)) {
2161 pr_debug("Feature %d isn't supported\n",
2162 ENA_ADMIN_RSS_HASH_INPUT
);
2166 memset(&cmd
, 0x0, sizeof(cmd
));
2168 cmd
.aq_common_descriptor
.opcode
= ENA_ADMIN_SET_FEATURE
;
2169 cmd
.aq_common_descriptor
.flags
=
2170 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK
;
2171 cmd
.feat_common
.feature_id
= ENA_ADMIN_RSS_HASH_INPUT
;
2172 cmd
.u
.flow_hash_input
.enabled_input_sort
=
2173 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK
|
2174 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK
;
2176 ret
= ena_com_mem_addr_set(ena_dev
,
2177 &cmd
.control_buffer
.address
,
2178 rss
->hash_ctrl_dma_addr
);
2179 if (unlikely(ret
)) {
2180 pr_err("memory address set failed\n");
2183 cmd
.control_buffer
.length
= sizeof(*hash_ctrl
);
2185 ret
= ena_com_execute_admin_command(admin_queue
,
2186 (struct ena_admin_aq_entry
*)&cmd
,
2188 (struct ena_admin_acq_entry
*)&resp
,
2191 pr_err("Failed to set hash input. error: %d\n", ret
);
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EOPNOTSUPP;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		pr_err("Invalid proto num (%u)\n", proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

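/* Illustrative usage sketch, not part of the driver: enabling 4-tuple
 * hashing for TCP over IPv4 only, using the same ENA_ADMIN_RSS_* field
 * flags as the defaults above.
 *
 *	rc = ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4,
 *				    ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
 *				    ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP);
 */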
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return -EOPNOTSUPP;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		pr_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

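/* Illustrative usage sketch, not part of the driver: spreading the table
 * entries across the RX queues round-robin and then pushing the table to
 * the device. tbl_log_size and num_queues are caller-supplied here, and the
 * exact entry_value encoding (queue index vs. device queue id) is up to the
 * calling driver.
 *
 *	for (i = 0; i < (1 << tbl_log_size); i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_queues);
 *		if (unlikely(rc))
 *			return rc;
 *	}
 *
 *	rc = ena_com_indirect_table_set(ena_dev);
 */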
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

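/* Illustrative usage sketch, not part of the driver: a typical RSS bring-up
 * order. The 2^7 (128 entry) table size is only an example value; entries
 * would be filled via ena_com_indirect_table_fill_entry() before the final
 * ena_com_indirect_table_set() call.
 *
 *	rc = ena_com_rss_init(ena_dev, 7);
 *	if (!rc)
 *		rc = ena_com_set_default_hash_ctrl(ena_dev);
 *	if (!rc)
 *		rc = ena_com_indirect_table_set(ena_dev);
 *	if (rc)
 *		ena_com_rss_destroy(ena_dev);
 */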
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
				    &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
				    &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

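/* Illustrative usage sketch, not part of the driver: publishing host info
 * and a debug area to the device. The SZ_4K debug area size, the ordering,
 * and the error labels are example choices of the caller.
 *
 *	rc = ena_com_allocate_host_info(ena_dev);
 *	if (rc)
 *		return rc;
 *	(fill ena_dev->host_attr.host_info fields here)
 *	rc = ena_com_allocate_debug_area(ena_dev, SZ_4K);
 *	if (rc)
 *		goto err_free_host_info;
 *	rc = ena_com_set_host_attributes(ena_dev);
 *	if (rc)
 *		goto err_free_debug_area;
 */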
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	/* We use LOWEST entry of moderation table for storing
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}

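/* The nonadaptive intervals are stored in device units: for example, with a
 * (hypothetical) intr_delay_resolution of 4 us, a request for 64 us of RX
 * coalescing is stored as 64 / 4 = 16, and
 * ena_com_get_nonadaptive_moderation_interval_rx() below returns that stored
 * device-unit value.
 */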
void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == -EOPNOTSUPP) {
			pr_debug("Feature %d isn't supported\n",
				 ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
			       rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}

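/* Illustrative usage sketch, not part of the driver: interrupt moderation
 * init is typically a one-time probe/reset-time step; when the device does
 * not report the feature, the call above disables adaptive moderation and
 * still returns 0, so the (caller-defined) error label here is only for
 * real failures.
 *
 *	rc = ena_com_init_interrupt_moderation(ena_dev);
 *	if (rc)
 *		goto err;
 */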
void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

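/* Illustrative usage sketch, not part of the driver: an ethtool-style caller
 * updating one moderation level; the interval and packet counts are arbitrary
 * example values. The interval passed in is in microseconds and is divided by
 * intr_delay_resolution above, while ena_com_get_intr_moderation_entry()
 * below multiplies it back.
 *
 *	struct ena_intr_moder_entry entry = {
 *		.intr_moder_interval = 64,
 *		.pkts_per_interval = 32,
 *		.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED,
 *	};
 *
 *	ena_com_init_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
 */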
void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}