1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
8 /*****************************************************************************/
9 /*****************************************************************************/
11 /* Timeout in micro-sec */
12 #define ADMIN_CMD_TIMEOUT_US (3000000)
14 #define ENA_ASYNC_QUEUE_DEPTH 16
15 #define ENA_ADMIN_QUEUE_DEPTH 32
18 #define ENA_CTRL_MAJOR 0
19 #define ENA_CTRL_MINOR 0
20 #define ENA_CTRL_SUB_MINOR 1
22 #define MIN_ENA_CTRL_VER \
23 (((ENA_CTRL_MAJOR) << \
24 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
25 ((ENA_CTRL_MINOR) << \
26 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
29 #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
30 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
32 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
34 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
36 #define ENA_REGS_ADMIN_INTR_MASK 1
38 #define ENA_MIN_ADMIN_POLL_US 100
40 #define ENA_MAX_ADMIN_POLL_US 5000
42 /*****************************************************************************/
43 /*****************************************************************************/
44 /*****************************************************************************/
49 /* Abort - canceled by the driver */
54 struct completion wait_event
;
55 struct ena_admin_acq_entry
*user_cqe
;
57 enum ena_cmd_status status
;
58 /* status from the device */
64 struct ena_com_stats_ctx
{
65 struct ena_admin_aq_get_stats_cmd get_cmd
;
66 struct ena_admin_acq_get_stats_resp get_resp
;
69 static int ena_com_mem_addr_set(struct ena_com_dev
*ena_dev
,
70 struct ena_common_mem_addr
*ena_addr
,
73 if ((addr
& GENMASK_ULL(ena_dev
->dma_addr_bits
- 1, 0)) != addr
) {
74 netdev_err(ena_dev
->net_device
,
75 "DMA address has more bits that the device supports\n");
79 ena_addr
->mem_addr_low
= lower_32_bits(addr
);
80 ena_addr
->mem_addr_high
= (u16
)upper_32_bits(addr
);
85 static int ena_com_admin_init_sq(struct ena_com_admin_queue
*admin_queue
)
87 struct ena_com_dev
*ena_dev
= admin_queue
->ena_dev
;
88 struct ena_com_admin_sq
*sq
= &admin_queue
->sq
;
89 u16 size
= ADMIN_SQ_SIZE(admin_queue
->q_depth
);
91 sq
->entries
= dma_alloc_coherent(admin_queue
->q_dmadev
, size
,
92 &sq
->dma_addr
, GFP_KERNEL
);
95 netdev_err(ena_dev
->net_device
, "Memory allocation failed\n");
108 static int ena_com_admin_init_cq(struct ena_com_admin_queue
*admin_queue
)
110 struct ena_com_dev
*ena_dev
= admin_queue
->ena_dev
;
111 struct ena_com_admin_cq
*cq
= &admin_queue
->cq
;
112 u16 size
= ADMIN_CQ_SIZE(admin_queue
->q_depth
);
114 cq
->entries
= dma_alloc_coherent(admin_queue
->q_dmadev
, size
,
115 &cq
->dma_addr
, GFP_KERNEL
);
118 netdev_err(ena_dev
->net_device
, "Memory allocation failed\n");
128 static int ena_com_admin_init_aenq(struct ena_com_dev
*ena_dev
,
129 struct ena_aenq_handlers
*aenq_handlers
)
131 struct ena_com_aenq
*aenq
= &ena_dev
->aenq
;
132 u32 addr_low
, addr_high
, aenq_caps
;
135 ena_dev
->aenq
.q_depth
= ENA_ASYNC_QUEUE_DEPTH
;
136 size
= ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH
);
137 aenq
->entries
= dma_alloc_coherent(ena_dev
->dmadev
, size
,
138 &aenq
->dma_addr
, GFP_KERNEL
);
140 if (!aenq
->entries
) {
141 netdev_err(ena_dev
->net_device
, "Memory allocation failed\n");
145 aenq
->head
= aenq
->q_depth
;
148 addr_low
= ENA_DMA_ADDR_TO_UINT32_LOW(aenq
->dma_addr
);
149 addr_high
= ENA_DMA_ADDR_TO_UINT32_HIGH(aenq
->dma_addr
);
151 writel(addr_low
, ena_dev
->reg_bar
+ ENA_REGS_AENQ_BASE_LO_OFF
);
152 writel(addr_high
, ena_dev
->reg_bar
+ ENA_REGS_AENQ_BASE_HI_OFF
);
155 aenq_caps
|= ena_dev
->aenq
.q_depth
& ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK
;
156 aenq_caps
|= (sizeof(struct ena_admin_aenq_entry
)
157 << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT
) &
158 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK
;
159 writel(aenq_caps
, ena_dev
->reg_bar
+ ENA_REGS_AENQ_CAPS_OFF
);
161 if (unlikely(!aenq_handlers
)) {
162 netdev_err(ena_dev
->net_device
,
163 "AENQ handlers pointer is NULL\n");
167 aenq
->aenq_handlers
= aenq_handlers
;
172 static void comp_ctxt_release(struct ena_com_admin_queue
*queue
,
173 struct ena_comp_ctx
*comp_ctx
)
175 comp_ctx
->occupied
= false;
176 atomic_dec(&queue
->outstanding_cmds
);
179 static struct ena_comp_ctx
*get_comp_ctxt(struct ena_com_admin_queue
*admin_queue
,
180 u16 command_id
, bool capture
)
182 if (unlikely(command_id
>= admin_queue
->q_depth
)) {
183 netdev_err(admin_queue
->ena_dev
->net_device
,
184 "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
185 command_id
, admin_queue
->q_depth
);
189 if (unlikely(!admin_queue
->comp_ctx
)) {
190 netdev_err(admin_queue
->ena_dev
->net_device
,
191 "Completion context is NULL\n");
195 if (unlikely(admin_queue
->comp_ctx
[command_id
].occupied
&& capture
)) {
196 netdev_err(admin_queue
->ena_dev
->net_device
,
197 "Completion context is occupied\n");
202 atomic_inc(&admin_queue
->outstanding_cmds
);
203 admin_queue
->comp_ctx
[command_id
].occupied
= true;
206 return &admin_queue
->comp_ctx
[command_id
];
209 static struct ena_comp_ctx
*__ena_com_submit_admin_cmd(struct ena_com_admin_queue
*admin_queue
,
210 struct ena_admin_aq_entry
*cmd
,
211 size_t cmd_size_in_bytes
,
212 struct ena_admin_acq_entry
*comp
,
213 size_t comp_size_in_bytes
)
215 struct ena_comp_ctx
*comp_ctx
;
216 u16 tail_masked
, cmd_id
;
220 queue_size_mask
= admin_queue
->q_depth
- 1;
222 tail_masked
= admin_queue
->sq
.tail
& queue_size_mask
;
224 /* In case of queue FULL */
225 cnt
= (u16
)atomic_read(&admin_queue
->outstanding_cmds
);
226 if (cnt
>= admin_queue
->q_depth
) {
227 netdev_dbg(admin_queue
->ena_dev
->net_device
,
228 "Admin queue is full.\n");
229 admin_queue
->stats
.out_of_space
++;
230 return ERR_PTR(-ENOSPC
);
233 cmd_id
= admin_queue
->curr_cmd_id
;
235 cmd
->aq_common_descriptor
.flags
|= admin_queue
->sq
.phase
&
236 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK
;
238 cmd
->aq_common_descriptor
.command_id
|= cmd_id
&
239 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK
;
241 comp_ctx
= get_comp_ctxt(admin_queue
, cmd_id
, true);
242 if (unlikely(!comp_ctx
))
243 return ERR_PTR(-EINVAL
);
245 comp_ctx
->status
= ENA_CMD_SUBMITTED
;
246 comp_ctx
->comp_size
= (u32
)comp_size_in_bytes
;
247 comp_ctx
->user_cqe
= comp
;
248 comp_ctx
->cmd_opcode
= cmd
->aq_common_descriptor
.opcode
;
250 reinit_completion(&comp_ctx
->wait_event
);
252 memcpy(&admin_queue
->sq
.entries
[tail_masked
], cmd
, cmd_size_in_bytes
);
254 admin_queue
->curr_cmd_id
= (admin_queue
->curr_cmd_id
+ 1) &
257 admin_queue
->sq
.tail
++;
258 admin_queue
->stats
.submitted_cmd
++;
260 if (unlikely((admin_queue
->sq
.tail
& queue_size_mask
) == 0))
261 admin_queue
->sq
.phase
= !admin_queue
->sq
.phase
;
263 writel(admin_queue
->sq
.tail
, admin_queue
->sq
.db_addr
);
268 static int ena_com_init_comp_ctxt(struct ena_com_admin_queue
*admin_queue
)
270 struct ena_com_dev
*ena_dev
= admin_queue
->ena_dev
;
271 size_t size
= admin_queue
->q_depth
* sizeof(struct ena_comp_ctx
);
272 struct ena_comp_ctx
*comp_ctx
;
275 admin_queue
->comp_ctx
=
276 devm_kzalloc(admin_queue
->q_dmadev
, size
, GFP_KERNEL
);
277 if (unlikely(!admin_queue
->comp_ctx
)) {
278 netdev_err(ena_dev
->net_device
, "Memory allocation failed\n");
282 for (i
= 0; i
< admin_queue
->q_depth
; i
++) {
283 comp_ctx
= get_comp_ctxt(admin_queue
, i
, false);
285 init_completion(&comp_ctx
->wait_event
);
291 static struct ena_comp_ctx
*ena_com_submit_admin_cmd(struct ena_com_admin_queue
*admin_queue
,
292 struct ena_admin_aq_entry
*cmd
,
293 size_t cmd_size_in_bytes
,
294 struct ena_admin_acq_entry
*comp
,
295 size_t comp_size_in_bytes
)
297 unsigned long flags
= 0;
298 struct ena_comp_ctx
*comp_ctx
;
300 spin_lock_irqsave(&admin_queue
->q_lock
, flags
);
301 if (unlikely(!admin_queue
->running_state
)) {
302 spin_unlock_irqrestore(&admin_queue
->q_lock
, flags
);
303 return ERR_PTR(-ENODEV
);
305 comp_ctx
= __ena_com_submit_admin_cmd(admin_queue
, cmd
,
309 if (IS_ERR(comp_ctx
))
310 admin_queue
->running_state
= false;
311 spin_unlock_irqrestore(&admin_queue
->q_lock
, flags
);
316 static int ena_com_init_io_sq(struct ena_com_dev
*ena_dev
,
317 struct ena_com_create_io_ctx
*ctx
,
318 struct ena_com_io_sq
*io_sq
)
323 memset(&io_sq
->desc_addr
, 0x0, sizeof(io_sq
->desc_addr
));
325 io_sq
->dma_addr_bits
= (u8
)ena_dev
->dma_addr_bits
;
326 io_sq
->desc_entry_size
=
327 (io_sq
->direction
== ENA_COM_IO_QUEUE_DIRECTION_TX
) ?
328 sizeof(struct ena_eth_io_tx_desc
) :
329 sizeof(struct ena_eth_io_rx_desc
);
331 size
= io_sq
->desc_entry_size
* io_sq
->q_depth
;
333 if (io_sq
->mem_queue_type
== ENA_ADMIN_PLACEMENT_POLICY_HOST
) {
334 dev_node
= dev_to_node(ena_dev
->dmadev
);
335 set_dev_node(ena_dev
->dmadev
, ctx
->numa_node
);
336 io_sq
->desc_addr
.virt_addr
=
337 dma_alloc_coherent(ena_dev
->dmadev
, size
,
338 &io_sq
->desc_addr
.phys_addr
,
340 set_dev_node(ena_dev
->dmadev
, dev_node
);
341 if (!io_sq
->desc_addr
.virt_addr
) {
342 io_sq
->desc_addr
.virt_addr
=
343 dma_alloc_coherent(ena_dev
->dmadev
, size
,
344 &io_sq
->desc_addr
.phys_addr
,
348 if (!io_sq
->desc_addr
.virt_addr
) {
349 netdev_err(ena_dev
->net_device
,
350 "Memory allocation failed\n");
355 if (io_sq
->mem_queue_type
== ENA_ADMIN_PLACEMENT_POLICY_DEV
) {
356 /* Allocate bounce buffers */
357 io_sq
->bounce_buf_ctrl
.buffer_size
=
358 ena_dev
->llq_info
.desc_list_entry_size
;
359 io_sq
->bounce_buf_ctrl
.buffers_num
=
360 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT
;
361 io_sq
->bounce_buf_ctrl
.next_to_use
= 0;
363 size
= io_sq
->bounce_buf_ctrl
.buffer_size
*
364 io_sq
->bounce_buf_ctrl
.buffers_num
;
366 dev_node
= dev_to_node(ena_dev
->dmadev
);
367 set_dev_node(ena_dev
->dmadev
, ctx
->numa_node
);
368 io_sq
->bounce_buf_ctrl
.base_buffer
=
369 devm_kzalloc(ena_dev
->dmadev
, size
, GFP_KERNEL
);
370 set_dev_node(ena_dev
->dmadev
, dev_node
);
371 if (!io_sq
->bounce_buf_ctrl
.base_buffer
)
372 io_sq
->bounce_buf_ctrl
.base_buffer
=
373 devm_kzalloc(ena_dev
->dmadev
, size
, GFP_KERNEL
);
375 if (!io_sq
->bounce_buf_ctrl
.base_buffer
) {
376 netdev_err(ena_dev
->net_device
,
377 "Bounce buffer memory allocation failed\n");
381 memcpy(&io_sq
->llq_info
, &ena_dev
->llq_info
,
382 sizeof(io_sq
->llq_info
));
384 /* Initiate the first bounce buffer */
385 io_sq
->llq_buf_ctrl
.curr_bounce_buf
=
386 ena_com_get_next_bounce_buffer(&io_sq
->bounce_buf_ctrl
);
387 memset(io_sq
->llq_buf_ctrl
.curr_bounce_buf
,
388 0x0, io_sq
->llq_info
.desc_list_entry_size
);
389 io_sq
->llq_buf_ctrl
.descs_left_in_line
=
390 io_sq
->llq_info
.descs_num_before_header
;
391 io_sq
->disable_meta_caching
=
392 io_sq
->llq_info
.disable_meta_caching
;
394 if (io_sq
->llq_info
.max_entries_in_tx_burst
> 0)
395 io_sq
->entries_in_tx_burst_left
=
396 io_sq
->llq_info
.max_entries_in_tx_burst
;
400 io_sq
->next_to_comp
= 0;
406 static int ena_com_init_io_cq(struct ena_com_dev
*ena_dev
,
407 struct ena_com_create_io_ctx
*ctx
,
408 struct ena_com_io_cq
*io_cq
)
413 memset(&io_cq
->cdesc_addr
, 0x0, sizeof(io_cq
->cdesc_addr
));
415 /* Use the basic completion descriptor for Rx */
416 io_cq
->cdesc_entry_size_in_bytes
=
417 (io_cq
->direction
== ENA_COM_IO_QUEUE_DIRECTION_TX
) ?
418 sizeof(struct ena_eth_io_tx_cdesc
) :
419 sizeof(struct ena_eth_io_rx_cdesc_base
);
421 size
= io_cq
->cdesc_entry_size_in_bytes
* io_cq
->q_depth
;
423 prev_node
= dev_to_node(ena_dev
->dmadev
);
424 set_dev_node(ena_dev
->dmadev
, ctx
->numa_node
);
425 io_cq
->cdesc_addr
.virt_addr
=
426 dma_alloc_coherent(ena_dev
->dmadev
, size
,
427 &io_cq
->cdesc_addr
.phys_addr
, GFP_KERNEL
);
428 set_dev_node(ena_dev
->dmadev
, prev_node
);
429 if (!io_cq
->cdesc_addr
.virt_addr
) {
430 io_cq
->cdesc_addr
.virt_addr
=
431 dma_alloc_coherent(ena_dev
->dmadev
, size
,
432 &io_cq
->cdesc_addr
.phys_addr
,
436 if (!io_cq
->cdesc_addr
.virt_addr
) {
437 netdev_err(ena_dev
->net_device
, "Memory allocation failed\n");
447 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue
*admin_queue
,
448 struct ena_admin_acq_entry
*cqe
)
450 struct ena_comp_ctx
*comp_ctx
;
453 cmd_id
= cqe
->acq_common_descriptor
.command
&
454 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK
;
456 comp_ctx
= get_comp_ctxt(admin_queue
, cmd_id
, false);
457 if (unlikely(!comp_ctx
)) {
458 netdev_err(admin_queue
->ena_dev
->net_device
,
459 "comp_ctx is NULL. Changing the admin queue running state\n");
460 admin_queue
->running_state
= false;
464 comp_ctx
->status
= ENA_CMD_COMPLETED
;
465 comp_ctx
->comp_status
= cqe
->acq_common_descriptor
.status
;
467 if (comp_ctx
->user_cqe
)
468 memcpy(comp_ctx
->user_cqe
, (void *)cqe
, comp_ctx
->comp_size
);
470 if (!admin_queue
->polling
)
471 complete(&comp_ctx
->wait_event
);
474 static void ena_com_handle_admin_completion(struct ena_com_admin_queue
*admin_queue
)
476 struct ena_admin_acq_entry
*cqe
= NULL
;
481 head_masked
= admin_queue
->cq
.head
& (admin_queue
->q_depth
- 1);
482 phase
= admin_queue
->cq
.phase
;
484 cqe
= &admin_queue
->cq
.entries
[head_masked
];
486 /* Go over all the completions */
487 while ((READ_ONCE(cqe
->acq_common_descriptor
.flags
) &
488 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK
) == phase
) {
489 /* Do not read the rest of the completion entry before the
490 * phase bit was validated
493 ena_com_handle_single_admin_completion(admin_queue
, cqe
);
497 if (unlikely(head_masked
== admin_queue
->q_depth
)) {
502 cqe
= &admin_queue
->cq
.entries
[head_masked
];
505 admin_queue
->cq
.head
+= comp_num
;
506 admin_queue
->cq
.phase
= phase
;
507 admin_queue
->sq
.head
+= comp_num
;
508 admin_queue
->stats
.completed_cmd
+= comp_num
;
511 static int ena_com_comp_status_to_errno(struct ena_com_admin_queue
*admin_queue
,
514 if (unlikely(comp_status
!= 0))
515 netdev_err(admin_queue
->ena_dev
->net_device
,
516 "Admin command failed[%u]\n", comp_status
);
518 switch (comp_status
) {
519 case ENA_ADMIN_SUCCESS
:
521 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE
:
523 case ENA_ADMIN_UNSUPPORTED_OPCODE
:
525 case ENA_ADMIN_BAD_OPCODE
:
526 case ENA_ADMIN_MALFORMED_REQUEST
:
527 case ENA_ADMIN_ILLEGAL_PARAMETER
:
528 case ENA_ADMIN_UNKNOWN_ERROR
:
530 case ENA_ADMIN_RESOURCE_BUSY
:
537 static void ena_delay_exponential_backoff_us(u32 exp
, u32 delay_us
)
539 delay_us
= max_t(u32
, ENA_MIN_ADMIN_POLL_US
, delay_us
);
540 delay_us
= min_t(u32
, delay_us
* (1U << exp
), ENA_MAX_ADMIN_POLL_US
);
541 usleep_range(delay_us
, 2 * delay_us
);
544 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx
*comp_ctx
,
545 struct ena_com_admin_queue
*admin_queue
)
547 unsigned long flags
= 0;
548 unsigned long timeout
;
552 timeout
= jiffies
+ usecs_to_jiffies(admin_queue
->completion_timeout
);
555 spin_lock_irqsave(&admin_queue
->q_lock
, flags
);
556 ena_com_handle_admin_completion(admin_queue
);
557 spin_unlock_irqrestore(&admin_queue
->q_lock
, flags
);
559 if (comp_ctx
->status
!= ENA_CMD_SUBMITTED
)
562 if (time_is_before_jiffies(timeout
)) {
563 netdev_err(admin_queue
->ena_dev
->net_device
,
564 "Wait for completion (polling) timeout\n");
565 /* ENA didn't have any completion */
566 spin_lock_irqsave(&admin_queue
->q_lock
, flags
);
567 admin_queue
->stats
.no_completion
++;
568 admin_queue
->running_state
= false;
569 spin_unlock_irqrestore(&admin_queue
->q_lock
, flags
);
575 ena_delay_exponential_backoff_us(exp
++,
576 admin_queue
->ena_dev
->ena_min_poll_delay_us
);
579 if (unlikely(comp_ctx
->status
== ENA_CMD_ABORTED
)) {
580 netdev_err(admin_queue
->ena_dev
->net_device
,
581 "Command was aborted\n");
582 spin_lock_irqsave(&admin_queue
->q_lock
, flags
);
583 admin_queue
->stats
.aborted_cmd
++;
584 spin_unlock_irqrestore(&admin_queue
->q_lock
, flags
);
589 WARN(comp_ctx
->status
!= ENA_CMD_COMPLETED
, "Invalid comp status %d\n",
592 ret
= ena_com_comp_status_to_errno(admin_queue
, comp_ctx
->comp_status
);
594 comp_ctxt_release(admin_queue
, comp_ctx
);
599 * Set the LLQ configurations of the firmware
601 * The driver provides only the enabled feature values to the device,
602 * which in turn, checks if they are supported.
604 static int ena_com_set_llq(struct ena_com_dev
*ena_dev
)
606 struct ena_com_admin_queue
*admin_queue
;
607 struct ena_admin_set_feat_cmd cmd
;
608 struct ena_admin_set_feat_resp resp
;
609 struct ena_com_llq_info
*llq_info
= &ena_dev
->llq_info
;
612 memset(&cmd
, 0x0, sizeof(cmd
));
613 admin_queue
= &ena_dev
->admin_queue
;
615 cmd
.aq_common_descriptor
.opcode
= ENA_ADMIN_SET_FEATURE
;
616 cmd
.feat_common
.feature_id
= ENA_ADMIN_LLQ
;
618 cmd
.u
.llq
.header_location_ctrl_enabled
= llq_info
->header_location_ctrl
;
619 cmd
.u
.llq
.entry_size_ctrl_enabled
= llq_info
->desc_list_entry_size_ctrl
;
620 cmd
.u
.llq
.desc_num_before_header_enabled
= llq_info
->descs_num_before_header
;
621 cmd
.u
.llq
.descriptors_stride_ctrl_enabled
= llq_info
->desc_stride_ctrl
;
623 cmd
.u
.llq
.accel_mode
.u
.set
.enabled_flags
=
624 BIT(ENA_ADMIN_DISABLE_META_CACHING
) |
625 BIT(ENA_ADMIN_LIMIT_TX_BURST
);
627 ret
= ena_com_execute_admin_command(admin_queue
,
628 (struct ena_admin_aq_entry
*)&cmd
,
630 (struct ena_admin_acq_entry
*)&resp
,
634 netdev_err(ena_dev
->net_device
,
635 "Failed to set LLQ configurations: %d\n", ret
);
640 static int ena_com_config_llq_info(struct ena_com_dev
*ena_dev
,
641 struct ena_admin_feature_llq_desc
*llq_features
,
642 struct ena_llq_configurations
*llq_default_cfg
)
644 struct ena_com_llq_info
*llq_info
= &ena_dev
->llq_info
;
645 struct ena_admin_accel_mode_get llq_accel_mode_get
;
649 memset(llq_info
, 0, sizeof(*llq_info
));
651 supported_feat
= llq_features
->header_location_ctrl_supported
;
653 if (likely(supported_feat
& llq_default_cfg
->llq_header_location
)) {
654 llq_info
->header_location_ctrl
=
655 llq_default_cfg
->llq_header_location
;
657 netdev_err(ena_dev
->net_device
,
658 "Invalid header location control, supported: 0x%x\n",
663 if (likely(llq_info
->header_location_ctrl
== ENA_ADMIN_INLINE_HEADER
)) {
664 supported_feat
= llq_features
->descriptors_stride_ctrl_supported
;
665 if (likely(supported_feat
& llq_default_cfg
->llq_stride_ctrl
)) {
666 llq_info
->desc_stride_ctrl
= llq_default_cfg
->llq_stride_ctrl
;
668 if (supported_feat
& ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY
) {
669 llq_info
->desc_stride_ctrl
= ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY
;
670 } else if (supported_feat
& ENA_ADMIN_SINGLE_DESC_PER_ENTRY
) {
671 llq_info
->desc_stride_ctrl
= ENA_ADMIN_SINGLE_DESC_PER_ENTRY
;
673 netdev_err(ena_dev
->net_device
,
674 "Invalid desc_stride_ctrl, supported: 0x%x\n",
679 netdev_err(ena_dev
->net_device
,
680 "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
681 llq_default_cfg
->llq_stride_ctrl
,
682 supported_feat
, llq_info
->desc_stride_ctrl
);
685 llq_info
->desc_stride_ctrl
= 0;
688 supported_feat
= llq_features
->entry_size_ctrl_supported
;
689 if (likely(supported_feat
& llq_default_cfg
->llq_ring_entry_size
)) {
690 llq_info
->desc_list_entry_size_ctrl
= llq_default_cfg
->llq_ring_entry_size
;
691 llq_info
->desc_list_entry_size
= llq_default_cfg
->llq_ring_entry_size_value
;
693 if (supported_feat
& ENA_ADMIN_LIST_ENTRY_SIZE_128B
) {
694 llq_info
->desc_list_entry_size_ctrl
= ENA_ADMIN_LIST_ENTRY_SIZE_128B
;
695 llq_info
->desc_list_entry_size
= 128;
696 } else if (supported_feat
& ENA_ADMIN_LIST_ENTRY_SIZE_192B
) {
697 llq_info
->desc_list_entry_size_ctrl
= ENA_ADMIN_LIST_ENTRY_SIZE_192B
;
698 llq_info
->desc_list_entry_size
= 192;
699 } else if (supported_feat
& ENA_ADMIN_LIST_ENTRY_SIZE_256B
) {
700 llq_info
->desc_list_entry_size_ctrl
= ENA_ADMIN_LIST_ENTRY_SIZE_256B
;
701 llq_info
->desc_list_entry_size
= 256;
703 netdev_err(ena_dev
->net_device
,
704 "Invalid entry_size_ctrl, supported: 0x%x\n",
709 netdev_err(ena_dev
->net_device
,
710 "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
711 llq_default_cfg
->llq_ring_entry_size
, supported_feat
,
712 llq_info
->desc_list_entry_size
);
714 if (unlikely(llq_info
->desc_list_entry_size
& 0x7)) {
715 /* The desc list entry size should be whole multiply of 8
716 * This requirement comes from __iowrite64_copy()
718 netdev_err(ena_dev
->net_device
, "Illegal entry size %d\n",
719 llq_info
->desc_list_entry_size
);
723 if (llq_info
->desc_stride_ctrl
== ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY
)
724 llq_info
->descs_per_entry
= llq_info
->desc_list_entry_size
/
725 sizeof(struct ena_eth_io_tx_desc
);
727 llq_info
->descs_per_entry
= 1;
729 supported_feat
= llq_features
->desc_num_before_header_supported
;
730 if (likely(supported_feat
& llq_default_cfg
->llq_num_decs_before_header
)) {
731 llq_info
->descs_num_before_header
= llq_default_cfg
->llq_num_decs_before_header
;
733 if (supported_feat
& ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2
) {
734 llq_info
->descs_num_before_header
= ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2
;
735 } else if (supported_feat
& ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1
) {
736 llq_info
->descs_num_before_header
= ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1
;
737 } else if (supported_feat
& ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4
) {
738 llq_info
->descs_num_before_header
= ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4
;
739 } else if (supported_feat
& ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8
) {
740 llq_info
->descs_num_before_header
= ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8
;
742 netdev_err(ena_dev
->net_device
,
743 "Invalid descs_num_before_header, supported: 0x%x\n",
748 netdev_err(ena_dev
->net_device
,
749 "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
750 llq_default_cfg
->llq_num_decs_before_header
,
751 supported_feat
, llq_info
->descs_num_before_header
);
753 /* Check for accelerated queue supported */
754 llq_accel_mode_get
= llq_features
->accel_mode
.u
.get
;
756 llq_info
->disable_meta_caching
=
757 !!(llq_accel_mode_get
.supported_flags
&
758 BIT(ENA_ADMIN_DISABLE_META_CACHING
));
760 if (llq_accel_mode_get
.supported_flags
& BIT(ENA_ADMIN_LIMIT_TX_BURST
))
761 llq_info
->max_entries_in_tx_burst
=
762 llq_accel_mode_get
.max_tx_burst_size
/
763 llq_default_cfg
->llq_ring_entry_size_value
;
765 rc
= ena_com_set_llq(ena_dev
);
767 netdev_err(ena_dev
->net_device
,
768 "Cannot set LLQ configuration: %d\n", rc
);
773 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx
*comp_ctx
,
774 struct ena_com_admin_queue
*admin_queue
)
776 unsigned long flags
= 0;
779 wait_for_completion_timeout(&comp_ctx
->wait_event
,
781 admin_queue
->completion_timeout
));
783 /* In case the command wasn't completed find out the root cause.
784 * There might be 2 kinds of errors
785 * 1) No completion (timeout reached)
786 * 2) There is completion but the device didn't get any msi-x interrupt.
788 if (unlikely(comp_ctx
->status
== ENA_CMD_SUBMITTED
)) {
789 spin_lock_irqsave(&admin_queue
->q_lock
, flags
);
790 ena_com_handle_admin_completion(admin_queue
);
791 admin_queue
->stats
.no_completion
++;
792 spin_unlock_irqrestore(&admin_queue
->q_lock
, flags
);
794 if (comp_ctx
->status
== ENA_CMD_COMPLETED
) {
795 netdev_err(admin_queue
->ena_dev
->net_device
,
796 "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
797 comp_ctx
->cmd_opcode
,
798 admin_queue
->auto_polling
? "ON" : "OFF");
799 /* Check if fallback to polling is enabled */
800 if (admin_queue
->auto_polling
)
801 admin_queue
->polling
= true;
803 netdev_err(admin_queue
->ena_dev
->net_device
,
804 "The ena device didn't send a completion for the admin cmd %d status %d\n",
805 comp_ctx
->cmd_opcode
, comp_ctx
->status
);
807 /* Check if shifted to polling mode.
808 * This will happen if there is a completion without an interrupt
809 * and autopolling mode is enabled. Continuing normal execution in such case
811 if (!admin_queue
->polling
) {
812 admin_queue
->running_state
= false;
818 ret
= ena_com_comp_status_to_errno(admin_queue
, comp_ctx
->comp_status
);
820 comp_ctxt_release(admin_queue
, comp_ctx
);
824 /* This method read the hardware device register through posting writes
825 * and waiting for response
826 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
828 static u32
ena_com_reg_bar_read32(struct ena_com_dev
*ena_dev
, u16 offset
)
830 struct ena_com_mmio_read
*mmio_read
= &ena_dev
->mmio_read
;
831 volatile struct ena_admin_ena_mmio_req_read_less_resp
*read_resp
=
832 mmio_read
->read_resp
;
833 u32 mmio_read_reg
, ret
, i
;
834 unsigned long flags
= 0;
835 u32 timeout
= mmio_read
->reg_read_to
;
840 timeout
= ENA_REG_READ_TIMEOUT
;
842 /* If readless is disabled, perform regular read */
843 if (!mmio_read
->readless_supported
)
844 return readl(ena_dev
->reg_bar
+ offset
);
846 spin_lock_irqsave(&mmio_read
->lock
, flags
);
847 mmio_read
->seq_num
++;
849 read_resp
->req_id
= mmio_read
->seq_num
+ 0xDEAD;
850 mmio_read_reg
= (offset
<< ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT
) &
851 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK
;
852 mmio_read_reg
|= mmio_read
->seq_num
&
853 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK
;
855 writel(mmio_read_reg
, ena_dev
->reg_bar
+ ENA_REGS_MMIO_REG_READ_OFF
);
857 for (i
= 0; i
< timeout
; i
++) {
858 if (READ_ONCE(read_resp
->req_id
) == mmio_read
->seq_num
)
864 if (unlikely(i
== timeout
)) {
865 netdev_err(ena_dev
->net_device
,
866 "Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
867 mmio_read
->seq_num
, offset
, read_resp
->req_id
,
869 ret
= ENA_MMIO_READ_TIMEOUT
;
873 if (read_resp
->reg_off
!= offset
) {
874 netdev_err(ena_dev
->net_device
,
875 "Read failure: wrong offset provided\n");
876 ret
= ENA_MMIO_READ_TIMEOUT
;
878 ret
= read_resp
->reg_val
;
881 spin_unlock_irqrestore(&mmio_read
->lock
, flags
);
886 /* There are two types to wait for completion.
887 * Polling mode - wait until the completion is available.
888 * Async mode - wait on wait queue until the completion is ready
889 * (or the timeout expired).
890 * It is expected that the IRQ called ena_com_handle_admin_completion
891 * to mark the completions.
893 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx
*comp_ctx
,
894 struct ena_com_admin_queue
*admin_queue
)
896 if (admin_queue
->polling
)
897 return ena_com_wait_and_process_admin_cq_polling(comp_ctx
,
900 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx
,
904 static int ena_com_destroy_io_sq(struct ena_com_dev
*ena_dev
,
905 struct ena_com_io_sq
*io_sq
)
907 struct ena_com_admin_queue
*admin_queue
= &ena_dev
->admin_queue
;
908 struct ena_admin_aq_destroy_sq_cmd destroy_cmd
;
909 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp
;
913 memset(&destroy_cmd
, 0x0, sizeof(destroy_cmd
));
915 if (io_sq
->direction
== ENA_COM_IO_QUEUE_DIRECTION_TX
)
916 direction
= ENA_ADMIN_SQ_DIRECTION_TX
;
918 direction
= ENA_ADMIN_SQ_DIRECTION_RX
;
920 destroy_cmd
.sq
.sq_identity
|= (direction
<<
921 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT
) &
922 ENA_ADMIN_SQ_SQ_DIRECTION_MASK
;
924 destroy_cmd
.sq
.sq_idx
= io_sq
->idx
;
925 destroy_cmd
.aq_common_descriptor
.opcode
= ENA_ADMIN_DESTROY_SQ
;
927 ret
= ena_com_execute_admin_command(admin_queue
,
928 (struct ena_admin_aq_entry
*)&destroy_cmd
,
930 (struct ena_admin_acq_entry
*)&destroy_resp
,
931 sizeof(destroy_resp
));
933 if (unlikely(ret
&& (ret
!= -ENODEV
)))
934 netdev_err(ena_dev
->net_device
,
935 "Failed to destroy io sq error: %d\n", ret
);
940 static void ena_com_io_queue_free(struct ena_com_dev
*ena_dev
,
941 struct ena_com_io_sq
*io_sq
,
942 struct ena_com_io_cq
*io_cq
)
946 if (io_cq
->cdesc_addr
.virt_addr
) {
947 size
= io_cq
->cdesc_entry_size_in_bytes
* io_cq
->q_depth
;
949 dma_free_coherent(ena_dev
->dmadev
, size
,
950 io_cq
->cdesc_addr
.virt_addr
,
951 io_cq
->cdesc_addr
.phys_addr
);
953 io_cq
->cdesc_addr
.virt_addr
= NULL
;
956 if (io_sq
->desc_addr
.virt_addr
) {
957 size
= io_sq
->desc_entry_size
* io_sq
->q_depth
;
959 dma_free_coherent(ena_dev
->dmadev
, size
,
960 io_sq
->desc_addr
.virt_addr
,
961 io_sq
->desc_addr
.phys_addr
);
963 io_sq
->desc_addr
.virt_addr
= NULL
;
966 if (io_sq
->bounce_buf_ctrl
.base_buffer
) {
967 devm_kfree(ena_dev
->dmadev
, io_sq
->bounce_buf_ctrl
.base_buffer
);
968 io_sq
->bounce_buf_ctrl
.base_buffer
= NULL
;
972 static int wait_for_reset_state(struct ena_com_dev
*ena_dev
, u32 timeout
,
976 unsigned long timeout_stamp
;
978 /* Convert timeout from resolution of 100ms to us resolution. */
979 timeout_stamp
= jiffies
+ usecs_to_jiffies(100 * 1000 * timeout
);
982 val
= ena_com_reg_bar_read32(ena_dev
, ENA_REGS_DEV_STS_OFF
);
984 if (unlikely(val
== ENA_MMIO_READ_TIMEOUT
)) {
985 netdev_err(ena_dev
->net_device
,
986 "Reg read timeout occurred\n");
990 if ((val
& ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK
) ==
994 if (time_is_before_jiffies(timeout_stamp
))
997 ena_delay_exponential_backoff_us(exp
++, ena_dev
->ena_min_poll_delay_us
);
1001 static bool ena_com_check_supported_feature_id(struct ena_com_dev
*ena_dev
,
1002 enum ena_admin_aq_feature_id feature_id
)
1004 u32 feature_mask
= 1 << feature_id
;
1006 /* Device attributes is always supported */
1007 if ((feature_id
!= ENA_ADMIN_DEVICE_ATTRIBUTES
) &&
1008 !(ena_dev
->supported_features
& feature_mask
))
1014 static int ena_com_get_feature_ex(struct ena_com_dev
*ena_dev
,
1015 struct ena_admin_get_feat_resp
*get_resp
,
1016 enum ena_admin_aq_feature_id feature_id
,
1017 dma_addr_t control_buf_dma_addr
,
1018 u32 control_buff_size
,
1021 struct ena_com_admin_queue
*admin_queue
;
1022 struct ena_admin_get_feat_cmd get_cmd
;
1025 if (!ena_com_check_supported_feature_id(ena_dev
, feature_id
)) {
1026 netdev_dbg(ena_dev
->net_device
, "Feature %d isn't supported\n",
1031 memset(&get_cmd
, 0x0, sizeof(get_cmd
));
1032 admin_queue
= &ena_dev
->admin_queue
;
1034 get_cmd
.aq_common_descriptor
.opcode
= ENA_ADMIN_GET_FEATURE
;
1036 if (control_buff_size
)
1037 get_cmd
.aq_common_descriptor
.flags
=
1038 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK
;
1040 get_cmd
.aq_common_descriptor
.flags
= 0;
1042 ret
= ena_com_mem_addr_set(ena_dev
,
1043 &get_cmd
.control_buffer
.address
,
1044 control_buf_dma_addr
);
1045 if (unlikely(ret
)) {
1046 netdev_err(ena_dev
->net_device
, "Memory address set failed\n");
1050 get_cmd
.control_buffer
.length
= control_buff_size
;
1051 get_cmd
.feat_common
.feature_version
= feature_ver
;
1052 get_cmd
.feat_common
.feature_id
= feature_id
;
1054 ret
= ena_com_execute_admin_command(admin_queue
,
1055 (struct ena_admin_aq_entry
*)
1058 (struct ena_admin_acq_entry
*)
1063 netdev_err(ena_dev
->net_device
,
1064 "Failed to submit get_feature command %d error: %d\n",
1070 static int ena_com_get_feature(struct ena_com_dev
*ena_dev
,
1071 struct ena_admin_get_feat_resp
*get_resp
,
1072 enum ena_admin_aq_feature_id feature_id
,
1075 return ena_com_get_feature_ex(ena_dev
,
1083 int ena_com_get_current_hash_function(struct ena_com_dev
*ena_dev
)
1085 return ena_dev
->rss
.hash_func
;
1088 static void ena_com_hash_key_fill_default_key(struct ena_com_dev
*ena_dev
)
1090 struct ena_admin_feature_rss_flow_hash_control
*hash_key
=
1091 (ena_dev
->rss
).hash_key
;
1093 netdev_rss_key_fill(&hash_key
->key
, sizeof(hash_key
->key
));
1094 /* The key buffer is stored in the device in an array of
1097 hash_key
->key_parts
= ENA_ADMIN_RSS_KEY_PARTS
;
1100 static int ena_com_hash_key_allocate(struct ena_com_dev
*ena_dev
)
1102 struct ena_rss
*rss
= &ena_dev
->rss
;
1104 if (!ena_com_check_supported_feature_id(ena_dev
,
1105 ENA_ADMIN_RSS_HASH_FUNCTION
))
1109 dma_alloc_coherent(ena_dev
->dmadev
, sizeof(*rss
->hash_key
),
1110 &rss
->hash_key_dma_addr
, GFP_KERNEL
);
1112 if (unlikely(!rss
->hash_key
))
1118 static void ena_com_hash_key_destroy(struct ena_com_dev
*ena_dev
)
1120 struct ena_rss
*rss
= &ena_dev
->rss
;
1123 dma_free_coherent(ena_dev
->dmadev
, sizeof(*rss
->hash_key
),
1124 rss
->hash_key
, rss
->hash_key_dma_addr
);
1125 rss
->hash_key
= NULL
;
1128 static int ena_com_hash_ctrl_init(struct ena_com_dev
*ena_dev
)
1130 struct ena_rss
*rss
= &ena_dev
->rss
;
1133 dma_alloc_coherent(ena_dev
->dmadev
, sizeof(*rss
->hash_ctrl
),
1134 &rss
->hash_ctrl_dma_addr
, GFP_KERNEL
);
1136 if (unlikely(!rss
->hash_ctrl
))
1142 static void ena_com_hash_ctrl_destroy(struct ena_com_dev
*ena_dev
)
1144 struct ena_rss
*rss
= &ena_dev
->rss
;
1147 dma_free_coherent(ena_dev
->dmadev
, sizeof(*rss
->hash_ctrl
),
1148 rss
->hash_ctrl
, rss
->hash_ctrl_dma_addr
);
1149 rss
->hash_ctrl
= NULL
;
1152 static int ena_com_indirect_table_allocate(struct ena_com_dev
*ena_dev
,
1155 struct ena_rss
*rss
= &ena_dev
->rss
;
1156 struct ena_admin_get_feat_resp get_resp
;
1160 ret
= ena_com_get_feature(ena_dev
, &get_resp
,
1161 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG
, 0);
1165 if ((get_resp
.u
.ind_table
.min_size
> log_size
) ||
1166 (get_resp
.u
.ind_table
.max_size
< log_size
)) {
1167 netdev_err(ena_dev
->net_device
,
1168 "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1169 1 << log_size
, 1 << get_resp
.u
.ind_table
.min_size
,
1170 1 << get_resp
.u
.ind_table
.max_size
);
1174 tbl_size
= (1ULL << log_size
) *
1175 sizeof(struct ena_admin_rss_ind_table_entry
);
1178 dma_alloc_coherent(ena_dev
->dmadev
, tbl_size
,
1179 &rss
->rss_ind_tbl_dma_addr
, GFP_KERNEL
);
1180 if (unlikely(!rss
->rss_ind_tbl
))
1183 tbl_size
= (1ULL << log_size
) * sizeof(u16
);
1184 rss
->host_rss_ind_tbl
=
1185 devm_kzalloc(ena_dev
->dmadev
, tbl_size
, GFP_KERNEL
);
1186 if (unlikely(!rss
->host_rss_ind_tbl
))
1189 rss
->tbl_log_size
= log_size
;
1194 tbl_size
= (1ULL << log_size
) *
1195 sizeof(struct ena_admin_rss_ind_table_entry
);
1197 dma_free_coherent(ena_dev
->dmadev
, tbl_size
, rss
->rss_ind_tbl
,
1198 rss
->rss_ind_tbl_dma_addr
);
1199 rss
->rss_ind_tbl
= NULL
;
1201 rss
->tbl_log_size
= 0;
1205 static void ena_com_indirect_table_destroy(struct ena_com_dev
*ena_dev
)
1207 struct ena_rss
*rss
= &ena_dev
->rss
;
1208 size_t tbl_size
= (1ULL << rss
->tbl_log_size
) *
1209 sizeof(struct ena_admin_rss_ind_table_entry
);
1211 if (rss
->rss_ind_tbl
)
1212 dma_free_coherent(ena_dev
->dmadev
, tbl_size
, rss
->rss_ind_tbl
,
1213 rss
->rss_ind_tbl_dma_addr
);
1214 rss
->rss_ind_tbl
= NULL
;
1216 if (rss
->host_rss_ind_tbl
)
1217 devm_kfree(ena_dev
->dmadev
, rss
->host_rss_ind_tbl
);
1218 rss
->host_rss_ind_tbl
= NULL
;
1221 static int ena_com_create_io_sq(struct ena_com_dev
*ena_dev
,
1222 struct ena_com_io_sq
*io_sq
, u16 cq_idx
)
1224 struct ena_com_admin_queue
*admin_queue
= &ena_dev
->admin_queue
;
1225 struct ena_admin_aq_create_sq_cmd create_cmd
;
1226 struct ena_admin_acq_create_sq_resp_desc cmd_completion
;
1230 memset(&create_cmd
, 0x0, sizeof(create_cmd
));
1232 create_cmd
.aq_common_descriptor
.opcode
= ENA_ADMIN_CREATE_SQ
;
1234 if (io_sq
->direction
== ENA_COM_IO_QUEUE_DIRECTION_TX
)
1235 direction
= ENA_ADMIN_SQ_DIRECTION_TX
;
1237 direction
= ENA_ADMIN_SQ_DIRECTION_RX
;
1239 create_cmd
.sq_identity
|= (direction
<<
1240 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT
) &
1241 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK
;
1243 create_cmd
.sq_caps_2
|= io_sq
->mem_queue_type
&
1244 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK
;
1246 create_cmd
.sq_caps_2
|= (ENA_ADMIN_COMPLETION_POLICY_DESC
<<
1247 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT
) &
1248 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK
;
1250 create_cmd
.sq_caps_3
|=
1251 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK
;
1253 create_cmd
.cq_idx
= cq_idx
;
1254 create_cmd
.sq_depth
= io_sq
->q_depth
;
1256 if (io_sq
->mem_queue_type
== ENA_ADMIN_PLACEMENT_POLICY_HOST
) {
1257 ret
= ena_com_mem_addr_set(ena_dev
,
1259 io_sq
->desc_addr
.phys_addr
);
1260 if (unlikely(ret
)) {
1261 netdev_err(ena_dev
->net_device
,
1262 "Memory address set failed\n");
1267 ret
= ena_com_execute_admin_command(admin_queue
,
1268 (struct ena_admin_aq_entry
*)&create_cmd
,
1270 (struct ena_admin_acq_entry
*)&cmd_completion
,
1271 sizeof(cmd_completion
));
1272 if (unlikely(ret
)) {
1273 netdev_err(ena_dev
->net_device
,
1274 "Failed to create IO SQ. error: %d\n", ret
);
1278 io_sq
->idx
= cmd_completion
.sq_idx
;
1280 io_sq
->db_addr
= (u32 __iomem
*)((uintptr_t)ena_dev
->reg_bar
+
1281 (uintptr_t)cmd_completion
.sq_doorbell_offset
);
1283 if (io_sq
->mem_queue_type
== ENA_ADMIN_PLACEMENT_POLICY_DEV
) {
1284 io_sq
->header_addr
= (u8 __iomem
*)((uintptr_t)ena_dev
->mem_bar
1285 + cmd_completion
.llq_headers_offset
);
1287 io_sq
->desc_addr
.pbuf_dev_addr
=
1288 (u8 __iomem
*)((uintptr_t)ena_dev
->mem_bar
+
1289 cmd_completion
.llq_descriptors_offset
);
1292 netdev_dbg(ena_dev
->net_device
, "Created sq[%u], depth[%u]\n",
1293 io_sq
->idx
, io_sq
->q_depth
);
1298 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev
*ena_dev
)
1300 struct ena_rss
*rss
= &ena_dev
->rss
;
1301 struct ena_com_io_sq
*io_sq
;
1305 for (i
= 0; i
< 1 << rss
->tbl_log_size
; i
++) {
1306 qid
= rss
->host_rss_ind_tbl
[i
];
1307 if (qid
>= ENA_TOTAL_NUM_QUEUES
)
1310 io_sq
= &ena_dev
->io_sq_queues
[qid
];
1312 if (io_sq
->direction
!= ENA_COM_IO_QUEUE_DIRECTION_RX
)
1315 rss
->rss_ind_tbl
[i
].cq_idx
= io_sq
->idx
;
1321 static void ena_com_update_intr_delay_resolution(struct ena_com_dev
*ena_dev
,
1322 u16 intr_delay_resolution
)
1324 u16 prev_intr_delay_resolution
= ena_dev
->intr_delay_resolution
;
1326 if (unlikely(!intr_delay_resolution
)) {
1327 netdev_err(ena_dev
->net_device
,
1328 "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1329 intr_delay_resolution
= ENA_DEFAULT_INTR_DELAY_RESOLUTION
;
1333 ena_dev
->intr_moder_rx_interval
=
1334 ena_dev
->intr_moder_rx_interval
*
1335 prev_intr_delay_resolution
/
1336 intr_delay_resolution
;
1339 ena_dev
->intr_moder_tx_interval
=
1340 ena_dev
->intr_moder_tx_interval
*
1341 prev_intr_delay_resolution
/
1342 intr_delay_resolution
;
1344 ena_dev
->intr_delay_resolution
= intr_delay_resolution
;
1347 /*****************************************************************************/
1348 /******************************* API ******************************/
1349 /*****************************************************************************/
1351 int ena_com_execute_admin_command(struct ena_com_admin_queue
*admin_queue
,
1352 struct ena_admin_aq_entry
*cmd
,
1354 struct ena_admin_acq_entry
*comp
,
1357 struct ena_comp_ctx
*comp_ctx
;
1360 comp_ctx
= ena_com_submit_admin_cmd(admin_queue
, cmd
, cmd_size
,
1362 if (IS_ERR(comp_ctx
)) {
1363 ret
= PTR_ERR(comp_ctx
);
1365 netdev_dbg(admin_queue
->ena_dev
->net_device
,
1366 "Failed to submit command [%d]\n", ret
);
1368 netdev_err(admin_queue
->ena_dev
->net_device
,
1369 "Failed to submit command [%d]\n", ret
);
1374 ret
= ena_com_wait_and_process_admin_cq(comp_ctx
, admin_queue
);
1375 if (unlikely(ret
)) {
1376 if (admin_queue
->running_state
)
1377 netdev_err(admin_queue
->ena_dev
->net_device
,
1378 "Failed to process command. ret = %d\n", ret
);
1380 netdev_dbg(admin_queue
->ena_dev
->net_device
,
1381 "Failed to process command. ret = %d\n", ret
);
1386 int ena_com_create_io_cq(struct ena_com_dev
*ena_dev
,
1387 struct ena_com_io_cq
*io_cq
)
1389 struct ena_com_admin_queue
*admin_queue
= &ena_dev
->admin_queue
;
1390 struct ena_admin_aq_create_cq_cmd create_cmd
;
1391 struct ena_admin_acq_create_cq_resp_desc cmd_completion
;
1394 memset(&create_cmd
, 0x0, sizeof(create_cmd
));
1396 create_cmd
.aq_common_descriptor
.opcode
= ENA_ADMIN_CREATE_CQ
;
1398 create_cmd
.cq_caps_2
|= (io_cq
->cdesc_entry_size_in_bytes
/ 4) &
1399 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK
;
1400 create_cmd
.cq_caps_1
|=
1401 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK
;
1403 create_cmd
.msix_vector
= io_cq
->msix_vector
;
1404 create_cmd
.cq_depth
= io_cq
->q_depth
;
1406 ret
= ena_com_mem_addr_set(ena_dev
,
1408 io_cq
->cdesc_addr
.phys_addr
);
1409 if (unlikely(ret
)) {
1410 netdev_err(ena_dev
->net_device
, "Memory address set failed\n");
1414 ret
= ena_com_execute_admin_command(admin_queue
,
1415 (struct ena_admin_aq_entry
*)&create_cmd
,
1417 (struct ena_admin_acq_entry
*)&cmd_completion
,
1418 sizeof(cmd_completion
));
1419 if (unlikely(ret
)) {
1420 netdev_err(ena_dev
->net_device
,
1421 "Failed to create IO CQ. error: %d\n", ret
);
1425 io_cq
->idx
= cmd_completion
.cq_idx
;
1427 io_cq
->unmask_reg
= (u32 __iomem
*)((uintptr_t)ena_dev
->reg_bar
+
1428 cmd_completion
.cq_interrupt_unmask_register_offset
);
1430 if (cmd_completion
.cq_head_db_register_offset
)
1431 io_cq
->cq_head_db_reg
=
1432 (u32 __iomem
*)((uintptr_t)ena_dev
->reg_bar
+
1433 cmd_completion
.cq_head_db_register_offset
);
1435 if (cmd_completion
.numa_node_register_offset
)
1436 io_cq
->numa_node_cfg_reg
=
1437 (u32 __iomem
*)((uintptr_t)ena_dev
->reg_bar
+
1438 cmd_completion
.numa_node_register_offset
);
1440 netdev_dbg(ena_dev
->net_device
, "Created cq[%u], depth[%u]\n",
1441 io_cq
->idx
, io_cq
->q_depth
);
1446 int ena_com_get_io_handlers(struct ena_com_dev
*ena_dev
, u16 qid
,
1447 struct ena_com_io_sq
**io_sq
,
1448 struct ena_com_io_cq
**io_cq
)
1450 if (qid
>= ENA_TOTAL_NUM_QUEUES
) {
1451 netdev_err(ena_dev
->net_device
,
1452 "Invalid queue number %d but the max is %d\n", qid
,
1453 ENA_TOTAL_NUM_QUEUES
);
1457 *io_sq
= &ena_dev
->io_sq_queues
[qid
];
1458 *io_cq
= &ena_dev
->io_cq_queues
[qid
];
1463 void ena_com_abort_admin_commands(struct ena_com_dev
*ena_dev
)
1465 struct ena_com_admin_queue
*admin_queue
= &ena_dev
->admin_queue
;
1466 struct ena_comp_ctx
*comp_ctx
;
1469 if (!admin_queue
->comp_ctx
)
1472 for (i
= 0; i
< admin_queue
->q_depth
; i
++) {
1473 comp_ctx
= get_comp_ctxt(admin_queue
, i
, false);
1474 if (unlikely(!comp_ctx
))
1477 comp_ctx
->status
= ENA_CMD_ABORTED
;
1479 complete(&comp_ctx
->wait_event
);
1483 void ena_com_wait_for_abort_completion(struct ena_com_dev
*ena_dev
)
1485 struct ena_com_admin_queue
*admin_queue
= &ena_dev
->admin_queue
;
1486 unsigned long flags
= 0;
1489 spin_lock_irqsave(&admin_queue
->q_lock
, flags
);
1490 while (atomic_read(&admin_queue
->outstanding_cmds
) != 0) {
1491 spin_unlock_irqrestore(&admin_queue
->q_lock
, flags
);
1492 ena_delay_exponential_backoff_us(exp
++,
1493 ena_dev
->ena_min_poll_delay_us
);
1494 spin_lock_irqsave(&admin_queue
->q_lock
, flags
);
1496 spin_unlock_irqrestore(&admin_queue
->q_lock
, flags
);
1499 int ena_com_destroy_io_cq(struct ena_com_dev
*ena_dev
,
1500 struct ena_com_io_cq
*io_cq
)
1502 struct ena_com_admin_queue
*admin_queue
= &ena_dev
->admin_queue
;
1503 struct ena_admin_aq_destroy_cq_cmd destroy_cmd
;
1504 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp
;
1507 memset(&destroy_cmd
, 0x0, sizeof(destroy_cmd
));
1509 destroy_cmd
.cq_idx
= io_cq
->idx
;
1510 destroy_cmd
.aq_common_descriptor
.opcode
= ENA_ADMIN_DESTROY_CQ
;
1512 ret
= ena_com_execute_admin_command(admin_queue
,
1513 (struct ena_admin_aq_entry
*)&destroy_cmd
,
1514 sizeof(destroy_cmd
),
1515 (struct ena_admin_acq_entry
*)&destroy_resp
,
1516 sizeof(destroy_resp
));
1518 if (unlikely(ret
&& (ret
!= -ENODEV
)))
1519 netdev_err(ena_dev
->net_device
,
1520 "Failed to destroy IO CQ. error: %d\n", ret
);
1525 bool ena_com_get_admin_running_state(struct ena_com_dev
*ena_dev
)
1527 return ena_dev
->admin_queue
.running_state
;
1530 void ena_com_set_admin_running_state(struct ena_com_dev
*ena_dev
, bool state
)
1532 struct ena_com_admin_queue
*admin_queue
= &ena_dev
->admin_queue
;
1533 unsigned long flags
= 0;
1535 spin_lock_irqsave(&admin_queue
->q_lock
, flags
);
1536 ena_dev
->admin_queue
.running_state
= state
;
1537 spin_unlock_irqrestore(&admin_queue
->q_lock
, flags
);
1540 void ena_com_admin_aenq_enable(struct ena_com_dev
*ena_dev
)
1542 u16 depth
= ena_dev
->aenq
.q_depth
;
1544 WARN(ena_dev
->aenq
.head
!= depth
, "Invalid AENQ state\n");
1546 /* Init head_db to mark that all entries in the queue
1547 * are initially available
1549 writel(depth
, ena_dev
->reg_bar
+ ENA_REGS_AENQ_HEAD_DB_OFF
);
1552 int ena_com_set_aenq_config(struct ena_com_dev
*ena_dev
, u32 groups_flag
)
1554 struct ena_com_admin_queue
*admin_queue
;
1555 struct ena_admin_set_feat_cmd cmd
;
1556 struct ena_admin_set_feat_resp resp
;
1557 struct ena_admin_get_feat_resp get_resp
;
1560 ret
= ena_com_get_feature(ena_dev
, &get_resp
, ENA_ADMIN_AENQ_CONFIG
, 0);
1562 dev_info(ena_dev
->dmadev
, "Can't get aenq configuration\n");
1566 if ((get_resp
.u
.aenq
.supported_groups
& groups_flag
) != groups_flag
) {
1567 netdev_warn(ena_dev
->net_device
,
1568 "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1569 get_resp
.u
.aenq
.supported_groups
, groups_flag
);
1573 memset(&cmd
, 0x0, sizeof(cmd
));
1574 admin_queue
= &ena_dev
->admin_queue
;
1576 cmd
.aq_common_descriptor
.opcode
= ENA_ADMIN_SET_FEATURE
;
1577 cmd
.aq_common_descriptor
.flags
= 0;
1578 cmd
.feat_common
.feature_id
= ENA_ADMIN_AENQ_CONFIG
;
1579 cmd
.u
.aenq
.enabled_groups
= groups_flag
;
1581 ret
= ena_com_execute_admin_command(admin_queue
,
1582 (struct ena_admin_aq_entry
*)&cmd
,
1584 (struct ena_admin_acq_entry
*)&resp
,
1588 netdev_err(ena_dev
->net_device
,
1589 "Failed to config AENQ ret: %d\n", ret
);
1594 int ena_com_get_dma_width(struct ena_com_dev
*ena_dev
)
1596 u32 caps
= ena_com_reg_bar_read32(ena_dev
, ENA_REGS_CAPS_OFF
);
1599 if (unlikely(caps
== ENA_MMIO_READ_TIMEOUT
)) {
1600 netdev_err(ena_dev
->net_device
, "Reg read timeout occurred\n");
1604 width
= (caps
& ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK
) >>
1605 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT
;
1607 netdev_dbg(ena_dev
->net_device
, "ENA dma width: %d\n", width
);
1609 if ((width
< 32) || width
> ENA_MAX_PHYS_ADDR_SIZE_BITS
) {
1610 netdev_err(ena_dev
->net_device
, "DMA width illegal value: %d\n",
1615 ena_dev
->dma_addr_bits
= width
;
1620 int ena_com_validate_version(struct ena_com_dev
*ena_dev
)
1624 u32 ctrl_ver_masked
;
1626 /* Make sure the ENA version and the controller version are at least
1627 * as the driver expects
1629 ver
= ena_com_reg_bar_read32(ena_dev
, ENA_REGS_VERSION_OFF
);
1630 ctrl_ver
= ena_com_reg_bar_read32(ena_dev
,
1631 ENA_REGS_CONTROLLER_VERSION_OFF
);
1633 if (unlikely((ver
== ENA_MMIO_READ_TIMEOUT
) ||
1634 (ctrl_ver
== ENA_MMIO_READ_TIMEOUT
))) {
1635 netdev_err(ena_dev
->net_device
, "Reg read timeout occurred\n");
1639 dev_info(ena_dev
->dmadev
, "ENA device version: %d.%d\n",
1640 (ver
& ENA_REGS_VERSION_MAJOR_VERSION_MASK
) >>
1641 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT
,
1642 ver
& ENA_REGS_VERSION_MINOR_VERSION_MASK
);
1644 dev_info(ena_dev
->dmadev
,
1645 "ENA controller version: %d.%d.%d implementation version %d\n",
1646 (ctrl_ver
& ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK
) >>
1647 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT
,
1648 (ctrl_ver
& ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK
) >>
1649 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT
,
1650 (ctrl_ver
& ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK
),
1651 (ctrl_ver
& ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK
) >>
1652 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT
);
1655 (ctrl_ver
& ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK
) |
1656 (ctrl_ver
& ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK
) |
1657 (ctrl_ver
& ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK
);
1659 /* Validate the ctrl version without the implementation ID */
1660 if (ctrl_ver_masked
< MIN_ENA_CTRL_VER
) {
1661 netdev_err(ena_dev
->net_device
,
1662 "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1670 ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev
*ena_dev
,
1671 struct ena_com_admin_queue
*admin_queue
)
1674 if (!admin_queue
->comp_ctx
)
1677 devm_kfree(ena_dev
->dmadev
, admin_queue
->comp_ctx
);
1679 admin_queue
->comp_ctx
= NULL
;
1682 void ena_com_admin_destroy(struct ena_com_dev
*ena_dev
)
1684 struct ena_com_admin_queue
*admin_queue
= &ena_dev
->admin_queue
;
1685 struct ena_com_admin_cq
*cq
= &admin_queue
->cq
;
1686 struct ena_com_admin_sq
*sq
= &admin_queue
->sq
;
1687 struct ena_com_aenq
*aenq
= &ena_dev
->aenq
;
1690 ena_com_free_ena_admin_queue_comp_ctx(ena_dev
, admin_queue
);
1692 size
= ADMIN_SQ_SIZE(admin_queue
->q_depth
);
1694 dma_free_coherent(ena_dev
->dmadev
, size
, sq
->entries
,
1698 size
= ADMIN_CQ_SIZE(admin_queue
->q_depth
);
1700 dma_free_coherent(ena_dev
->dmadev
, size
, cq
->entries
,
1704 size
= ADMIN_AENQ_SIZE(aenq
->q_depth
);
1705 if (ena_dev
->aenq
.entries
)
1706 dma_free_coherent(ena_dev
->dmadev
, size
, aenq
->entries
,
1708 aenq
->entries
= NULL
;
1711 void ena_com_set_admin_polling_mode(struct ena_com_dev
*ena_dev
, bool polling
)
1716 mask_value
= ENA_REGS_ADMIN_INTR_MASK
;
1718 writel(mask_value
, ena_dev
->reg_bar
+ ENA_REGS_INTR_MASK_OFF
);
1719 ena_dev
->admin_queue
.polling
= polling
;
1722 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev
*ena_dev
,
1725 ena_dev
->admin_queue
.auto_polling
= polling
;
1728 int ena_com_mmio_reg_read_request_init(struct ena_com_dev
*ena_dev
)
1730 struct ena_com_mmio_read
*mmio_read
= &ena_dev
->mmio_read
;
1732 spin_lock_init(&mmio_read
->lock
);
1733 mmio_read
->read_resp
=
1734 dma_alloc_coherent(ena_dev
->dmadev
,
1735 sizeof(*mmio_read
->read_resp
),
1736 &mmio_read
->read_resp_dma_addr
, GFP_KERNEL
);
1737 if (unlikely(!mmio_read
->read_resp
))
1740 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev
);
1742 mmio_read
->read_resp
->req_id
= 0x0;
1743 mmio_read
->seq_num
= 0x0;
1744 mmio_read
->readless_supported
= true;
1753 void ena_com_set_mmio_read_mode(struct ena_com_dev
*ena_dev
, bool readless_supported
)
1755 struct ena_com_mmio_read
*mmio_read
= &ena_dev
->mmio_read
;
1757 mmio_read
->readless_supported
= readless_supported
;
1760 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev
*ena_dev
)
1762 struct ena_com_mmio_read
*mmio_read
= &ena_dev
->mmio_read
;
1764 writel(0x0, ena_dev
->reg_bar
+ ENA_REGS_MMIO_RESP_LO_OFF
);
1765 writel(0x0, ena_dev
->reg_bar
+ ENA_REGS_MMIO_RESP_HI_OFF
);
1767 dma_free_coherent(ena_dev
->dmadev
, sizeof(*mmio_read
->read_resp
),
1768 mmio_read
->read_resp
, mmio_read
->read_resp_dma_addr
);
1770 mmio_read
->read_resp
= NULL
;
1773 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev
*ena_dev
)
1775 struct ena_com_mmio_read
*mmio_read
= &ena_dev
->mmio_read
;
1776 u32 addr_low
, addr_high
;
1778 addr_low
= ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read
->read_resp_dma_addr
);
1779 addr_high
= ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read
->read_resp_dma_addr
);
1781 writel(addr_low
, ena_dev
->reg_bar
+ ENA_REGS_MMIO_RESP_LO_OFF
);
1782 writel(addr_high
, ena_dev
->reg_bar
+ ENA_REGS_MMIO_RESP_HI_OFF
);
1785 int ena_com_admin_init(struct ena_com_dev
*ena_dev
,
1786 struct ena_aenq_handlers
*aenq_handlers
)
1788 struct ena_com_admin_queue
*admin_queue
= &ena_dev
->admin_queue
;
1789 u32 aq_caps
, acq_caps
, dev_sts
, addr_low
, addr_high
;
1792 dev_sts
= ena_com_reg_bar_read32(ena_dev
, ENA_REGS_DEV_STS_OFF
);
1794 if (unlikely(dev_sts
== ENA_MMIO_READ_TIMEOUT
)) {
1795 netdev_err(ena_dev
->net_device
, "Reg read timeout occurred\n");
1799 if (!(dev_sts
& ENA_REGS_DEV_STS_READY_MASK
)) {
1800 netdev_err(ena_dev
->net_device
,
1801 "Device isn't ready, abort com init\n");
1805 admin_queue
->q_depth
= ENA_ADMIN_QUEUE_DEPTH
;
1807 admin_queue
->q_dmadev
= ena_dev
->dmadev
;
1808 admin_queue
->polling
= false;
1809 admin_queue
->curr_cmd_id
= 0;
1811 atomic_set(&admin_queue
->outstanding_cmds
, 0);
1813 spin_lock_init(&admin_queue
->q_lock
);
1815 ret
= ena_com_init_comp_ctxt(admin_queue
);
1819 ret
= ena_com_admin_init_sq(admin_queue
);
1823 ret
= ena_com_admin_init_cq(admin_queue
);
1827 admin_queue
->sq
.db_addr
= (u32 __iomem
*)((uintptr_t)ena_dev
->reg_bar
+
1828 ENA_REGS_AQ_DB_OFF
);
1830 addr_low
= ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue
->sq
.dma_addr
);
1831 addr_high
= ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue
->sq
.dma_addr
);
1833 writel(addr_low
, ena_dev
->reg_bar
+ ENA_REGS_AQ_BASE_LO_OFF
);
1834 writel(addr_high
, ena_dev
->reg_bar
+ ENA_REGS_AQ_BASE_HI_OFF
);
1836 addr_low
= ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue
->cq
.dma_addr
);
1837 addr_high
= ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue
->cq
.dma_addr
);
1839 writel(addr_low
, ena_dev
->reg_bar
+ ENA_REGS_ACQ_BASE_LO_OFF
);
1840 writel(addr_high
, ena_dev
->reg_bar
+ ENA_REGS_ACQ_BASE_HI_OFF
);
1843 aq_caps
|= admin_queue
->q_depth
& ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK
;
1844 aq_caps
|= (sizeof(struct ena_admin_aq_entry
) <<
1845 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT
) &
1846 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK
;
1849 acq_caps
|= admin_queue
->q_depth
& ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK
;
1850 acq_caps
|= (sizeof(struct ena_admin_acq_entry
) <<
1851 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT
) &
1852 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK
;
1854 writel(aq_caps
, ena_dev
->reg_bar
+ ENA_REGS_AQ_CAPS_OFF
);
1855 writel(acq_caps
, ena_dev
->reg_bar
+ ENA_REGS_ACQ_CAPS_OFF
);
1856 ret
= ena_com_admin_init_aenq(ena_dev
, aenq_handlers
);
1860 admin_queue
->ena_dev
= ena_dev
;
1861 admin_queue
->running_state
= true;
1865 ena_com_admin_destroy(ena_dev
);
1870 int ena_com_create_io_queue(struct ena_com_dev
*ena_dev
,
1871 struct ena_com_create_io_ctx
*ctx
)
1873 struct ena_com_io_sq
*io_sq
;
1874 struct ena_com_io_cq
*io_cq
;
1877 if (ctx
->qid
>= ENA_TOTAL_NUM_QUEUES
) {
1878 netdev_err(ena_dev
->net_device
,
1879 "Qid (%d) is bigger than max num of queues (%d)\n",
1880 ctx
->qid
, ENA_TOTAL_NUM_QUEUES
);
1884 io_sq
= &ena_dev
->io_sq_queues
[ctx
->qid
];
1885 io_cq
= &ena_dev
->io_cq_queues
[ctx
->qid
];
1887 memset(io_sq
, 0x0, sizeof(*io_sq
));
1888 memset(io_cq
, 0x0, sizeof(*io_cq
));
1891 io_cq
->q_depth
= ctx
->queue_size
;
1892 io_cq
->direction
= ctx
->direction
;
1893 io_cq
->qid
= ctx
->qid
;
1895 io_cq
->msix_vector
= ctx
->msix_vector
;
1897 io_sq
->q_depth
= ctx
->queue_size
;
1898 io_sq
->direction
= ctx
->direction
;
1899 io_sq
->qid
= ctx
->qid
;
1901 io_sq
->mem_queue_type
= ctx
->mem_queue_type
;
1903 if (ctx
->direction
== ENA_COM_IO_QUEUE_DIRECTION_TX
)
1904 /* header length is limited to 8 bits */
1905 io_sq
->tx_max_header_size
=
1906 min_t(u32
, ena_dev
->tx_max_header_size
, SZ_256
);
1908 ret
= ena_com_init_io_sq(ena_dev
, ctx
, io_sq
);
1911 ret
= ena_com_init_io_cq(ena_dev
, ctx
, io_cq
);
1915 ret
= ena_com_create_io_cq(ena_dev
, io_cq
);
1919 ret
= ena_com_create_io_sq(ena_dev
, io_sq
, io_cq
->idx
);
1926 ena_com_destroy_io_cq(ena_dev
, io_cq
);
1928 ena_com_io_queue_free(ena_dev
, io_sq
, io_cq
);
1932 void ena_com_destroy_io_queue(struct ena_com_dev
*ena_dev
, u16 qid
)
1934 struct ena_com_io_sq
*io_sq
;
1935 struct ena_com_io_cq
*io_cq
;
1937 if (qid
>= ENA_TOTAL_NUM_QUEUES
) {
1938 netdev_err(ena_dev
->net_device
,
1939 "Qid (%d) is bigger than max num of queues (%d)\n",
1940 qid
, ENA_TOTAL_NUM_QUEUES
);
1944 io_sq
= &ena_dev
->io_sq_queues
[qid
];
1945 io_cq
= &ena_dev
->io_cq_queues
[qid
];
1947 ena_com_destroy_io_sq(ena_dev
, io_sq
);
1948 ena_com_destroy_io_cq(ena_dev
, io_cq
);
1950 ena_com_io_queue_free(ena_dev
, io_sq
, io_cq
);
1953 int ena_com_get_link_params(struct ena_com_dev
*ena_dev
,
1954 struct ena_admin_get_feat_resp
*resp
)
1956 return ena_com_get_feature(ena_dev
, resp
, ENA_ADMIN_LINK_CONFIG
, 0);
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));

	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_EXT,
					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
		if (rc)
			return rc;

		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
			return -EINVAL;

		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
		       sizeof(get_resp.u.max_queue_ext));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
	} else {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
		       sizeof(get_resp.u.max_queue));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue.max_header_size;

		if (rc)
			return rc;
	}

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't a mandatory admin command, so if the command
	 * isn't supported, set the driver hints to 0.
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->hw_hints, 0x0,
		       sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
	if (!rc)
		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
		       sizeof(get_resp.u.llq));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
	else
		return rc;

	return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_com_get_specific_aenq_cb:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_com_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u64 timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp = (u64)aenq_common->timestamp_low |
			((u64)aenq_common->timestamp_high << 32);

		netdev_dbg(ena_dev->net_device,
			   "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			   aenq_common->group, aenq_common->syndrome, timestamp);

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}

		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel_relaxed((u32)aenq->head,
		       ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

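/* Usage sketch (illustrative only): the dispatch table consulted by
 * ena_com_get_specific_aenq_cb() is registered by the caller at AENQ init
 * time. The handler names below are hypothetical; only the callback
 * signature and the handlers[]/unimplemented_handler fields are taken from
 * the code above.
 *
 *	static void example_fallback_handler(void *data,
 *					     struct ena_admin_aenq_entry *aenq_e)
 *	{
 *		pr_debug("Unhandled ENA AENQ event\n");
 *	}
 *
 *	static struct ena_aenq_handlers example_aenq_handlers = {
 *		.unimplemented_handler = example_fallback_handler,
 *	};
 */
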
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
		return -ETIME;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		netdev_err(ena_dev->net_device,
			   "Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		netdev_err(ena_dev->net_device, "Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		netdev_err(ena_dev->net_device,
			   "Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		netdev_err(ena_dev->net_device,
			   "Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

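/* Usage sketch (illustrative only): callers pass one of the
 * ena_regs_reset_reason_types values so the device records why it was reset;
 * ENA_REGS_RESET_NORMAL is assumed here as the generic reason (defined in
 * ena_regs_defs.h). The admin queue must be re-initialized after a
 * successful reset.
 *
 *	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
 *	if (rc)
 *		return rc;
 */
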
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		netdev_err(ena_dev->net_device,
			   "Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
			  struct ena_admin_eni_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.u.eni_stats,
		       sizeof(ctx.get_resp.u.eni_stats));

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.u.basic_stats,
		       sizeof(ctx.get_resp.u.basic_stats));

	return ret;
}

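/* Usage sketch (illustrative only): both getters follow the same pattern -
 * zero a local ena_com_stats_ctx, issue ENA_ADMIN_GET_STATS with the matching
 * type and copy the relevant response union member out on success.
 *
 *	struct ena_admin_basic_stats stats;
 *	int rc;
 *
 *	rc = ena_com_get_dev_basic_stats(ena_dev, &stats);
 *	if (unlikely(rc))
 *		netdev_err(ena_dev->net_device,
 *			   "Getting basic stats failed: %d\n", rc);
 */
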
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
			   ENA_ADMIN_MTU);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		netdev_err(ena_dev->net_device,
			   "Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

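/* Usage sketch (illustrative only): an MTU change is a single SET_FEATURE
 * command, so a netdev callback can forward the new value directly. new_mtu
 * is a hypothetical caller-provided value.
 *
 *	rc = ena_com_set_dev_mtu(ena_dev, new_mtu);
 *	if (rc == -EOPNOTSUPP)
 *		netdev_dbg(ena_dev->net_device, "MTU feature not supported\n");
 */
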
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	struct ena_admin_get_feat_resp resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device,
			   "Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
			   ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EOPNOTSUPP;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
		netdev_err(ena_dev->net_device,
			   "Func hash %d isn't supported by device, abort\n",
			   rss->hash_func);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device, "Memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device,
			   "Failed to set hash function %d. error: %d\n",
			   rss->hash_func, ret);
		ret = -EINVAL;
	}

	return ret;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key;
	struct ena_admin_get_feat_resp get_resp;
	enum ena_admin_hash_functions old_func;
	struct ena_rss *rss = &ena_dev->rss;
	int rc;

	hash_key = rss->hash_key;

	/* Make sure size is a mult of DWs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
		netdev_err(ena_dev->net_device,
			   "Flow hash function %d isn't supported\n", func);
		return -EOPNOTSUPP;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key) {
			if (key_len != sizeof(hash_key->key)) {
				netdev_err(ena_dev->net_device,
					   "key len (%hu) doesn't equal the supported size (%zu)\n",
					   key_len, sizeof(hash_key->key));
				return -EINVAL;
			}
			memcpy(hash_key->key, key, key_len);
			rss->hash_init_val = init_val;
			hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
		}
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n",
			   func);
		return -EINVAL;
	}

	old_func = rss->hash_func;
	rss->hash_func = func;
	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		rss->hash_func = old_func;

	return rc;
}

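/* Usage sketch (illustrative only): programming a Toeplitz key. The key
 * length must be a multiple of 4 and match sizeof(hash_key->key) as checked
 * above; ENA_HASH_KEY_SIZE is assumed to be that size (see ena_com.h), and
 * the init value is an arbitrary example.
 *
 *	u8 key[ENA_HASH_KEY_SIZE];
 *	int rc;
 *
 *	netdev_rss_key_fill(key, sizeof(key));
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					key, sizeof(key), 0xFFFFFFFF);
 */
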
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	if (unlikely(!func))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	/* ffs() returns 1 in case the lsb is set */
	rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
	if (rss->hash_func)
		rss->hash_func--;

	*func = rss->hash_func;

	return 0;
}

int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		ena_dev->rss.hash_key;

	if (key)
		memcpy(key, hash_key->key,
		       (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl), 0);
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
			   ENA_ADMIN_RSS_HASH_INPUT);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device, "Memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		netdev_err(ena_dev->net_device,
			   "Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			netdev_err(ena_dev->net_device,
				   "Hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
				   i, hash_ctrl->supported_fields[i].fields,
				   hash_ctrl->selected_fields[i].fields);
			return -EOPNOTSUPP;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
			   proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		netdev_err(ena_dev->net_device,
			   "Proto %d doesn't support the required fields %x. supports only: %x\n",
			   proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
			   ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
		return -EOPNOTSUPP;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		netdev_err(ena_dev->net_device,
			   "Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device, "Memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		netdev_err(ena_dev->net_device,
			   "Failed to set indirect table. error: %d\n", ret);

	return ret;
}

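/* Usage sketch (illustrative only): the host table is filled entry by entry
 * and then pushed to the device in one admin command. num_queues and the
 * round-robin spreading policy are hypothetical caller choices, and
 * rss_tbl_log_size stands for the size passed to ena_com_rss_init() below.
 *
 *	for (i = 0; i < (1 << rss_tbl_log_size); i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_queues);
 *		if (unlikely(rc))
 *			return rc;
 *	}
 *
 *	rc = ena_com_indirect_table_set(ena_dev);
 */
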
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size, 0);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	/* The following function might return unsupported in case the
	 * device doesn't support setting the key / hash function. We can safely
	 * ignore this error and have indirection table support only.
	 */
	rc = ena_com_hash_key_allocate(ena_dev);
	if (likely(!rc))
		ena_com_hash_key_fill_default_key(ena_dev);
	else if (rc != -EOPNOTSUPP)
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
				   &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
		(ENA_COMMON_SPEC_VERSION_MINOR));

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
				   &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device, "Memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device, "Memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		netdev_err(ena_dev->net_device,
			   "Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
							   u32 coalesce_usecs,
							   u32 intr_delay_resolution,
							   u32 *intr_moder_interval)
{
	if (!intr_delay_resolution) {
		netdev_err(ena_dev->net_device,
			   "Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
							      tx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_tx_interval);
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
							      rx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_rx_interval);
}

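/* Worked example (illustrative only): the stored interval is expressed in
 * units of the device's delay resolution. With a resolution of, say, 64 usec,
 * requesting 200 usec stores 200 / 64 = 3 units, i.e. an effective 192 usec.
 *
 *	rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, 200);
 */
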
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION, 0);

	if (rc) {
		if (rc == -EOPNOTSUPP) {
			netdev_dbg(ena_dev->net_device,
				   "Feature %d isn't supported\n",
				   ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			netdev_err(ena_dev->net_device,
				   "Failed to get interrupt moderation admin cmd. rc: %d\n",
				   rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);

	/* Disable adaptive moderation by default - can be enabled later */
	ena_com_disable_adaptive_moderation(ena_dev);

	return 0;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_rx_interval;
}

int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int rc;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
	if (rc)
		return rc;

	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

	if (unlikely(ena_dev->tx_max_header_size == 0)) {
		netdev_err(ena_dev->net_device,
			   "The size of the LLQ entry is smaller than needed\n");
		return -EINVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

	return 0;
}