/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>

#include "ena_netdev.h"
#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"
static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count);
static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
				   int count);
static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
				  int count);
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					    int first_index, int count);
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index, int count);
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index, int count);
static int ena_up(struct ena_adapter *adapter);
static void ena_down(struct ena_adapter *adapter);
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring);
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring);
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					    int first_index, int count);
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset
	 * Check that we are not in the middle or a trigger already
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit time out\n");
}
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}
static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}
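/* ena_xmit_common - push a prepared TX context to the device.
 * Shared by the skb transmit path and the XDP TX path: it writes the
 * descriptors via ena_com_prepare_tx(), updates the ring statistics and
 * advances next_to_use. A doorbell is written first if the LLQ burst
 * limit has been reached.
 */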
static int ena_xmit_common(struct net_device *dev,
			   struct ena_ring *ring,
			   struct ena_tx_buffer *tx_info,
			   struct ena_com_tx_ctx *ena_tx_ctx,
			   u16 next_to_use,
			   u32 bytes)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int rc, nb_hw_desc;

	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
						ena_tx_ctx))) {
		netif_dbg(adapter, tx_queued, dev,
			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			  ring->qid);
		ena_com_write_sq_doorbell(ring->ena_com_io_sq);
	}

	/* prepare the packet's descriptors to dma engine */
	rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
				&nb_hw_desc);

	/* In case there isn't enough space in the queue for the packet,
	 * we simply drop it. All other failure reasons of
	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
	 */
	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, dev,
			  "failed to prepare tx bufs\n");
		u64_stats_update_begin(&ring->syncp);
		ring->tx_stats.prepare_ctx_err++;
		u64_stats_update_end(&ring->syncp);
		if (rc != -ENOMEM) {
			adapter->reset_reason =
				ENA_REGS_RESET_DRIVER_INVALID_STATE;
			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		}
		return rc;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.cnt++;
	ring->tx_stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						 ring->ring_size);
	return 0;
}
/* This is the XDP napi callback. XDP queues use a separate napi callback
 * than Rx/Tx queues.
 */
static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	u32 xdp_work_done, xdp_budget;
	struct ena_ring *xdp_ring;
	int napi_comp_call = 0;
	int ret;

	xdp_ring = ena_napi->xdp_ring;
	xdp_ring->first_interrupt = ena_napi->first_interrupt;

	xdp_budget = budget;

	if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);

	/* If the device is about to reset or down, avoid unmask
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if (xdp_budget > xdp_work_done) {
		napi_comp_call = 1;
		if (napi_complete_done(napi, xdp_work_done))
			ena_unmask_interrupt(xdp_ring, NULL);
		ena_update_ring_numa_node(xdp_ring, NULL);
		ret = xdp_work_done;
	} else {
		ret = xdp_budget;
	}

	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.napi_comp += napi_comp_call;
	xdp_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&xdp_ring->syncp);

	return ret;
}
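/* Map an XDP buffer for transmission: the frame is converted to an
 * xdp_frame, up to tx_max_header_size bytes are pushed as the LLQ header
 * and any remainder is DMA-mapped as a single buffer.
 */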
static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
			       struct ena_tx_buffer *tx_info,
			       struct xdp_buff *xdp,
			       void **push_hdr,
			       u32 *push_len)
{
	struct ena_adapter *adapter = xdp_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma = 0;
	u32 size;

	tx_info->xdpf = convert_to_xdp_frame(xdp);
	size = tx_info->xdpf->len;
	ena_buf = tx_info->bufs;

	/* llq push buffer */
	*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
	*push_hdr = tx_info->xdpf->data;

	if (size - *push_len > 0) {
		dma = dma_map_single(xdp_ring->dev,
				     *push_hdr + *push_len,
				     size - *push_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 1;
		tx_info->num_of_bufs = 1;
	}

	ena_buf->paddr = dma;
	ena_buf->len = size;

	return 0;

error_report_dma_error:
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&xdp_ring->syncp);
	netdev_warn(adapter->netdev, "failed to map xdp buff\n");

	xdp_return_frame_rx_napi(tx_info->xdpf);
	tx_info->xdpf = NULL;
	tx_info->num_of_bufs = 0;

	return -EINVAL;
}
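/* Transmit an XDP_TX frame on the XDP TX queue @qid. The RX page is
 * reference-counted and kept until the TX completion releases it.
 */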
static int ena_xdp_xmit_buff(struct net_device *dev,
			     struct xdp_buff *xdp,
			     int qid,
			     struct ena_rx_buffer *rx_info)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_com_tx_ctx ena_tx_ctx = {0};
	struct ena_tx_buffer *tx_info;
	struct ena_ring *xdp_ring;
	u16 next_to_use, req_id;
	int rc;
	void *push_hdr;
	u32 push_len;

	xdp_ring = &adapter->tx_ring[qid];
	next_to_use = xdp_ring->next_to_use;
	req_id = xdp_ring->free_ids[next_to_use];
	tx_info = &xdp_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;
	page_ref_inc(rx_info->page);
	tx_info->xdp_rx_page = rx_info->page;

	rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
	if (unlikely(rc))
		goto error_drop_packet;

	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = push_len;

	rc = ena_xmit_common(dev,
			     xdp_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdp->data_end - xdp->data);
	if (rc)
		goto error_unmap_dma;
	/* trigger the dma engine. ena_com_write_sq_doorbell()
	 * has a mb
	 */
	ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.doorbells++;
	u64_stats_update_end(&xdp_ring->syncp);

	return NETDEV_TX_OK;

error_unmap_dma:
	ena_unmap_tx_buff(xdp_ring, tx_info);
	tx_info->xdpf = NULL;
error_drop_packet:

	return NETDEV_TX_OK;
}
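/* Run the attached XDP program on a received buffer and act on the
 * verdict: XDP_TX frames are sent on the paired XDP TX queue, aborted
 * or unknown verdicts are reported; XDP_PASS and XDP_DROP are handled
 * by the caller.
 */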
static int ena_xdp_execute(struct ena_ring *rx_ring,
			   struct xdp_buff *xdp,
			   struct ena_rx_buffer *rx_info)
{
	struct bpf_prog *xdp_prog;
	u32 verdict = XDP_PASS;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		goto out;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	if (verdict == XDP_TX)
		ena_xdp_xmit_buff(rx_ring->netdev,
				  xdp,
				  rx_ring->qid + rx_ring->adapter->num_io_queues,
				  rx_info);
	else if (unlikely(verdict == XDP_ABORTED))
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
	else if (unlikely(verdict > XDP_TX))
		bpf_warn_invalid_xdp_action(verdict);
out:
	rcu_read_unlock();
	return verdict;
}
static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
{
	adapter->xdp_first_ring = adapter->num_io_queues;
	adapter->xdp_num_queues = adapter->num_io_queues;

	ena_init_io_rings(adapter,
			  adapter->xdp_first_ring,
			  adapter->xdp_num_queues);
}
static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
	int rc = 0;

	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
					     adapter->xdp_num_queues);
	if (rc)
		goto setup_err;

	rc = ena_create_io_tx_queues_in_range(adapter,
					      adapter->xdp_first_ring,
					      adapter->xdp_num_queues);
	if (rc)
		goto create_err;

	return 0;

create_err:
	ena_free_all_io_tx_resources(adapter);
setup_err:
	return rc;
}
/* Provides a way for both kernel and bpf-prog to know
 * more about the RX-queue a given XDP frame arrived on.
 */
static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		goto err;
	}

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					NULL);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	}

err:
	return rc;
}
static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}
void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
					  struct bpf_prog *prog,
					  int first,
					  int count)
{
	struct ena_ring *rx_ring;
	int i;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		xchg(&rx_ring->xdp_bpf_prog, prog);
		if (prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = 0;
		}
	}
}
void ena_xdp_exchange_program(struct ena_adapter *adapter,
			      struct bpf_prog *prog)
{
	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);

	ena_xdp_exchange_program_rx_in_range(adapter,
					     prog,
					     0,
					     adapter->num_io_queues);

	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);
}
static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
{
	bool was_up;
	int rc;

	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	if (was_up)
		ena_down(adapter);

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;
	ena_xdp_exchange_program(adapter, NULL);
	if (was_up) {
		rc = ena_up(adapter);
		if (rc)
			return rc;
	}
	return 0;
}
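/* Handle XDP_SETUP_PROG: install or remove an XDP program. Installing a
 * program for the first time brings the interface down, creates the
 * dedicated XDP TX queues and restricts max_mtu to ENA_XDP_MAX_MTU;
 * removing the last program undoes all of that.
 */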
static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	int rc, prev_mtu;
	bool is_up;

	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	rc = ena_xdp_allowed(adapter);
	if (rc == ENA_XDP_ALLOWED) {
		old_bpf_prog = adapter->xdp_bpf_prog;
		if (prog) {
			if (!is_up) {
				ena_init_all_xdp_queues(adapter);
			} else if (!old_bpf_prog) {
				ena_down(adapter);
				ena_init_all_xdp_queues(adapter);
			}
			ena_xdp_exchange_program(adapter, prog);

			if (is_up && !old_bpf_prog) {
				rc = ena_up(adapter);
				if (rc)
					return rc;
			}
		} else if (old_bpf_prog) {
			rc = ena_destroy_and_free_all_xdp_queues(adapter);
			if (rc)
				return rc;
		}

		prev_mtu = netdev->max_mtu;
		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;

		if (!old_bpf_prog)
			netif_info(adapter, drv, adapter->netdev,
				   "xdp program set, changing the max_mtu from %d to %d",
				   prev_mtu, netdev->max_mtu);

	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
			  netdev->mtu, ENA_XDP_MAX_MTU);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
		return -EINVAL;
	} else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
			  adapter->num_io_queues, adapter->max_num_io_queues);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
		return -EINVAL;
	}

	return 0;
}
/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
 * program as well as to query the current xdp program id.
 */
static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return ena_xdp_set(netdev, bpf);
	case XDP_QUERY_PROG:
		bpf->prog_id = adapter->xdp_bpf_prog ?
			adapter->xdp_bpf_prog->aux->id : 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < adapter->num_io_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}
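/* Initialize the fields that TX, RX and XDP rings have in common. */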
static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}
static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = first_index; i < first_index + count; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX common ring state */
		ena_init_io_rings_common(adapter, txr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->requested_tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* Don't init RX queues for xdp queues */
		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			/* RX common ring state */
			ena_init_io_rings_common(adapter, rxr, i);

			/* RX specific ring state */
			rxr->ring_size = adapter->requested_rx_ring_size;
			rxr->rx_copybreak = adapter->rx_copybreak;
			rxr->sgl_size = adapter->max_rx_sgl_size;
			rxr->smoothed_interval =
				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
			rxr->empty_rx_queue = 0;
			adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}
	}
}
/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_tx_free_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
err_tx_free_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}
/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					    int first_index,
					    int count)
{
	int i, rc = 0;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (first_index < i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}
static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
						  int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		ena_free_tx_resources(adapter, i);
}
/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	ena_free_all_io_tx_resources_in_range(adapter,
					      0,
					      adapter->xdp_num_queues +
					      adapter->num_io_queues);
}
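/* An out-of-range RX req_id indicates a device/driver inconsistency, so
 * besides counting the event a device reset is scheduled.
 */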
static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
		  "Invalid rx req_id: %hu\n", req_id);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_req_id++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Trigger device reset */
	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
	return -EFAULT;
}
/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}
/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_ids);
	rx_ring->free_ids = NULL;
}
/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}
/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}
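/* Allocate and DMA-map a full page for an RX descriptor. The buffer
 * address and length are offset by rx_headroom so an attached XDP
 * program has headroom to work with.
 */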
static int ena_alloc_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma + rx_ring->rx_headroom;
	ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;

	return 0;
}
static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev,
		       ena_buf->paddr - rx_ring->rx_headroom,
		       ENA_PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}
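/* Post up to @num fresh RX buffers to the submission queue and ring the
 * doorbell once. Returns the number of buffers actually added.
 */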
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_ids[next_to_use];
		rc = validate_rx_req_id(rx_ring, req_id);
		if (unlikely(rc < 0))
			break;

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}
static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}
/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}
static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_bufs(adapter, i);
}
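/* Undo the DMA mappings of a TX buffer: the linear part first (if it was
 * mapped), then any remaining page fragments.
 */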
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}
/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netdev_notice(tx_ring->netdev,
				      "free uncompleted tx skb qid %d idx 0x%x\n",
				      tx_ring->qid, i);
			print_once = false;
		} else {
			netdev_dbg(tx_ring->netdev,
				   "free uncompleted tx skb qid %d idx 0x%x\n",
				   tx_ring->qid, i);
		}

		ena_unmap_tx_buff(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}
static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}
static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}
static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
				 struct ena_tx_buffer *tx_info, bool is_xdp)
{
	if (tx_info)
		netif_err(ring->adapter,
			  tx_done, ring->netdev,
			  "tx_info doesn't have valid %s",
			  is_xdp ? "xdp frame" : "skb");
	else
		netif_err(ring->adapter,
			  tx_done, ring->netdev,
			  "Invalid req_id: %hu\n",
			  req_id);

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&ring->syncp);

	/* Trigger device reset */
	ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
	return -EFAULT;
}
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}
static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < xdp_ring->ring_size)) {
		tx_info = &xdp_ring->tx_buffer_info[req_id];
		if (likely(tx_info->xdpf))
			return 0;
	}

	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}
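/* Reclaim up to @budget completed TX packets: unmap and free their skbs,
 * return the req_ids to free_ids and wake the netdev queue once enough
 * descriptors are available again.
 */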
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_buff(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh &&
		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	return tx_pkts;
}
1351 static struct sk_buff
*ena_alloc_skb(struct ena_ring
*rx_ring
, bool frags
)
1353 struct sk_buff
*skb
;
1356 skb
= napi_get_frags(rx_ring
->napi
);
1358 skb
= netdev_alloc_skb_ip_align(rx_ring
->netdev
,
1359 rx_ring
->rx_copybreak
);
1361 if (unlikely(!skb
)) {
1362 u64_stats_update_begin(&rx_ring
->syncp
);
1363 rx_ring
->rx_stats
.skb_alloc_fail
++;
1364 u64_stats_update_end(&rx_ring
->syncp
);
1365 netif_dbg(rx_ring
->adapter
, rx_err
, rx_ring
->netdev
,
1366 "Failed to allocate skb. frags: %d\n", frags
);
1373 static struct sk_buff
*ena_rx_skb(struct ena_ring
*rx_ring
,
1374 struct ena_com_rx_buf_info
*ena_bufs
,
1378 struct sk_buff
*skb
;
1379 struct ena_rx_buffer
*rx_info
;
1380 u16 len
, req_id
, buf
= 0;
1383 len
= ena_bufs
[buf
].len
;
1384 req_id
= ena_bufs
[buf
].req_id
;
1385 rx_info
= &rx_ring
->rx_buffer_info
[req_id
];
1387 if (unlikely(!rx_info
->page
)) {
1388 netif_err(rx_ring
->adapter
, rx_err
, rx_ring
->netdev
,
1393 netif_dbg(rx_ring
->adapter
, rx_status
, rx_ring
->netdev
,
1394 "rx_info %p page %p\n",
1395 rx_info
, rx_info
->page
);
1397 /* save virt address of first buffer */
1398 va
= page_address(rx_info
->page
) + rx_info
->page_offset
;
1399 prefetch(va
+ NET_IP_ALIGN
);
1401 if (len
<= rx_ring
->rx_copybreak
) {
1402 skb
= ena_alloc_skb(rx_ring
, false);
1406 netif_dbg(rx_ring
->adapter
, rx_status
, rx_ring
->netdev
,
1407 "rx allocated small packet. len %d. data_len %d\n",
1408 skb
->len
, skb
->data_len
);
1410 /* sync this buffer for CPU use */
1411 dma_sync_single_for_cpu(rx_ring
->dev
,
1412 dma_unmap_addr(&rx_info
->ena_buf
, paddr
),
1415 skb_copy_to_linear_data(skb
, va
, len
);
1416 dma_sync_single_for_device(rx_ring
->dev
,
1417 dma_unmap_addr(&rx_info
->ena_buf
, paddr
),
1422 skb
->protocol
= eth_type_trans(skb
, rx_ring
->netdev
);
1423 rx_ring
->free_ids
[*next_to_clean
] = req_id
;
1424 *next_to_clean
= ENA_RX_RING_IDX_ADD(*next_to_clean
, descs
,
1425 rx_ring
->ring_size
);
1429 skb
= ena_alloc_skb(rx_ring
, true);
1434 dma_unmap_page(rx_ring
->dev
,
1435 dma_unmap_addr(&rx_info
->ena_buf
, paddr
),
1436 ENA_PAGE_SIZE
, DMA_FROM_DEVICE
);
1438 skb_add_rx_frag(skb
, skb_shinfo(skb
)->nr_frags
, rx_info
->page
,
1439 rx_info
->page_offset
, len
, ENA_PAGE_SIZE
);
1441 netif_dbg(rx_ring
->adapter
, rx_status
, rx_ring
->netdev
,
1442 "rx skb updated. len %d. data_len %d\n",
1443 skb
->len
, skb
->data_len
);
1445 rx_info
->page
= NULL
;
1447 rx_ring
->free_ids
[*next_to_clean
] = req_id
;
1449 ENA_RX_RING_IDX_NEXT(*next_to_clean
,
1450 rx_ring
->ring_size
);
1451 if (likely(--descs
== 0))
1455 len
= ena_bufs
[buf
].len
;
1456 req_id
= ena_bufs
[buf
].req_id
;
1457 rx_info
= &rx_ring
->rx_buffer_info
[req_id
];
1463 /* ena_rx_checksum - indicate in skb if hw indicated a good cksum
1464 * @adapter: structure containing adapter specific data
1465 * @ena_rx_ctx: received packet context/metadata
1466 * @skb: skb currently being received and modified
1468 static void ena_rx_checksum(struct ena_ring
*rx_ring
,
1469 struct ena_com_rx_ctx
*ena_rx_ctx
,
1470 struct sk_buff
*skb
)
1472 /* Rx csum disabled */
1473 if (unlikely(!(rx_ring
->netdev
->features
& NETIF_F_RXCSUM
))) {
1474 skb
->ip_summed
= CHECKSUM_NONE
;
1478 /* For fragmented packets the checksum isn't valid */
1479 if (ena_rx_ctx
->frag
) {
1480 skb
->ip_summed
= CHECKSUM_NONE
;
1484 /* if IP and error */
1485 if (unlikely((ena_rx_ctx
->l3_proto
== ENA_ETH_IO_L3_PROTO_IPV4
) &&
1486 (ena_rx_ctx
->l3_csum_err
))) {
1487 /* ipv4 checksum error */
1488 skb
->ip_summed
= CHECKSUM_NONE
;
1489 u64_stats_update_begin(&rx_ring
->syncp
);
1490 rx_ring
->rx_stats
.bad_csum
++;
1491 u64_stats_update_end(&rx_ring
->syncp
);
1492 netif_dbg(rx_ring
->adapter
, rx_err
, rx_ring
->netdev
,
1493 "RX IPv4 header checksum error\n");
1498 if (likely((ena_rx_ctx
->l4_proto
== ENA_ETH_IO_L4_PROTO_TCP
) ||
1499 (ena_rx_ctx
->l4_proto
== ENA_ETH_IO_L4_PROTO_UDP
))) {
1500 if (unlikely(ena_rx_ctx
->l4_csum_err
)) {
1501 /* TCP/UDP checksum error */
1502 u64_stats_update_begin(&rx_ring
->syncp
);
1503 rx_ring
->rx_stats
.bad_csum
++;
1504 u64_stats_update_end(&rx_ring
->syncp
);
1505 netif_dbg(rx_ring
->adapter
, rx_err
, rx_ring
->netdev
,
1506 "RX L4 checksum error\n");
1507 skb
->ip_summed
= CHECKSUM_NONE
;
1511 if (likely(ena_rx_ctx
->l4_csum_checked
)) {
1512 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1513 u64_stats_update_begin(&rx_ring
->syncp
);
1514 rx_ring
->rx_stats
.csum_good
++;
1515 u64_stats_update_end(&rx_ring
->syncp
);
1517 u64_stats_update_begin(&rx_ring
->syncp
);
1518 rx_ring
->rx_stats
.csum_unchecked
++;
1519 u64_stats_update_end(&rx_ring
->syncp
);
1520 skb
->ip_summed
= CHECKSUM_NONE
;
1523 skb
->ip_summed
= CHECKSUM_NONE
;
1529 static void ena_set_rx_hash(struct ena_ring
*rx_ring
,
1530 struct ena_com_rx_ctx
*ena_rx_ctx
,
1531 struct sk_buff
*skb
)
1533 enum pkt_hash_types hash_type
;
1535 if (likely(rx_ring
->netdev
->features
& NETIF_F_RXHASH
)) {
1536 if (likely((ena_rx_ctx
->l4_proto
== ENA_ETH_IO_L4_PROTO_TCP
) ||
1537 (ena_rx_ctx
->l4_proto
== ENA_ETH_IO_L4_PROTO_UDP
)))
1539 hash_type
= PKT_HASH_TYPE_L4
;
1541 hash_type
= PKT_HASH_TYPE_NONE
;
1543 /* Override hash type if the packet is fragmented */
1544 if (ena_rx_ctx
->frag
)
1545 hash_type
= PKT_HASH_TYPE_NONE
;
1547 skb_set_hash(skb
, ena_rx_ctx
->hash
, hash_type
);
1551 int ena_xdp_handle_buff(struct ena_ring
*rx_ring
, struct xdp_buff
*xdp
)
1553 struct ena_rx_buffer
*rx_info
;
1556 rx_info
= &rx_ring
->rx_buffer_info
[rx_ring
->ena_bufs
[0].req_id
];
1557 xdp
->data
= page_address(rx_info
->page
) +
1558 rx_info
->page_offset
+ rx_ring
->rx_headroom
;
1559 xdp_set_data_meta_invalid(xdp
);
1560 xdp
->data_hard_start
= page_address(rx_info
->page
);
1561 xdp
->data_end
= xdp
->data
+ rx_ring
->ena_bufs
[0].len
;
1562 /* If for some reason we received a bigger packet than
1563 * we expect, then we simply drop it
1565 if (unlikely(rx_ring
->ena_bufs
[0].len
> ENA_XDP_MAX_MTU
))
1568 ret
= ena_xdp_execute(rx_ring
, xdp
, rx_info
);
1570 /* The xdp program might expand the headers */
1571 if (ret
== XDP_PASS
) {
1572 rx_info
->page_offset
= xdp
->data
- xdp
->data_hard_start
;
1573 rx_ring
->ena_bufs
[0].len
= xdp
->data_end
- xdp
->data
;
1578 /* ena_clean_rx_irq - Cleanup RX irq
1579 * @rx_ring: RX ring to clean
1580 * @napi: napi handler
1581 * @budget: how many packets driver is allowed to clean
1583 * Returns the number of cleaned buffers.
1585 static int ena_clean_rx_irq(struct ena_ring
*rx_ring
, struct napi_struct
*napi
,
1588 u16 next_to_clean
= rx_ring
->next_to_clean
;
1589 struct ena_com_rx_ctx ena_rx_ctx
;
1590 struct ena_adapter
*adapter
;
1591 u32 res_budget
, work_done
;
1592 int rx_copybreak_pkt
= 0;
1593 int refill_threshold
;
1594 struct sk_buff
*skb
;
1595 int refill_required
;
1596 struct xdp_buff xdp
;
1602 netif_dbg(rx_ring
->adapter
, rx_status
, rx_ring
->netdev
,
1603 "%s qid %d\n", __func__
, rx_ring
->qid
);
1604 res_budget
= budget
;
1605 xdp
.rxq
= &rx_ring
->xdp_rxq
;
1608 xdp_verdict
= XDP_PASS
;
1610 ena_rx_ctx
.ena_bufs
= rx_ring
->ena_bufs
;
1611 ena_rx_ctx
.max_bufs
= rx_ring
->sgl_size
;
1612 ena_rx_ctx
.descs
= 0;
1613 rc
= ena_com_rx_pkt(rx_ring
->ena_com_io_cq
,
1614 rx_ring
->ena_com_io_sq
,
1619 if (unlikely(ena_rx_ctx
.descs
== 0))
1622 netif_dbg(rx_ring
->adapter
, rx_status
, rx_ring
->netdev
,
1623 "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
1624 rx_ring
->qid
, ena_rx_ctx
.descs
, ena_rx_ctx
.l3_proto
,
1625 ena_rx_ctx
.l4_proto
, ena_rx_ctx
.hash
);
1627 if (ena_xdp_present_ring(rx_ring
))
1628 xdp_verdict
= ena_xdp_handle_buff(rx_ring
, &xdp
);
1630 /* allocate skb and fill it */
1631 if (xdp_verdict
== XDP_PASS
)
1632 skb
= ena_rx_skb(rx_ring
,
1637 if (unlikely(!skb
)) {
1638 if (xdp_verdict
== XDP_TX
) {
1639 ena_free_rx_page(rx_ring
,
1640 &rx_ring
->rx_buffer_info
[rx_ring
->ena_bufs
[0].req_id
]);
1643 for (i
= 0; i
< ena_rx_ctx
.descs
; i
++) {
1644 rx_ring
->free_ids
[next_to_clean
] =
1645 rx_ring
->ena_bufs
[i
].req_id
;
1647 ENA_RX_RING_IDX_NEXT(next_to_clean
,
1648 rx_ring
->ring_size
);
1650 if (xdp_verdict
== XDP_TX
|| xdp_verdict
== XDP_DROP
)
1655 ena_rx_checksum(rx_ring
, &ena_rx_ctx
, skb
);
1657 ena_set_rx_hash(rx_ring
, &ena_rx_ctx
, skb
);
1659 skb_record_rx_queue(skb
, rx_ring
->qid
);
1661 if (rx_ring
->ena_bufs
[0].len
<= rx_ring
->rx_copybreak
) {
1662 total_len
+= rx_ring
->ena_bufs
[0].len
;
1664 napi_gro_receive(napi
, skb
);
1666 total_len
+= skb
->len
;
1667 napi_gro_frags(napi
);
1671 } while (likely(res_budget
));
1673 work_done
= budget
- res_budget
;
1674 rx_ring
->per_napi_packets
+= work_done
;
1675 u64_stats_update_begin(&rx_ring
->syncp
);
1676 rx_ring
->rx_stats
.bytes
+= total_len
;
1677 rx_ring
->rx_stats
.cnt
+= work_done
;
1678 rx_ring
->rx_stats
.rx_copybreak_pkt
+= rx_copybreak_pkt
;
1679 u64_stats_update_end(&rx_ring
->syncp
);
1681 rx_ring
->next_to_clean
= next_to_clean
;
1683 refill_required
= ena_com_free_desc(rx_ring
->ena_com_io_sq
);
1685 min_t(int, rx_ring
->ring_size
/ ENA_RX_REFILL_THRESH_DIVIDER
,
1686 ENA_RX_REFILL_THRESH_PACKET
);
1688 /* Optimization, try to batch new rx buffers */
1689 if (refill_required
> refill_threshold
) {
1690 ena_com_update_dev_comp_head(rx_ring
->ena_com_io_cq
);
1691 ena_refill_rx_bufs(rx_ring
, refill_required
);
1697 adapter
= netdev_priv(rx_ring
->netdev
);
1699 u64_stats_update_begin(&rx_ring
->syncp
);
1700 rx_ring
->rx_stats
.bad_desc_num
++;
1701 u64_stats_update_end(&rx_ring
->syncp
);
1703 /* Too many desc from the device. Trigger reset */
1704 adapter
->reset_reason
= ENA_REGS_RESET_TOO_MANY_RX_DESCS
;
1705 set_bit(ENA_FLAG_TRIGGER_RESET
, &adapter
->flags
);
1710 static void ena_dim_work(struct work_struct
*w
)
1712 struct dim
*dim
= container_of(w
, struct dim
, work
);
1713 struct dim_cq_moder cur_moder
=
1714 net_dim_get_rx_moderation(dim
->mode
, dim
->profile_ix
);
1715 struct ena_napi
*ena_napi
= container_of(dim
, struct ena_napi
, dim
);
1717 ena_napi
->rx_ring
->smoothed_interval
= cur_moder
.usec
;
1718 dim
->state
= DIM_START_MEASURE
;
1721 static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi
*ena_napi
)
1723 struct dim_sample dim_sample
;
1724 struct ena_ring
*rx_ring
= ena_napi
->rx_ring
;
1726 if (!rx_ring
->per_napi_packets
)
1729 rx_ring
->non_empty_napi_events
++;
1731 dim_update_sample(rx_ring
->non_empty_napi_events
,
1732 rx_ring
->rx_stats
.cnt
,
1733 rx_ring
->rx_stats
.bytes
,
1736 net_dim(&ena_napi
->dim
, dim_sample
);
1738 rx_ring
->per_napi_packets
= 0;
1741 static void ena_unmask_interrupt(struct ena_ring
*tx_ring
,
1742 struct ena_ring
*rx_ring
)
1744 struct ena_eth_io_intr_reg intr_reg
;
1745 u32 rx_interval
= 0;
1746 /* Rx ring can be NULL when for XDP tx queues which don't have an
1747 * accompanying rx_ring pair.
1750 rx_interval
= ena_com_get_adaptive_moderation_enabled(rx_ring
->ena_dev
) ?
1751 rx_ring
->smoothed_interval
:
1752 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring
->ena_dev
);
1754 /* Update intr register: rx intr delay,
1755 * tx intr delay and interrupt unmask
1757 ena_com_update_intr_reg(&intr_reg
,
1759 tx_ring
->smoothed_interval
,
1762 /* It is a shared MSI-X.
1763 * Tx and Rx CQ have pointer to it.
1764 * So we use one of them to reach the intr reg
1765 * The Tx ring is used because the rx_ring is NULL for XDP queues
1767 ena_com_unmask_intr(tx_ring
->ena_com_io_cq
, &intr_reg
);
1770 static void ena_update_ring_numa_node(struct ena_ring
*tx_ring
,
1771 struct ena_ring
*rx_ring
)
1773 int cpu
= get_cpu();
1776 /* Check only one ring since the 2 rings are running on the same cpu */
1777 if (likely(tx_ring
->cpu
== cpu
))
1780 numa_node
= cpu_to_node(cpu
);
1783 if (numa_node
!= NUMA_NO_NODE
) {
1784 ena_com_update_numa_node(tx_ring
->ena_com_io_cq
, numa_node
);
1786 ena_com_update_numa_node(rx_ring
->ena_com_io_cq
,
1799 static int ena_clean_xdp_irq(struct ena_ring
*xdp_ring
, u32 budget
)
1808 if (unlikely(!xdp_ring
))
1810 next_to_clean
= xdp_ring
->next_to_clean
;
1812 while (tx_pkts
< budget
) {
1813 struct ena_tx_buffer
*tx_info
;
1814 struct xdp_frame
*xdpf
;
1816 rc
= ena_com_tx_comp_req_id_get(xdp_ring
->ena_com_io_cq
,
1821 rc
= validate_xdp_req_id(xdp_ring
, req_id
);
1825 tx_info
= &xdp_ring
->tx_buffer_info
[req_id
];
1826 xdpf
= tx_info
->xdpf
;
1828 tx_info
->xdpf
= NULL
;
1829 tx_info
->last_jiffies
= 0;
1830 ena_unmap_tx_buff(xdp_ring
, tx_info
);
1832 netif_dbg(xdp_ring
->adapter
, tx_done
, xdp_ring
->netdev
,
1833 "tx_poll: q %d skb %p completed\n", xdp_ring
->qid
,
1836 tx_bytes
+= xdpf
->len
;
1838 total_done
+= tx_info
->tx_descs
;
1840 __free_page(tx_info
->xdp_rx_page
);
1841 xdp_ring
->free_ids
[next_to_clean
] = req_id
;
1842 next_to_clean
= ENA_TX_RING_IDX_NEXT(next_to_clean
,
1843 xdp_ring
->ring_size
);
1846 xdp_ring
->next_to_clean
= next_to_clean
;
1847 ena_com_comp_ack(xdp_ring
->ena_com_io_sq
, total_done
);
1848 ena_com_update_dev_comp_head(xdp_ring
->ena_com_io_cq
);
1850 netif_dbg(xdp_ring
->adapter
, tx_done
, xdp_ring
->netdev
,
1851 "tx_poll: q %d done. total pkts: %d\n",
1852 xdp_ring
->qid
, tx_pkts
);
1857 static int ena_io_poll(struct napi_struct
*napi
, int budget
)
1859 struct ena_napi
*ena_napi
= container_of(napi
, struct ena_napi
, napi
);
1860 struct ena_ring
*tx_ring
, *rx_ring
;
1862 int rx_work_done
= 0;
1864 int napi_comp_call
= 0;
1867 tx_ring
= ena_napi
->tx_ring
;
1868 rx_ring
= ena_napi
->rx_ring
;
1870 tx_ring
->first_interrupt
= ena_napi
->first_interrupt
;
1871 rx_ring
->first_interrupt
= ena_napi
->first_interrupt
;
1873 tx_budget
= tx_ring
->ring_size
/ ENA_TX_POLL_BUDGET_DIVIDER
;
1875 if (!test_bit(ENA_FLAG_DEV_UP
, &tx_ring
->adapter
->flags
) ||
1876 test_bit(ENA_FLAG_TRIGGER_RESET
, &tx_ring
->adapter
->flags
)) {
1877 napi_complete_done(napi
, 0);
1881 tx_work_done
= ena_clean_tx_irq(tx_ring
, tx_budget
);
1882 /* On netpoll the budget is zero and the handler should only clean the
1886 rx_work_done
= ena_clean_rx_irq(rx_ring
, napi
, budget
);
1888 /* If the device is about to reset or down, avoid unmask
1889 * the interrupt and return 0 so NAPI won't reschedule
1891 if (unlikely(!test_bit(ENA_FLAG_DEV_UP
, &tx_ring
->adapter
->flags
) ||
1892 test_bit(ENA_FLAG_TRIGGER_RESET
, &tx_ring
->adapter
->flags
))) {
1893 napi_complete_done(napi
, 0);
1896 } else if ((budget
> rx_work_done
) && (tx_budget
> tx_work_done
)) {
1899 /* Update numa and unmask the interrupt only when schedule
1900 * from the interrupt context (vs from sk_busy_loop)
1902 if (napi_complete_done(napi
, rx_work_done
)) {
1903 /* We apply adaptive moderation on Rx path only.
1904 * Tx uses static interrupt moderation.
1906 if (ena_com_get_adaptive_moderation_enabled(rx_ring
->ena_dev
))
1907 ena_adjust_adaptive_rx_intr_moderation(ena_napi
);
1909 ena_unmask_interrupt(tx_ring
, rx_ring
);
1912 ena_update_ring_numa_node(tx_ring
, rx_ring
);
1919 u64_stats_update_begin(&tx_ring
->syncp
);
1920 tx_ring
->tx_stats
.napi_comp
+= napi_comp_call
;
1921 tx_ring
->tx_stats
.tx_poll
++;
1922 u64_stats_update_end(&tx_ring
->syncp
);
1927 static irqreturn_t
ena_intr_msix_mgmnt(int irq
, void *data
)
1929 struct ena_adapter
*adapter
= (struct ena_adapter
*)data
;
1931 ena_com_admin_q_comp_intr_handler(adapter
->ena_dev
);
1933 /* Don't call the aenq handler before probe is done */
1934 if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING
, &adapter
->flags
)))
1935 ena_com_aenq_intr_handler(adapter
->ena_dev
, data
);
1940 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1941 * @irq: interrupt number
1942 * @data: pointer to a network interface private napi device structure
1944 static irqreturn_t
ena_intr_msix_io(int irq
, void *data
)
1946 struct ena_napi
*ena_napi
= data
;
1948 ena_napi
->first_interrupt
= true;
1950 napi_schedule_irqoff(&ena_napi
->napi
);
1955 /* Reserve a single MSI-X vector for management (admin + aenq).
1956 * plus reserve one vector for each potential io queue.
1957 * the number of potential io queues is the minimum of what the device
1958 * supports and the number of vCPUs.
1960 static int ena_enable_msix(struct ena_adapter
*adapter
)
1962 int msix_vecs
, irq_cnt
;
1964 if (test_bit(ENA_FLAG_MSIX_ENABLED
, &adapter
->flags
)) {
1965 netif_err(adapter
, probe
, adapter
->netdev
,
1966 "Error, MSI-X is already enabled\n");
1970 /* Reserved the max msix vectors we might need */
1971 msix_vecs
= ENA_MAX_MSIX_VEC(adapter
->num_io_queues
);
1972 netif_dbg(adapter
, probe
, adapter
->netdev
,
1973 "trying to enable MSI-X, vectors %d\n", msix_vecs
);
1975 irq_cnt
= pci_alloc_irq_vectors(adapter
->pdev
, ENA_MIN_MSIX_VEC
,
1976 msix_vecs
, PCI_IRQ_MSIX
);
1979 netif_err(adapter
, probe
, adapter
->netdev
,
1980 "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt
);
1984 if (irq_cnt
!= msix_vecs
) {
1985 netif_notice(adapter
, probe
, adapter
->netdev
,
1986 "enable only %d MSI-X (out of %d), reduce the number of queues\n",
1987 irq_cnt
, msix_vecs
);
1988 adapter
->num_io_queues
= irq_cnt
- ENA_ADMIN_MSIX_VEC
;
1991 if (ena_init_rx_cpu_rmap(adapter
))
1992 netif_warn(adapter
, probe
, adapter
->netdev
,
1993 "Failed to map IRQs to CPUs\n");
1995 adapter
->msix_vecs
= irq_cnt
;
1996 set_bit(ENA_FLAG_MSIX_ENABLED
, &adapter
->flags
);
2001 static void ena_setup_mgmnt_intr(struct ena_adapter
*adapter
)
2005 snprintf(adapter
->irq_tbl
[ENA_MGMNT_IRQ_IDX
].name
,
2006 ENA_IRQNAME_SIZE
, "ena-mgmnt@pci:%s",
2007 pci_name(adapter
->pdev
));
2008 adapter
->irq_tbl
[ENA_MGMNT_IRQ_IDX
].handler
=
2009 ena_intr_msix_mgmnt
;
2010 adapter
->irq_tbl
[ENA_MGMNT_IRQ_IDX
].data
= adapter
;
2011 adapter
->irq_tbl
[ENA_MGMNT_IRQ_IDX
].vector
=
2012 pci_irq_vector(adapter
->pdev
, ENA_MGMNT_IRQ_IDX
);
2013 cpu
= cpumask_first(cpu_online_mask
);
2014 adapter
->irq_tbl
[ENA_MGMNT_IRQ_IDX
].cpu
= cpu
;
2015 cpumask_set_cpu(cpu
,
2016 &adapter
->irq_tbl
[ENA_MGMNT_IRQ_IDX
].affinity_hint_mask
);
2019 static void ena_setup_io_intr(struct ena_adapter
*adapter
)
2021 struct net_device
*netdev
;
2022 int irq_idx
, i
, cpu
;
2025 netdev
= adapter
->netdev
;
2026 io_queue_count
= adapter
->num_io_queues
+ adapter
->xdp_num_queues
;
2028 for (i
= 0; i
< io_queue_count
; i
++) {
2029 irq_idx
= ENA_IO_IRQ_IDX(i
);
2030 cpu
= i
% num_online_cpus();
2032 snprintf(adapter
->irq_tbl
[irq_idx
].name
, ENA_IRQNAME_SIZE
,
2033 "%s-Tx-Rx-%d", netdev
->name
, i
);
2034 adapter
->irq_tbl
[irq_idx
].handler
= ena_intr_msix_io
;
2035 adapter
->irq_tbl
[irq_idx
].data
= &adapter
->ena_napi
[i
];
2036 adapter
->irq_tbl
[irq_idx
].vector
=
2037 pci_irq_vector(adapter
->pdev
, irq_idx
);
2038 adapter
->irq_tbl
[irq_idx
].cpu
= cpu
;
2040 cpumask_set_cpu(cpu
,
2041 &adapter
->irq_tbl
[irq_idx
].affinity_hint_mask
);
2045 static int ena_request_mgmnt_irq(struct ena_adapter
*adapter
)
2047 unsigned long flags
= 0;
2048 struct ena_irq
*irq
;
2051 irq
= &adapter
->irq_tbl
[ENA_MGMNT_IRQ_IDX
];
2052 rc
= request_irq(irq
->vector
, irq
->handler
, flags
, irq
->name
,
2055 netif_err(adapter
, probe
, adapter
->netdev
,
2056 "failed to request admin irq\n");
2060 netif_dbg(adapter
, probe
, adapter
->netdev
,
2061 "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
2062 irq
->affinity_hint_mask
.bits
[0], irq
->vector
);
2064 irq_set_affinity_hint(irq
->vector
, &irq
->affinity_hint_mask
);
2069 static int ena_request_io_irq(struct ena_adapter
*adapter
)
2071 unsigned long flags
= 0;
2072 struct ena_irq
*irq
;
2075 if (!test_bit(ENA_FLAG_MSIX_ENABLED
, &adapter
->flags
)) {
2076 netif_err(adapter
, ifup
, adapter
->netdev
,
2077 "Failed to request I/O IRQ: MSI-X is not enabled\n");
2081 for (i
= ENA_IO_IRQ_FIRST_IDX
; i
< adapter
->msix_vecs
; i
++) {
2082 irq
= &adapter
->irq_tbl
[i
];
2083 rc
= request_irq(irq
->vector
, irq
->handler
, flags
, irq
->name
,
2086 netif_err(adapter
, ifup
, adapter
->netdev
,
2087 "Failed to request I/O IRQ. index %d rc %d\n",
2092 netif_dbg(adapter
, ifup
, adapter
->netdev
,
2093 "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
2094 i
, irq
->affinity_hint_mask
.bits
[0], irq
->vector
);
2096 irq_set_affinity_hint(irq
->vector
, &irq
->affinity_hint_mask
);
2102 for (k
= ENA_IO_IRQ_FIRST_IDX
; k
< i
; k
++) {
2103 irq
= &adapter
->irq_tbl
[k
];
2104 free_irq(irq
->vector
, irq
->data
);
2110 static void ena_free_mgmnt_irq(struct ena_adapter
*adapter
)
2112 struct ena_irq
*irq
;
2114 irq
= &adapter
->irq_tbl
[ENA_MGMNT_IRQ_IDX
];
2115 synchronize_irq(irq
->vector
);
2116 irq_set_affinity_hint(irq
->vector
, NULL
);
2117 free_irq(irq
->vector
, irq
->data
);
2120 static void ena_free_io_irq(struct ena_adapter
*adapter
)
2122 struct ena_irq
*irq
;
2125 #ifdef CONFIG_RFS_ACCEL
2126 if (adapter
->msix_vecs
>= 1) {
2127 free_irq_cpu_rmap(adapter
->netdev
->rx_cpu_rmap
);
2128 adapter
->netdev
->rx_cpu_rmap
= NULL
;
2130 #endif /* CONFIG_RFS_ACCEL */
2132 for (i
= ENA_IO_IRQ_FIRST_IDX
; i
< adapter
->msix_vecs
; i
++) {
2133 irq
= &adapter
->irq_tbl
[i
];
2134 irq_set_affinity_hint(irq
->vector
, NULL
);
2135 free_irq(irq
->vector
, irq
->data
);
2139 static void ena_disable_msix(struct ena_adapter
*adapter
)
2141 if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED
, &adapter
->flags
))
2142 pci_free_irq_vectors(adapter
->pdev
);
2145 static void ena_disable_io_intr_sync(struct ena_adapter
*adapter
)
2149 if (!netif_running(adapter
->netdev
))
2152 for (i
= ENA_IO_IRQ_FIRST_IDX
; i
< adapter
->msix_vecs
; i
++)
2153 synchronize_irq(adapter
->irq_tbl
[i
].vector
);
2156 static void ena_del_napi_in_range(struct ena_adapter
*adapter
,
2162 for (i
= first_index
; i
< first_index
+ count
; i
++) {
2163 /* Check if napi was initialized before */
2164 if (!ENA_IS_XDP_INDEX(adapter
, i
) ||
2165 adapter
->ena_napi
[i
].xdp_ring
)
2166 netif_napi_del(&adapter
->ena_napi
[i
].napi
);
2168 WARN_ON(ENA_IS_XDP_INDEX(adapter
, i
) &&
2169 adapter
->ena_napi
[i
].xdp_ring
);
static void ena_init_napi_in_range(struct ena_adapter *adapter,
				   int first_index, int count)
{
	struct ena_napi *napi = {0};
	int i;

	for (i = first_index; i < first_index + count; i++) {
		napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev,
			       &adapter->ena_napi[i].napi,
			       ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
			       ENA_NAPI_BUDGET);

		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			napi->rx_ring = &adapter->rx_ring[i];
			napi->tx_ring = &adapter->tx_ring[i];
		} else {
			napi->xdp_ring = &adapter->tx_ring[i];
		}
		napi->qid = i;
	}
}

static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index,
				      int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}

static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index,
				     int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}
/* Configure the Rx forwarding */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EOPNOTSUPP)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EOPNOTSUPP))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	return 0;
}

static int ena_up_complete(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_rss_configure(adapter);
	if (rc)
		return rc;

	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

	ena_refill_all_rx_bufs(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	ena_napi_enable_in_range(adapter,
				 0,
				 adapter->xdp_num_queues + adapter->num_io_queues);

	return 0;
}
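
/* Creating an I/O queue is a three step sequence: issue the admin command
 * that creates the SQ/CQ pair (ena_com_create_io_queue), fetch the resulting
 * ena_com handles for the ring, and then pass the ring's NUMA node to the
 * device so completions are written close to the CPU that polls them.
 */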
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_create_io_ctx ctx;
	struct ena_com_dev *ena_dev;
	struct ena_ring *tx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	tx_ring = &adapter->tx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_TXQ_IDX(qid);

	memset(&ctx, 0x0, sizeof(ctx));

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = tx_ring->ring_size;
	ctx.numa_node = cpu_to_node(tx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O TX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &tx_ring->ena_com_io_sq,
				     &tx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
}
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					    int first_index, int count)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_create_io_tx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i-- > first_index)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return rc;
}
static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_dev *ena_dev;
	struct ena_com_create_io_ctx ctx;
	struct ena_ring *rx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	rx_ring = &adapter->rx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_RXQ_IDX(qid);

	memset(&ctx, 0x0, sizeof(ctx));

	ctx.qid = ena_qid;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = rx_ring->ring_size;
	ctx.numa_node = cpu_to_node(rx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O RX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &rx_ring->ena_com_io_sq,
				     &rx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
			  qid, rc);
		goto err;
	}

	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
err:
	ena_com_destroy_io_queue(ena_dev, ena_qid);
	return rc;
}

static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_create_io_rx_queue(adapter, i);
		if (rc)
			goto create_err;
		INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
	}

	return 0;

create_err:
	while (i--) {
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
	}

	return rc;
}
static void set_io_rings_size(struct ena_adapter *adapter,
			      int new_tx_size,
			      int new_rx_size)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		adapter->tx_ring[i].ring_size = new_tx_size;
		adapter->rx_ring[i].ring_size = new_rx_size;
	}
}

/* This function allows queue allocation to backoff when the system is
 * low on memory. If there is not enough memory to allocate io queues
 * the driver will try to allocate smaller queues.
 *
 * The backoff algorithm is as follows:
 *  1. Try to allocate TX and RX and if successful.
 *  1.1. return success
 *
 *  2. Divide by 2 the size of the larger of RX and TX queues (or both if their size is the same).
 *
 *  3. If TX or RX is smaller than 256
 *  3.1. return failure.
 *  4. Else
 *  4.1. go back to 1.
 */
static int create_queues_with_size_backoff(struct ena_adapter *adapter)
{
	int rc, cur_rx_ring_size, cur_tx_ring_size;
	int new_rx_ring_size, new_tx_ring_size;

	/* current queue sizes might be set to smaller than the requested
	 * ones due to past queue allocation failures.
	 */
	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
			  adapter->requested_rx_ring_size);

	while (1) {
		if (ena_xdp_present(adapter)) {
			rc = ena_setup_and_create_all_xdp_queues(adapter);
			if (rc)
				goto err_setup_tx;
		}
		rc = ena_setup_tx_resources_in_range(adapter,
						     0,
						     adapter->num_io_queues);
		if (rc)
			goto err_setup_tx;

		rc = ena_create_io_tx_queues_in_range(adapter,
						      0,
						      adapter->num_io_queues);
		if (rc)
			goto err_create_tx_queues;

		rc = ena_setup_all_rx_resources(adapter);
		if (rc)
			goto err_setup_rx;

		rc = ena_create_all_io_rx_queues(adapter);
		if (rc)
			goto err_create_rx_queues;

		return 0;

err_create_rx_queues:
		ena_free_all_io_rx_resources(adapter);
err_setup_rx:
		ena_destroy_all_tx_queues(adapter);
err_create_tx_queues:
		ena_free_all_io_tx_resources(adapter);
err_setup_tx:
		if (rc != -ENOMEM) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Queue creation failed with error code %d\n",
				  rc);
			return rc;
		}

		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
		cur_rx_ring_size = adapter->rx_ring[0].ring_size;

		netif_err(adapter, ifup, adapter->netdev,
			  "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
			  cur_tx_ring_size, cur_rx_ring_size);

		new_tx_ring_size = cur_tx_ring_size;
		new_rx_ring_size = cur_rx_ring_size;

		/* Decrease the size of the larger queue, or
		 * decrease both if they are the same size.
		 */
		if (cur_rx_ring_size <= cur_tx_ring_size)
			new_tx_ring_size = cur_tx_ring_size / 2;
		if (cur_rx_ring_size >= cur_tx_ring_size)
			new_rx_ring_size = cur_rx_ring_size / 2;

		if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
		    new_rx_ring_size < ENA_MIN_RING_SIZE) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
				  ENA_MIN_RING_SIZE);
			return rc;
		}

		netif_err(adapter, ifup, adapter->netdev,
			  "Retrying queue creation with sizes TX=%d, RX=%d\n",
			  new_tx_ring_size,
			  new_rx_ring_size);

		set_io_rings_size(adapter, new_tx_ring_size,
				  new_rx_ring_size);
	}
}
static int ena_up(struct ena_adapter *adapter)
{
	int io_queue_count, rc, i;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	ena_setup_io_intr(adapter);

	/* napi poll functions should be initialized before running
	 * request_irq(), to handle a rare condition where there is a pending
	 * interrupt, causing the ISR to fire immediately while the poll
	 * function wasn't set yet, causing a null dereference
	 */
	ena_init_napi_in_range(adapter, 0, io_queue_count);

	rc = ena_request_io_irq(adapter);
	if (rc)
		goto err_req_irq;

	rc = create_queues_with_size_backoff(adapter);
	if (rc)
		goto err_create_queues_with_backoff;

	rc = ena_up_complete(adapter);
	if (rc)
		goto err_up;

	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_up++;
	u64_stats_update_end(&adapter->syncp);

	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	/* Enable completion queues interrupt */
	for (i = 0; i < adapter->num_io_queues; i++)
		ena_unmask_interrupt(&adapter->tx_ring[i],
				     &adapter->rx_ring[i]);

	/* schedule napi in case we had pending packets
	 * from the last time we disable napi
	 */
	for (i = 0; i < io_queue_count; i++)
		napi_schedule(&adapter->ena_napi[i].napi);

	return rc;

err_up:
	ena_destroy_all_tx_queues(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_destroy_all_rx_queues(adapter);
	ena_free_all_io_rx_resources(adapter);
err_create_queues_with_backoff:
	ena_free_io_irq(adapter);
err_req_irq:
	ena_del_napi_in_range(adapter, 0, io_queue_count);

	return rc;
}
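
/* Teardown mirrors ena_up() in reverse: transmits are stopped and NAPI is
 * disabled before the I/O queues are destroyed, the I/O IRQs are
 * synchronized and freed, and only then are the buffers and ring resources
 * released.
 */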
static void ena_down(struct ena_adapter *adapter)
{
	int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;

	netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);

	clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_down++;
	u64_stats_update_end(&adapter->syncp);

	netif_carrier_off(adapter->netdev);
	netif_tx_disable(adapter->netdev);

	/* After this point the napi handler won't enable the tx queue */
	ena_napi_disable_in_range(adapter, 0, io_queue_count);

	/* After destroy the queue there won't be any new interrupts */

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
		int rc;

		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
		if (rc)
			dev_err(&adapter->pdev->dev, "Device reset failed\n");
		/* stop submitting admin commands on a device that was reset */
		ena_com_set_admin_running_state(adapter->ena_dev, false);
	}

	ena_destroy_all_io_queues(adapter);

	ena_disable_io_intr_sync(adapter);
	ena_free_io_irq(adapter);
	ena_del_napi_in_range(adapter, 0, io_queue_count);

	ena_free_all_tx_bufs(adapter);
	ena_free_all_rx_bufs(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_free_all_io_rx_resources(adapter);
}
/* ena_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int ena_open(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* Notify the stack of the actual queue counts. */
	rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
		return rc;
	}

	rc = ena_up(adapter);
	if (rc)
		return rc;

	return rc;
}
/* ena_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int ena_close(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);

	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		return 0;

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	/* Check for device status and issue reset if needed*/
	check_for_admin_com_state(adapter);
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, ifdown, adapter->netdev,
			  "Destroy failure, restarting device\n");
		ena_dump_stats_to_dmesg(adapter);
		/* rtnl lock already obtained in dev_ioctl() layer */
		ena_destroy_device(adapter, false);
		ena_restore_device(adapter);
	}

	return 0;
}
int ena_update_queue_sizes(struct ena_adapter *adapter,
			   u32 new_tx_size,
			   u32 new_rx_size)
{
	bool dev_was_up;

	dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	ena_close(adapter->netdev);
	adapter->requested_tx_ring_size = new_tx_size;
	adapter->requested_rx_ring_size = new_rx_size;
	ena_init_io_rings(adapter,
			  0,
			  adapter->xdp_num_queues +
			  adapter->num_io_queues);
	return dev_was_up ? ena_up(adapter) : 0;
}
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int prev_channel_count;
	bool dev_was_up;

	dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	ena_close(adapter->netdev);
	prev_channel_count = adapter->num_io_queues;
	adapter->num_io_queues = new_channel_count;
	if (ena_xdp_present(adapter) &&
	    ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
		adapter->xdp_first_ring = new_channel_count;
		adapter->xdp_num_queues = new_channel_count;
		if (prev_channel_count > new_channel_count)
			ena_xdp_exchange_program_rx_in_range(adapter,
							     NULL,
							     new_channel_count,
							     prev_channel_count);
		else
			ena_xdp_exchange_program_rx_in_range(adapter,
							     adapter->xdp_bpf_prog,
							     prev_channel_count,
							     new_channel_count);
	}

	/* We need to destroy the rss table so that the indirection
	 * table will be reinitialized by ena_up()
	 */
	ena_com_rss_destroy(ena_dev);
	ena_init_io_rings(adapter,
			  0,
			  adapter->xdp_num_queues +
			  adapter->num_io_queues);
	return dev_was_up ? ena_open(adapter->netdev) : 0;
}
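
/* Fill the TX descriptor metadata (checksum offload and TSO fields) from the
 * skb. The device needs the L3/L4 protocol, header lengths and offsets and,
 * for TSO, the MSS; when no offload is requested meta_valid stays clear.
 */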
static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
{
	u32 mss = skb_shinfo(skb)->gso_size;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
	u8 l4_protocol = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
		ena_tx_ctx->l4_csum_enable = 1;
		if (mss) {
			ena_tx_ctx->tso_enable = 1;
			ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
			ena_tx_ctx->l4_csum_partial = 0;
		} else {
			ena_tx_ctx->tso_enable = 0;
			ena_meta->l4_hdr_len = 0;
			ena_tx_ctx->l4_csum_partial = 1;
		}

		switch (ip_hdr(skb)->version) {
		case IPVERSION:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
			if (ip_hdr(skb)->frag_off & htons(IP_DF))
				ena_tx_ctx->df = 1;
			if (mss)
				ena_tx_ctx->l3_csum_enable = 1;
			l4_protocol = ip_hdr(skb)->protocol;
			break;
		case 6:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
			l4_protocol = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			break;
		}

		if (l4_protocol == IPPROTO_TCP)
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
		else
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;

		ena_meta->mss = mss;
		ena_meta->l3_hdr_len = skb_network_header_len(skb);
		ena_meta->l3_hdr_offset = skb_network_offset(skb);
		ena_tx_ctx->meta_valid = 1;
	} else {
		ena_tx_ctx->meta_valid = 0;
	}
}
static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
				       struct sk_buff *skb)
{
	int num_frags, header_len, rc;

	num_frags = skb_shinfo(skb)->nr_frags;
	header_len = skb_headlen(skb);

	if (num_frags < tx_ring->sgl_size)
		return 0;

	if ((num_frags == tx_ring->sgl_size) &&
	    (header_len < tx_ring->tx_max_header_size))
		return 0;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.linearize++;
	u64_stats_update_end(&tx_ring->syncp);

	rc = skb_linearize(skb);
	if (unlikely(rc)) {
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.linearize_failed++;
		u64_stats_update_end(&tx_ring->syncp);
	}

	return rc;
}
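
/* Map the skb for transmission. In LLQ (device placement) mode up to
 * tx_max_header_size bytes are returned through push_hdr so they can be
 * written directly to device memory; the remainder of the linear part and
 * the page fragments are DMA mapped into tx_info->bufs.
 */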
static int ena_tx_map_skb(struct ena_ring *tx_ring,
			  struct ena_tx_buffer *tx_info,
			  struct sk_buff *skb,
			  void **push_hdr,
			  u16 *header_len)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma;
	u32 skb_head_len, frag_len, last_frag;
	u16 push_len = 0;
	u16 delta = 0;
	int i = 0;

	skb_head_len = skb_headlen(skb);
	tx_info->skb = skb;
	ena_buf = tx_info->bufs;

	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* When the device is LLQ mode, the driver will copy
		 * the header into the device memory space.
		 * the ena_com layer assume the header is in a linear
		 * memory space.
		 * This assumption might be wrong since part of the header
		 * can be in the fragmented buffers.
		 * Use skb_header_pointer to make sure the header is in a
		 * linear memory space.
		 */
		push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
		*push_hdr = skb_header_pointer(skb, 0, push_len,
					       tx_ring->push_buf_intermediate_buf);
		*header_len = push_len;
		if (unlikely(skb->data != *push_hdr)) {
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.llq_buffer_copy++;
			u64_stats_update_end(&tx_ring->syncp);

			delta = push_len - skb_head_len;
		}
	} else {
		*push_hdr = NULL;
		*header_len = min_t(u32, skb_head_len,
				    tx_ring->tx_max_header_size);
	}

	netif_dbg(adapter, tx_queued, adapter->netdev,
		  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
		  *push_hdr, push_len);

	if (skb_head_len > push_len) {
		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
				     skb_head_len - push_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = skb_head_len - push_len;

		ena_buf++;
		tx_info->num_of_bufs++;
		tx_info->map_linear_data = 1;
	} else {
		tx_info->map_linear_data = 0;
	}

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		frag_len = skb_frag_size(frag);

		if (unlikely(delta >= frag_len)) {
			delta -= frag_len;
			continue;
		}

		dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
				       frag_len - delta, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = frag_len - delta;
		ena_buf++;
		tx_info->num_of_bufs++;
		delta = 0;
	}

	return 0;

error_report_dma_error:
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&tx_ring->syncp);
	netdev_warn(adapter->netdev, "failed to map skb\n");

	tx_info->skb = NULL;

	tx_info->num_of_bufs += i;
	ena_unmap_tx_buff(tx_ring, tx_info);

	return -EINVAL;
}
/* Called with netif_tx_lock. */
static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_tx_buffer *tx_info;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_ring *tx_ring;
	struct netdev_queue *txq;
	void *push_hdr;
	u16 next_to_use, req_id, header_len;
	int qid, rc;

	netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
	/* Determine which tx ring we will be placed on */
	qid = skb_get_queue_mapping(skb);
	tx_ring = &adapter->tx_ring[qid];
	txq = netdev_get_tx_queue(dev, qid);

	rc = ena_check_and_linearize_skb(tx_ring, skb);
	if (unlikely(rc))
		goto error_drop_packet;

	skb_tx_timestamp(skb);

	next_to_use = tx_ring->next_to_use;
	req_id = tx_ring->free_ids[next_to_use];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);

	rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
	if (unlikely(rc))
		goto error_drop_packet;

	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = header_len;

	/* set flags and meta data */
	ena_tx_csum(&ena_tx_ctx, skb);

	rc = ena_xmit_common(dev,
			     tx_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     skb->len);
	if (rc)
		goto error_unmap_dma;

	netdev_tx_sent_queue(txq, skb->len);

	/* stop the queue when no more space available, the packet can have up
	 * to sgl_size + 2. one for the meta descriptor and one for header
	 * (if the header is larger than tx_max_header_size).
	 */
	if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						   tx_ring->sgl_size + 2))) {
		netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
			  __func__, qid);

		netif_tx_stop_queue(txq);
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.queue_stop++;
		u64_stats_update_end(&tx_ring->syncp);

		/* There is a rare condition where this function decides to
		 * stop the queue but meanwhile clean_tx_irq updates
		 * next_to_completion and terminates.
		 * The queue will remain stopped forever.
		 * To solve this issue add a mb() to make sure that
		 * netif_tx_stop_queue() write is visible before checking if
		 * there is additional space in the queue.
		 */
		smp_mb();

		if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						 ENA_TX_WAKEUP_THRESH)) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
	}

	if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
		/* trigger the dma engine. ena_com_write_sq_doorbell()
		 * has a mb
		 */
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.doorbells++;
		u64_stats_update_end(&tx_ring->syncp);
	}

	return NETDEV_TX_OK;

error_unmap_dma:
	ena_unmap_tx_buff(tx_ring, tx_info);
	tx_info->skb = NULL;

error_drop_packet:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	u16 qid;
	/* we suspect that this is good for in-kernel network services that
	 * want to loop incoming skb rx to tx in normal user generated traffic,
	 * most probably we will not get to this
	 */
	if (skb_rx_queue_recorded(skb))
		qid = skb_get_rx_queue(skb);
	else
		qid = netdev_pick_tx(dev, skb, NULL);

	return qid;
}
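
/* Report host information (kernel version, distribution string, driver
 * version and supported driver features) to the device through the admin
 * queue. Failures here are not fatal; the driver simply continues without
 * the host info attribute.
 */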
static void ena_config_host_info(struct ena_com_dev *ena_dev,
				 struct pci_dev *pdev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		pr_err("Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
	host_info->os_type = ENA_ADMIN_OS_LINUX;
	host_info->kernel_ver = LINUX_VERSION_CODE;
	strlcpy(host_info->kernel_ver_str, utsname()->version,
		sizeof(host_info->kernel_ver_str) - 1);
	host_info->os_dist = 0;
	strncpy(host_info->os_dist_str, utsname()->release,
		sizeof(host_info->os_dist_str) - 1);
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
		("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
	host_info->num_cpus = num_online_cpus();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			pr_warn("Cannot set host attributes\n");
		else
			pr_err("Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}
static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
	if (ss_count <= 0) {
		netif_err(adapter, drv, adapter->netdev,
			  "SS count is negative\n");
		return;
	}

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
	if (rc) {
		pr_err("Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(adapter->ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			netif_warn(adapter, drv, adapter->netdev,
				   "Cannot set host attributes\n");
		else
			netif_err(adapter, drv, adapter->netdev,
				  "Cannot set host attributes\n");
		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(adapter->ena_dev);
}
static void ena_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_ring *rx_ring, *tx_ring;
	unsigned int start;
	u64 rx_drops;
	int i;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_io_queues; i++) {
		u64 bytes, packets;

		tx_ring = &adapter->tx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->tx_stats.cnt;
			bytes = tx_ring->tx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;

		rx_ring = &adapter->rx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->rx_stats.cnt;
			bytes = rx_ring->rx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	do {
		start = u64_stats_fetch_begin_irq(&adapter->syncp);
		rx_drops = adapter->dev_stats.rx_drops;
	} while (u64_stats_fetch_retry_irq(&adapter->syncp, start));

	stats->rx_dropped = rx_drops;

	stats->multicast = 0;
	stats->collisions = 0;

	stats->rx_length_errors = 0;
	stats->rx_crc_errors = 0;
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = 0;
	stats->rx_missed_errors = 0;
	stats->tx_window_errors = 0;

	stats->rx_errors = 0;
	stats->tx_errors = 0;
}
static const struct net_device_ops ena_netdev_ops = {
	.ndo_open		= ena_open,
	.ndo_stop		= ena_close,
	.ndo_start_xmit		= ena_start_xmit,
	.ndo_select_queue	= ena_select_queue,
	.ndo_get_stats64	= ena_get_stats64,
	.ndo_tx_timeout		= ena_tx_timeout,
	.ndo_change_mtu		= ena_change_mtu,
	.ndo_set_mac_address	= NULL,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_bpf		= ena_xdp,
};
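
/* After a reset the device features are re-read; the restore path only
 * proceeds if the MAC address and the maximum MTU still fit what the netdev
 * was configured with.
 */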
static int ena_device_validate_params(struct ena_adapter *adapter,
				      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
			      adapter->mac_addr);
	if (!rc) {
		netif_err(adapter, drv, netdev,
			  "Error, mac addresses are different\n");
		return -EINVAL;
	}

	if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
		netif_err(adapter, drv, netdev,
			  "Error, device max mtu is smaller than netdev MTU\n");
		return -EINVAL;
	}

	return 0;
}
static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state)
{
	struct device *dev = &pdev->dev;
	bool readless_supported;
	u32 aenq_groups;
	int dma_width;
	int rc;

	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc) {
		dev_err(dev, "failed to init mmio read less\n");
		return rc;
	}

	/* The PCIe configuration space revision id indicate if mmio reg
	 * read is disabled
	 */
	readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (rc) {
		dev_err(dev, "Can not reset device\n");
		goto err_mmio_read_less;
	}

	rc = ena_com_validate_version(ena_dev);
	if (rc) {
		dev_err(dev, "device version is too low\n");
		goto err_mmio_read_less;
	}

	dma_width = ena_com_get_dma_width(ena_dev);
	if (dma_width < 0) {
		dev_err(dev, "Invalid dma width value %d", dma_width);
		rc = dma_width;
		goto err_mmio_read_less;
	}

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
	if (rc) {
		dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
		goto err_mmio_read_less;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
	if (rc) {
		dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
			rc);
		goto err_mmio_read_less;
	}

	/* ENA admin level init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
	if (rc) {
		dev_err(dev,
			"Can not initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	/* To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	ena_config_host_info(ena_dev, pdev);

	/* Get Device Attributes*/
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (rc) {
		dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
		goto err_admin_init;
	}

	/* Try to turn all the available aenq groups */
	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
		      BIT(ENA_ADMIN_FATAL_ERROR) |
		      BIT(ENA_ADMIN_WARNING) |
		      BIT(ENA_ADMIN_NOTIFICATION) |
		      BIT(ENA_ADMIN_KEEP_ALIVE);

	aenq_groups &= get_feat_ctx->aenq.supported_groups;

	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
	if (rc) {
		dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
		goto err_admin_init;
	}

	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));

	return 0;

err_admin_init:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return rc;
}
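
/* Once the MSI-X vectors are reserved and the management IRQ is requested,
 * the admin queue can leave polling mode and AENQ notifications are enabled.
 */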
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	int rc;

	rc = ena_enable_msix(adapter);
	if (rc) {
		dev_err(dev, "Can not reserve msix vectors\n");
		return rc;
	}

	ena_setup_mgmnt_intr(adapter);

	rc = ena_request_mgmnt_irq(adapter);
	if (rc) {
		dev_err(dev, "Can not setup management interrupts\n");
		goto err_disable_msix;
	}

	ena_com_set_admin_polling_mode(ena_dev, false);

	ena_com_admin_aenq_enable(ena_dev);

	return 0;

err_disable_msix:
	ena_disable_msix(adapter);

	return rc;
}
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
	struct net_device *netdev = adapter->netdev;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	bool dev_up;

	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		return;

	netif_carrier_off(netdev);

	del_timer_sync(&adapter->timer_service);

	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	adapter->dev_up_before_reset = dev_up;
	if (!graceful)
		ena_com_set_admin_running_state(ena_dev, false);

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	/* Stop the device from sending AENQ events (in case reset flag is set
	 * and device is up, ena_down() already reset the device.
	 */
	if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);

	ena_free_mgmnt_irq(adapter);

	ena_disable_msix(adapter);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
}
static int ena_restore_device(struct ena_adapter *adapter)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct pci_dev *pdev = adapter->pdev;
	bool wd_state;
	int rc;

	set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "Can not initialize device\n");
		goto err;
	}
	adapter->wd_state = wd_state;

	rc = ena_device_validate_params(adapter, &get_feat_ctx);
	if (rc) {
		dev_err(&pdev->dev, "Validation of device parameters failed\n");
		goto err_device_destroy;
	}

	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (rc) {
		dev_err(&pdev->dev, "Enable MSI-X failed\n");
		goto err_device_destroy;
	}
	/* If the interface was up before the reset bring it up */
	if (adapter->dev_up_before_reset) {
		rc = ena_up(adapter);
		if (rc) {
			dev_err(&pdev->dev, "Failed to create I/O queues\n");
			goto err_disable_msix;
		}
	}

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
	dev_err(&pdev->dev,
		"Device reset completed successfully, Driver info: %s\n",
		version);

	return rc;
err_disable_msix:
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_device_destroy:
	ena_com_abort_admin_commands(ena_dev);
	ena_com_wait_for_abort_completion(ena_dev);
	ena_com_admin_destroy(ena_dev);
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
	ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	dev_err(&pdev->dev,
		"Reset attempt failed. Can not reset the device\n");

	return rc;
}
static void ena_fw_reset_device(struct work_struct *work)
{
	struct ena_adapter *adapter =
		container_of(work, struct ena_adapter, reset_task);
	struct pci_dev *pdev = adapter->pdev;

	if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"device reset schedule while reset bit is off\n");
		return;
	}
	rtnl_lock();
	ena_destroy_device(adapter, false);
	ena_restore_device(adapter);
	rtnl_unlock();
}
static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
					struct ena_ring *rx_ring)
{
	if (likely(rx_ring->first_interrupt))
		return 0;

	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
		return 0;

	rx_ring->no_interrupt_event_cnt++;

	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
		netif_err(adapter, rx_err, adapter->netdev,
			  "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
			  rx_ring->qid);
		adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
		smp_mb__before_atomic();
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		return -EIO;
	}

	return 0;
}
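
/* Scan a TX ring for requests whose completion is overdue. A packet that is
 * pending for more than missing_tx_completion_to jiffies is counted (and
 * reported once); if the count crosses missing_tx_completion_threshold, or
 * if no interrupt was ever seen on the ring, a device reset is scheduled.
 */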
static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
					  struct ena_ring *tx_ring)
{
	struct ena_tx_buffer *tx_buf;
	unsigned long last_jiffies;
	u32 missed_tx = 0;
	int i, rc = 0;

	for (i = 0; i < tx_ring->ring_size; i++) {
		tx_buf = &tx_ring->tx_buffer_info[i];
		last_jiffies = tx_buf->last_jiffies;

		if (last_jiffies == 0)
			/* no pending Tx at this location */
			continue;

		if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
			     2 * adapter->missing_tx_completion_to))) {
			/* If after graceful period interrupt is still not
			 * received, we schedule a reset
			 */
			netif_err(adapter, tx_err, adapter->netdev,
				  "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
				  tx_ring->qid);
			adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
			smp_mb__before_atomic();
			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
			return -EIO;
		}

		if (unlikely(time_is_before_jiffies(last_jiffies +
				adapter->missing_tx_completion_to))) {
			if (!tx_buf->print_once)
				netif_notice(adapter, tx_err, adapter->netdev,
					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
					     tx_ring->qid, i);

			tx_buf->print_once = 1;
			missed_tx++;
		}
	}

	if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
		netif_err(adapter, tx_err, adapter->netdev,
			  "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
			  missed_tx,
			  adapter->missing_tx_completion_threshold);
		adapter->reset_reason =
			ENA_REGS_RESET_MISS_TX_CMPL;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		rc = -EIO;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.missed_tx = missed_tx;
	u64_stats_update_end(&tx_ring->syncp);

	return rc;
}
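
/* Walk the TX/RX rings in a round-robin fashion, checking at most
 * ENA_MONITORED_TX_QUEUES rings per timer tick and remembering where to
 * resume on the next invocation.
 */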
static void check_for_missing_completions(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	struct ena_ring *rx_ring;
	int i, budget, rc;
	int io_queue_count;

	io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
	/* Make sure the driver doesn't turn the device in other process */
	smp_rmb();

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	budget = ENA_MONITORED_TX_QUEUES;

	for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
		tx_ring = &adapter->tx_ring[i];
		rx_ring = &adapter->rx_ring[i];

		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
		if (unlikely(rc))
			return;

		rc = !ENA_IS_XDP_INDEX(adapter, i) ?
			check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
		if (unlikely(rc))
			return;

		budget--;
		if (!budget)
			break;
	}

	adapter->last_monitored_tx_qid = i % io_queue_count;
}
/* trigger napi schedule after 2 consecutive detections */
#define EMPTY_RX_REFILL 2
/* For the rare case where the device runs out of Rx descriptors and the
 * napi handler failed to refill new Rx descriptors (due to a lack of memory
 * for example).
 * This case will lead to a deadlock:
 * The device won't send interrupts since all the new Rx packets will be dropped
 * The napi handler won't allocate new Rx descriptors so the device won't be
 * able to send new packets.
 *
 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
 * It is recommended to have at least 512MB, with a minimum of 128MB for a
 * constrained environment.
 *
 * When such a situation is detected - Reschedule napi
 */
static void check_for_empty_rx_ring(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, refill_required;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];

		refill_required =
			ena_com_free_desc(rx_ring->ena_com_io_sq);
		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
			rx_ring->empty_rx_queue++;

			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
				u64_stats_update_begin(&rx_ring->syncp);
				rx_ring->rx_stats.empty_rx_ring++;
				u64_stats_update_end(&rx_ring->syncp);

				netif_err(adapter, drv, adapter->netdev,
					  "trigger refill for ring %d\n", i);

				napi_schedule(rx_ring->napi);
				rx_ring->empty_rx_queue = 0;
			}
		} else {
			rx_ring->empty_rx_queue = 0;
		}
	}
}
/* Check for keep alive expiration */
static void check_for_missing_keep_alive(struct ena_adapter *adapter)
{
	unsigned long keep_alive_expired;

	if (!adapter->wd_state)
		return;

	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
					   adapter->keep_alive_timeout);
	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Keep alive watchdog timeout.\n");
		u64_stats_update_begin(&adapter->syncp);
		adapter->dev_stats.wd_expired++;
		u64_stats_update_end(&adapter->syncp);
		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
}

static void check_for_admin_com_state(struct ena_adapter *adapter)
{
	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
		netif_err(adapter, drv, adapter->netdev,
			  "ENA admin queue is not in running state!\n");
		u64_stats_update_begin(&adapter->syncp);
		adapter->dev_stats.admin_q_pause++;
		u64_stats_update_end(&adapter->syncp);
		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
}
static void ena_update_hints(struct ena_adapter *adapter,
			     struct ena_admin_ena_hw_hints *hints)
{
	struct net_device *netdev = adapter->netdev;

	if (hints->admin_completion_tx_timeout)
		adapter->ena_dev->admin_queue.completion_timeout =
			hints->admin_completion_tx_timeout * 1000;

	if (hints->mmio_read_timeout)
		/* convert to usec */
		adapter->ena_dev->mmio_read.reg_read_to =
			hints->mmio_read_timeout * 1000;

	if (hints->missed_tx_completion_count_threshold_to_reset)
		adapter->missing_tx_completion_threshold =
			hints->missed_tx_completion_count_threshold_to_reset;

	if (hints->missing_tx_completion_timeout) {
		if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
		else
			adapter->missing_tx_completion_to =
				msecs_to_jiffies(hints->missing_tx_completion_timeout);
	}

	if (hints->netdev_wd_timeout)
		netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);

	if (hints->driver_watchdog_timeout) {
		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
		else
			adapter->keep_alive_timeout =
				msecs_to_jiffies(hints->driver_watchdog_timeout);
	}
}

static void ena_update_host_info(struct ena_admin_host_info *host_info,
				 struct net_device *netdev)
{
	host_info->supported_network_features[0] =
		netdev->features & GENMASK_ULL(31, 0);
	host_info->supported_network_features[1] =
		(netdev->features & GENMASK_ULL(63, 32)) >> 32;
}
static void ena_timer_service(struct timer_list *t)
{
	struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
	u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
	struct ena_admin_host_info *host_info =
		adapter->ena_dev->host_attr.host_info;

	check_for_missing_keep_alive(adapter);

	check_for_admin_com_state(adapter);

	check_for_missing_completions(adapter);

	check_for_empty_rx_ring(adapter);

	if (debug_area)
		ena_dump_stats_to_buf(adapter, debug_area);

	if (host_info)
		ena_update_host_info(host_info, adapter->netdev);

	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Trigger reset is on\n");
		ena_dump_stats_to_dmesg(adapter);
		queue_work(ena_wq, &adapter->reset_task);
		return;
	}

	/* Reset the timer */
	mod_timer(&adapter->timer_service, jiffies + HZ);
}
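
/* The number of usable I/O queues is the minimum of the online CPU count,
 * the device's RX/TX SQ and CQ limits (or the LLQ limit in device placement
 * mode) and the available MSI-X vectors, keeping one vector for the
 * management interrupt.
 */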
static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
				     struct ena_com_dev *ena_dev,
				     struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	int io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&get_feat_ctx->max_queue_ext.max_queue_ext;
		io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
				  max_queue_ext->max_rx_cq_num);

		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&get_feat_ctx->max_queues;
		io_tx_sq_num = max_queues->max_sq_num;
		io_tx_cq_num = max_queues->max_cq_num;
		io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
	}

	/* In case of LLQ use the llq fields for the tx SQ/CQ */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;

	max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
	max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
	max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
	max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
	/* 1 IRQ for mgmnt and 1 IRQs for each IO direction */
	max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
	if (unlikely(!max_num_io_queues)) {
		dev_err(&pdev->dev, "The device doesn't have io queues\n");
		return -EFAULT;
	}

	return max_num_io_queues;
}
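
/* LLQ (low latency queue) placement is only used when the device advertises
 * the feature, the configuration handshake succeeds and the LLQ memory BAR
 * is exposed; otherwise TX descriptors stay in host memory.
 */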
static int ena_set_queues_placement_policy(struct pci_dev *pdev,
					   struct ena_com_dev *ena_dev,
					   struct ena_admin_feature_llq_desc *llq,
					   struct ena_llq_configurations *llq_default_configurations)
{
	bool has_mem_bar;
	int rc;
	u32 llq_feature_mask;

	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
	if (!(ena_dev->supported_features & llq_feature_mask)) {
		dev_err(&pdev->dev,
			"LLQ is not supported. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);

	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
	if (unlikely(rc)) {
		dev_err(&pdev->dev,
			"Failed to configure the device mode. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	/* Nothing to config, exit */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return 0;

	if (!has_mem_bar) {
		dev_err(&pdev->dev,
			"ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
					   pci_resource_start(pdev, ENA_MEM_BAR),
					   pci_resource_len(pdev, ENA_MEM_BAR));

	if (!ena_dev->mem_bar)
		return -EFAULT;

	return 0;
}
static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
				 struct net_device *netdev)
{
	netdev_features_t dev_features = 0;

	/* Set offload features */
	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
		dev_features |= NETIF_F_IP_CSUM;

	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
		dev_features |= NETIF_F_IPV6_CSUM;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
		dev_features |= NETIF_F_TSO;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
		dev_features |= NETIF_F_TSO6;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
		dev_features |= NETIF_F_TSO_ECN;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	netdev->features =
		dev_features |
		NETIF_F_SG |
		NETIF_F_RXHASH |
		NETIF_F_HIGHDMA;

	netdev->hw_features |= netdev->features;
	netdev->vlan_features |= netdev->features;
}
static void ena_set_conf_feat_params(struct ena_adapter *adapter,
				     struct ena_com_dev_get_features_ctx *feat)
{
	struct net_device *netdev = adapter->netdev;

	/* Copy mac address */
	if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	} else {
		ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
		ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	}

	/* Set offload features */
	ena_set_dev_offloads(feat, netdev);

	adapter->max_mtu = feat->dev_attr.max_mtu;
	netdev->max_mtu = adapter->max_mtu;
	netdev->min_mtu = ENA_MIN_MTU;
}
static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	u32 val;
	int rc, i;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		dev_err(dev, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != -EOPNOTSUPP))) {
			dev_err(dev, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
		dev_err(dev, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
		dev_err(dev, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}

static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
	int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;

	pci_release_selected_regions(pdev, release_bars);
}
static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
{
	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
	llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
	llq_config->llq_ring_entry_size_value = 128;
}
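
/* Derive the default and maximum ring sizes from the device capabilities.
 * Sizes are rounded down to a power of two and the defaults are clamped
 * between ENA_MIN_RING_SIZE and the per-direction maximum.
 */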
static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
	u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
	u32 max_tx_queue_size;
	u32 max_rx_queue_size;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
					  max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  llq->max_llq_depth);
		else
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  max_queue_ext->max_tx_sq_depth);

		ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
					     max_queue_ext->max_per_packet_tx_descs);
		ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
					     max_queue_ext->max_per_packet_rx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
					  max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  llq->max_llq_depth);
		else
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  max_queues->max_sq_depth);

		ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
					     max_queues->max_packet_tx_descs);
		ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
					     max_queues->max_packet_rx_descs);
	}

	max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
	max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);

	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
				  max_tx_queue_size);
	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
				  max_rx_queue_size);

	tx_queue_size = rounddown_pow_of_two(tx_queue_size);
	rx_queue_size = rounddown_pow_of_two(rx_queue_size);

	ctx->max_tx_queue_size = max_tx_queue_size;
	ctx->max_rx_queue_size = max_rx_queue_size;
	ctx->tx_queue_size = tx_queue_size;
	ctx->rx_queue_size = rx_queue_size;

	return 0;
}
4093 /* ena_probe - Device Initialization Routine
4094 * @pdev: PCI device information struct
4095 * @ent: entry in ena_pci_tbl
4097 * Returns 0 on success, negative on failure
4099 * ena_probe initializes an adapter identified by a pci_dev structure.
4100 * The OS initialization, configuring of the adapter private structure,
4101 * and a hardware reset occur.
4103 static int ena_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
4105 struct ena_com_dev_get_features_ctx get_feat_ctx
;
4106 struct ena_calc_queue_size_ctx calc_queue_ctx
= { 0 };
4107 struct ena_llq_configurations llq_config
;
4108 struct ena_com_dev
*ena_dev
= NULL
;
4109 struct ena_adapter
*adapter
;
4110 struct net_device
*netdev
;
4111 static int adapters_found
;
4112 u32 max_num_io_queues
;
4113 char *queue_type_str
;
4117 dev_dbg(&pdev
->dev
, "%s\n", __func__
);
4119 dev_info_once(&pdev
->dev
, "%s", version
);
4121 rc
= pci_enable_device_mem(pdev
);
4123 dev_err(&pdev
->dev
, "pci_enable_device_mem() failed!\n");
4127 pci_set_master(pdev
);
4129 ena_dev
= vzalloc(sizeof(*ena_dev
));
4132 goto err_disable_device
;
4135 bars
= pci_select_bars(pdev
, IORESOURCE_MEM
) & ENA_BAR_MASK
;
4136 rc
= pci_request_selected_regions(pdev
, bars
, DRV_MODULE_NAME
);
4138 dev_err(&pdev
->dev
, "pci_request_selected_regions failed %d\n",
4140 goto err_free_ena_dev
;
4143 ena_dev
->reg_bar
= devm_ioremap(&pdev
->dev
,
4144 pci_resource_start(pdev
, ENA_REG_BAR
),
4145 pci_resource_len(pdev
, ENA_REG_BAR
));
4146 if (!ena_dev
->reg_bar
) {
4147 dev_err(&pdev
->dev
, "failed to remap regs bar\n");
4149 goto err_free_region
;
4152 ena_dev
->dmadev
= &pdev
->dev
;
4154 rc
= ena_device_init(ena_dev
, pdev
, &get_feat_ctx
, &wd_state
);
4156 dev_err(&pdev
->dev
, "ena device init failed\n");
4159 goto err_free_region
;
4162 set_default_llq_configurations(&llq_config
);
4164 rc
= ena_set_queues_placement_policy(pdev
, ena_dev
, &get_feat_ctx
.llq
,
4167 dev_err(&pdev
->dev
, "ena device init failed\n");
4168 goto err_device_destroy
;
4171 calc_queue_ctx
.ena_dev
= ena_dev
;
4172 calc_queue_ctx
.get_feat_ctx
= &get_feat_ctx
;
4173 calc_queue_ctx
.pdev
= pdev
;
4175 /* Initial Tx and RX interrupt delay. Assumes 1 usec granularity.
4176 * Updated during device initialization with the real granularity
4178 ena_dev
->intr_moder_tx_interval
= ENA_INTR_INITIAL_TX_INTERVAL_USECS
;
4179 ena_dev
->intr_moder_rx_interval
= ENA_INTR_INITIAL_RX_INTERVAL_USECS
;
4180 ena_dev
->intr_delay_resolution
= ENA_DEFAULT_INTR_DELAY_RESOLUTION
;
4181 max_num_io_queues
= ena_calc_max_io_queue_num(pdev
, ena_dev
, &get_feat_ctx
);
4182 rc
= ena_calc_io_queue_size(&calc_queue_ctx
);
4183 if (rc
|| !max_num_io_queues
) {
4185 goto err_device_destroy
;
4188 /* dev zeroed in init_etherdev */
4189 netdev
= alloc_etherdev_mq(sizeof(struct ena_adapter
), max_num_io_queues
);
4191 dev_err(&pdev
->dev
, "alloc_etherdev_mq failed\n");
4193 goto err_device_destroy
;
4196 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
4198 adapter
= netdev_priv(netdev
);
4199 pci_set_drvdata(pdev
, adapter
);
4201 adapter
->ena_dev
= ena_dev
;
4202 adapter
->netdev
= netdev
;
4203 adapter
->pdev
= pdev
;
4205 ena_set_conf_feat_params(adapter
, &get_feat_ctx
);
4207 adapter
->msg_enable
= netif_msg_init(debug
, DEFAULT_MSG_ENABLE
);
4208 adapter
->reset_reason
= ENA_REGS_RESET_NORMAL
;
4210 adapter
->requested_tx_ring_size
= calc_queue_ctx
.tx_queue_size
;
4211 adapter
->requested_rx_ring_size
= calc_queue_ctx
.rx_queue_size
;
4212 adapter
->max_tx_ring_size
= calc_queue_ctx
.max_tx_queue_size
;
4213 adapter
->max_rx_ring_size
= calc_queue_ctx
.max_rx_queue_size
;
4214 adapter
->max_tx_sgl_size
= calc_queue_ctx
.max_tx_sgl_size
;
4215 adapter
->max_rx_sgl_size
= calc_queue_ctx
.max_rx_sgl_size
;
4217 adapter
->num_io_queues
= max_num_io_queues
;
4218 adapter
->max_num_io_queues
= max_num_io_queues
;
4220 adapter
->xdp_first_ring
= 0;
4221 adapter
->xdp_num_queues
= 0;
4223 adapter
->last_monitored_tx_qid
= 0;
4225 adapter
->rx_copybreak
= ENA_DEFAULT_RX_COPYBREAK
;
4226 adapter
->wd_state
= wd_state
;
4228 snprintf(adapter
->name
, ENA_NAME_MAX_LEN
, "ena_%d", adapters_found
);
4230 rc
= ena_com_init_interrupt_moderation(adapter
->ena_dev
);
4233 "Failed to query interrupt moderation feature\n");
4234 goto err_netdev_destroy
;
4236 ena_init_io_rings(adapter
,
4238 adapter
->xdp_num_queues
+
4239 adapter
->num_io_queues
);
4241 netdev
->netdev_ops
= &ena_netdev_ops
;
4242 netdev
->watchdog_timeo
= TX_TIMEOUT
;
4243 ena_set_ethtool_ops(netdev
);
4245 netdev
->priv_flags
|= IFF_UNICAST_FLT
;
4247 u64_stats_init(&adapter
->syncp
);
4249 rc
= ena_enable_msix_and_set_admin_interrupts(adapter
);
4252 "Failed to enable and set the admin interrupts\n");
4253 goto err_worker_destroy
;
4255 rc
= ena_rss_init_default(adapter
);
4256 if (rc
&& (rc
!= -EOPNOTSUPP
)) {
4257 dev_err(&pdev
->dev
, "Cannot init RSS rc: %d\n", rc
);
4261 ena_config_debug_area(adapter
);
4263 memcpy(adapter
->netdev
->perm_addr
, adapter
->mac_addr
, netdev
->addr_len
);
4265 netif_carrier_off(netdev
);
4267 rc
= register_netdev(netdev
);
4269 dev_err(&pdev
->dev
, "Cannot register net device\n");
4273 INIT_WORK(&adapter
->reset_task
, ena_fw_reset_device
);

	adapter->last_keep_alive_jiffies = jiffies;
	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
	adapter->missing_tx_completion_to = TX_TIMEOUT;
	adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;

	ena_update_hints(adapter, &get_feat_ctx.hw_hints);

	timer_setup(&adapter->timer_service, ena_timer_service, 0);
	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		queue_type_str = "Regular";
	else
		queue_type_str = "Low Latency";

	dev_info(&pdev->dev,
		 "%s found at mem %lx, mac addr %pM, Placement policy: %s\n",
		 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
		 netdev->dev_addr, queue_type_str);

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	adapters_found++;

	return 0;
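
/* Error unwind: labels are ordered so that each failure point falls through
 * and releases only the resources that were acquired before it.
 */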
err_rss:
	ena_com_delete_debug_area(ena_dev);
	ena_com_rss_destroy(ena_dev);
err_free_msix:
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
	/* stop submitting admin commands on a device that was reset */
	ena_com_set_admin_running_state(ena_dev, false);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_worker_destroy:
	del_timer(&adapter->timer_service);
err_netdev_destroy:
	free_netdev(netdev);
err_device_destroy:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_free_region:
	ena_release_bars(ena_dev, pdev);
err_free_ena_dev:
	vfree(ena_dev);
err_disable_device:
	pci_disable_device(pdev);
	return rc;
}

/*****************************************************************************/

/* ena_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ena_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void ena_remove(struct pci_dev *pdev)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	struct ena_com_dev *ena_dev;
	struct net_device *netdev;

	ena_dev = adapter->ena_dev;
	netdev = adapter->netdev;

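	/* Release the aRFS CPU reverse map (if one was allocated) before the
	 * MSI-X vectors it references are torn down.
	 */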
#ifdef CONFIG_RFS_ACCEL
	if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
		free_irq_cpu_rmap(netdev->rx_cpu_rmap);
		netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */
	del_timer_sync(&adapter->timer_service);

	cancel_work_sync(&adapter->reset_task);

	rtnl_lock();
	ena_destroy_device(adapter, true);
	rtnl_unlock();

	unregister_netdev(netdev);

	free_netdev(netdev);

	ena_com_rss_destroy(ena_dev);

	ena_com_delete_debug_area(ena_dev);

	ena_com_delete_host_info(ena_dev);

	ena_release_bars(ena_dev, pdev);

	pci_disable_device(pdev);

	vfree(ena_dev);
}

/* ena_suspend - PM suspend callback
 * @pdev: PCI device information struct
 * @state: power state
 */
static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.suspend++;
	u64_stats_update_end(&adapter->syncp);

	rtnl_lock();
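	/* A reset request racing with suspend is dropped here:
	 * ena_destroy_device() below performs a full teardown anyway and
	 * ena_resume() re-initializes the device.
	 */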
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"ignoring device reset request as the device is being suspended\n");
		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
	ena_destroy_device(adapter, true);
	rtnl_unlock();
	return 0;
}

/* ena_resume - PM resume callback
 * @pdev: PCI device information struct
 */
static int ena_resume(struct pci_dev *pdev)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	int rc;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.resume++;
	u64_stats_update_end(&adapter->syncp);

	rtnl_lock();
	rc = ena_restore_device(adapter);
	rtnl_unlock();
	return rc;
}
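
/* Legacy PCI power-management callbacks handle suspend/resume; SR-IOV virtual
 * functions are enabled through the generic pci_sriov_configure_simple()
 * helper.
 */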
static struct pci_driver ena_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= ena_pci_tbl,
	.probe		= ena_probe,
	.remove		= ena_remove,
	.suspend	= ena_suspend,
	.resume		= ena_resume,
	.sriov_configure = pci_sriov_configure_simple,
};
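
/* ena_wq is a module-wide single-threaded workqueue; per-adapter reset work
 * items are queued on it.
 */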
static int __init ena_init(void)
{
	pr_info("%s", version);

	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
	if (!ena_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&ena_pci_driver);
}

static void __exit ena_cleanup(void)
{
	pci_unregister_driver(&ena_pci_driver);

	if (ena_wq) {
		destroy_workqueue(ena_wq);
		ena_wq = NULL;
	}
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
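
/* AENQ (Asynchronous Event Notification Queue) callbacks below are invoked by
 * the ena_com layer from the management interrupt when the device posts an
 * event.
 */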

/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc =
		(struct ena_admin_aenq_link_change_desc *)aenq_e;
	int status = aenq_desc->flags &
		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

	if (status) {
		netdev_dbg(adapter->netdev, "%s\n", __func__);
		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
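		/* Hold off carrier-on while a reset is in flight; the reset
		 * path re-checks ENA_FLAG_LINK_UP once the interface is
		 * brought back up.
		 */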
		if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
			netif_carrier_on(adapter->netdev);
	} else {
		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_off(adapter->netdev);
	}
}

static void ena_keep_alive_wd(void *adapter_data,
			      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_keep_alive_desc *desc;
	u64 rx_drops;

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	adapter->last_keep_alive_jiffies = jiffies;

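	/* The keep-alive descriptor reports the rx drop counter as two 32-bit
	 * halves; merge them into a single 64-bit value.
	 */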
	rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.rx_drops = rx_drops;
	u64_stats_update_end(&adapter->syncp);
}

static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	     "Invalid group(%x) expected %x\n",
	     aenq_e->aenq_common_desc.group,
	     ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid aenq notification link state %d\n",
			  aenq_e->aenq_common_desc.syndrom);
	}
}

/* This handler will be called for unknown event group or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	netif_err(adapter, drv, adapter->netdev,
		  "Unknown event was received or event with unimplemented handler\n");
}
static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

module_init(ena_init);
module_exit(ena_cleanup);