// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>

#include "ena_netdev.h"
#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count);
static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
				   int count);
static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
				  int count);
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					    int first_index, int count);
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index, int count);
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index, int count);
static int ena_up(struct ena_adapter *adapter);
static void ena_down(struct ena_adapter *adapter);
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring);
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring);
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					    int first_index, int count);

/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
static void ena_increase_stat(u64 *statp, u64 cnt,
			      struct u64_stats_sync *syncp)
{
	u64_stats_update_begin(syncp);
	(*statp) += cnt;
	u64_stats_update_end(syncp);
}
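
/*
 * A minimal reader-side sketch (assumption, not part of this file): consumers
 * of these counters, such as an ethtool stats path, are expected to pair the
 * same syncp with u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() so
 * that 32-bit machines never observe a torn 64-bit update:
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&ring->syncp);
 *		val = ring->tx_stats.cnt;
 *	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 */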

static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset
	 * Check that we are not already in the middle of a reset or that one
	 * has already been triggered
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit time out\n");
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}

static int ena_xmit_common(struct net_device *dev,
			   struct ena_ring *ring,
			   struct ena_tx_buffer *tx_info,
			   struct ena_com_tx_ctx *ena_tx_ctx,
			   u16 next_to_use,
			   u32 bytes)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int nb_hw_desc;
	int rc;

	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
						ena_tx_ctx))) {
		netif_dbg(adapter, tx_queued, dev,
			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			  ring->qid);
		ena_com_write_sq_doorbell(ring->ena_com_io_sq);
	}

	/* prepare the packet's descriptors to dma engine */
	rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
				&nb_hw_desc);

	/* In case there isn't enough space in the queue for the packet,
	 * we simply drop it. All other failure reasons of
	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
	 */
	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, dev,
			  "Failed to prepare tx bufs\n");
		ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
				  &ring->syncp);
		if (rc != -ENOMEM) {
			adapter->reset_reason =
				ENA_REGS_RESET_DRIVER_INVALID_STATE;
			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		}
		return rc;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.cnt++;
	ring->tx_stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						 ring->ring_size);
	return 0;
}
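
/*
 * Sketch of how this helper is used (both callers live in this file): the skb
 * transmit path and the XDP transmit path each fill a struct ena_com_tx_ctx,
 * call ena_xmit_common() to push descriptors into the submission queue, and
 * then ring the doorbell themselves, e.g.
 *
 *	rc = ena_xmit_common(dev, ring, tx_info, &ena_tx_ctx,
 *			     next_to_use, bytes);
 *	if (!rc && doorbell_needed)
 *		ena_com_write_sq_doorbell(ring->ena_com_io_sq);
 *
 * "doorbell_needed" above is illustrative only; the real callers decide based
 * on flags such as XDP_XMIT_FLUSH.
 */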

/* This is the XDP napi callback. XDP queues use a separate napi callback
 * from the one used by the regular Rx/Tx queues.
 */
static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	u32 xdp_work_done, xdp_budget;
	struct ena_ring *xdp_ring;
	int napi_comp_call = 0;
	int ret;

	xdp_ring = ena_napi->xdp_ring;
	xdp_ring->first_interrupt = ena_napi->first_interrupt;

	xdp_budget = budget;

	if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);

	/* If the device is about to reset or down, avoid unmask
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if (xdp_budget > xdp_work_done) {
		napi_comp_call = 1;
		if (napi_complete_done(napi, xdp_work_done))
			ena_unmask_interrupt(xdp_ring, NULL);
		ena_update_ring_numa_node(xdp_ring, NULL);
		ret = xdp_work_done;
	} else {
		ret = xdp_budget;
	}

	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.napi_comp += napi_comp_call;
	xdp_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&xdp_ring->syncp);

	return ret;
}

static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
				struct ena_tx_buffer *tx_info,
				struct xdp_frame *xdpf,
				void **push_hdr,
				u32 *push_len)
{
	struct ena_adapter *adapter = xdp_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma = 0;
	u32 size;

	tx_info->xdpf = xdpf;
	size = tx_info->xdpf->len;
	ena_buf = tx_info->bufs;

	/* llq push buffer */
	*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
	*push_hdr = tx_info->xdpf->data;

	if (size - *push_len > 0) {
		dma = dma_map_single(xdp_ring->dev,
				     *push_hdr + *push_len,
				     size - *push_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 1;
		tx_info->num_of_bufs = 1;
	}

	ena_buf->paddr = dma;
	ena_buf->len = size;

	return 0;

error_report_dma_error:
	ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
			  &xdp_ring->syncp);
	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");

	xdp_return_frame_rx_napi(tx_info->xdpf);
	tx_info->xdpf = NULL;
	tx_info->num_of_bufs = 0;

	return -EINVAL;
}

static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
			      struct net_device *dev,
			      struct xdp_frame *xdpf,
			      int flags)
{
	struct ena_com_tx_ctx ena_tx_ctx = {};
	struct ena_tx_buffer *tx_info;
	u16 next_to_use, req_id;
	void *push_hdr;
	u32 push_len;
	int rc;

	next_to_use = xdp_ring->next_to_use;
	req_id = xdp_ring->free_ids[next_to_use];
	tx_info = &xdp_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
	if (unlikely(rc))
		goto error_drop_packet;

	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = push_len;

	rc = ena_xmit_common(dev,
			     xdp_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdpf->len);
	if (rc)
		goto error_unmap_dma;
	/* trigger the dma engine. ena_com_write_sq_doorbell()
	 * has a memory barrier.
	 */
	if (flags & XDP_XMIT_FLUSH) {
		ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
		ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
				  &xdp_ring->syncp);
	}

	return rc;

error_unmap_dma:
	ena_unmap_tx_buff(xdp_ring, tx_info);
	tx_info->xdpf = NULL;
error_drop_packet:
	xdp_return_frame(xdpf);
	return rc;
}
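
/*
 * Note on locking: both users of ena_xdp_xmit_frame() in this file
 * (ena_xdp_xmit() and the XDP_TX path in ena_xdp_execute()) take
 * xdp_ring->xdp_tx_lock around the call, since several CPUs may map onto the
 * same XDP TX ring.
 */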

static int ena_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int qid, i, err, drops = 0;
	struct ena_ring *xdp_ring;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return -ENETDOWN;

	/* We assume that all rings have the same XDP program */
	if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
		return -ENXIO;

	qid = smp_processor_id() % adapter->xdp_num_queues;
	qid += adapter->xdp_first_ring;
	xdp_ring = &adapter->tx_ring[qid];

	/* Other CPU ids might try to send through this queue */
	spin_lock(&xdp_ring->xdp_tx_lock);

	for (i = 0; i < n; i++) {
		err = ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0);
		/* The descriptor is freed by ena_xdp_xmit_frame in case
		 * of an error.
		 */
		if (err)
			drops++;
	}

	/* Ring doorbell to make device aware of the packets */
	if (flags & XDP_XMIT_FLUSH) {
		ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
		ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
				  &xdp_ring->syncp);
	}

	spin_unlock(&xdp_ring->xdp_tx_lock);

	/* Return number of packets sent */
	return n - drops;
}
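
/*
 * Queue selection above maps the executing CPU onto one of the dedicated XDP
 * TX rings: those rings are laid out in tx_ring[] right after the regular I/O
 * queues, starting at xdp_first_ring, which is why the spinlock is still
 * needed when the CPU count exceeds xdp_num_queues.
 */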

static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	struct bpf_prog *xdp_prog;
	struct ena_ring *xdp_ring;
	u32 verdict = XDP_PASS;
	struct xdp_frame *xdpf;
	u64 *xdp_stat;
	int qid;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		goto out;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (verdict) {
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
			xdp_stat = &rx_ring->rx_stats.xdp_aborted;
			break;
		}

		/* Find xmit queue */
		qid = rx_ring->qid + rx_ring->adapter->num_io_queues;
		xdp_ring = &rx_ring->adapter->tx_ring[qid];

		/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
		spin_lock(&xdp_ring->xdp_tx_lock);

		ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, XDP_XMIT_FLUSH);

		spin_unlock(&xdp_ring->xdp_tx_lock);
		xdp_stat = &rx_ring->rx_stats.xdp_tx;
		break;
	case XDP_REDIRECT:
		if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
			xdp_stat = &rx_ring->rx_stats.xdp_redirect;
			break;
		}
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		break;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		break;
	case XDP_DROP:
		xdp_stat = &rx_ring->rx_stats.xdp_drop;
		break;
	case XDP_PASS:
		xdp_stat = &rx_ring->rx_stats.xdp_pass;
		break;
	default:
		bpf_warn_invalid_xdp_action(verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
	}

	ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
out:
	rcu_read_unlock();

	return verdict;
}
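
/*
 * The verdict returned here is consumed by ena_clean_rx_irq(): XDP_PASS means
 * an skb is still built for the packet, while XDP_TX/XDP_REDIRECT make the RX
 * path unmap the buffer and hand ownership to the XDP TX machinery.
 */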

static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
{
	adapter->xdp_first_ring = adapter->num_io_queues;
	adapter->xdp_num_queues = adapter->num_io_queues;

	ena_init_io_rings(adapter,
			  adapter->xdp_first_ring,
			  adapter->xdp_num_queues);
}

static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
					     adapter->xdp_num_queues);
	if (rc)
		return rc;

	rc = ena_create_io_tx_queues_in_range(adapter,
					      adapter->xdp_first_ring,
					      adapter->xdp_num_queues);
	if (rc)
		ena_free_all_io_tx_resources(adapter);

	return rc;
}

/* Provides a way for both kernel and bpf-prog to know
 * more about the RX-queue a given XDP frame arrived on.
 */
static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		return rc;
	}

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					NULL);
	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	}

	return rc;
}

static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}

static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
						 struct bpf_prog *prog,
						 int first, int count)
{
	struct ena_ring *rx_ring;
	int i;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		xchg(&rx_ring->xdp_bpf_prog, prog);
		if (prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = 0;
		}
	}
}

static void ena_xdp_exchange_program(struct ena_adapter *adapter,
				     struct bpf_prog *prog)
{
	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);

	ena_xdp_exchange_program_rx_in_range(adapter,
					     prog,
					     0,
					     adapter->num_io_queues);

	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);
}

static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
{
	bool was_up;
	int rc;

	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	if (was_up)
		ena_down(adapter);

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;
	ena_xdp_exchange_program(adapter, NULL);
	if (was_up) {
		rc = ena_up(adapter);
		if (rc)
			return rc;
	}
	return 0;
}

static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	int rc, prev_mtu;
	bool is_up;

	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	rc = ena_xdp_allowed(adapter);
	if (rc == ENA_XDP_ALLOWED) {
		old_bpf_prog = adapter->xdp_bpf_prog;
		if (prog) {
			if (!is_up) {
				ena_init_all_xdp_queues(adapter);
			} else if (!old_bpf_prog) {
				ena_down(adapter);
				ena_init_all_xdp_queues(adapter);
			}
			ena_xdp_exchange_program(adapter, prog);

			if (is_up && !old_bpf_prog) {
				rc = ena_up(adapter);
				if (rc)
					return rc;
			}
		} else if (old_bpf_prog) {
			rc = ena_destroy_and_free_all_xdp_queues(adapter);
			if (rc)
				return rc;
		}

		prev_mtu = netdev->max_mtu;
		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;

		if (!old_bpf_prog)
			netif_info(adapter, drv, adapter->netdev,
				   "XDP program is set, changing the max_mtu from %d to %d",
				   prev_mtu, netdev->max_mtu);

	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
			  netdev->mtu, ENA_XDP_MAX_MTU);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
		return -EINVAL;
	} else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
			  adapter->num_io_queues, adapter->max_num_io_queues);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, there is not enough space for allocating XDP queues. Check the dmesg for more info");
		return -EINVAL;
	}

	return 0;
}

/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
 * program as well as to query the current xdp program id.
 */
static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return ena_xdp_set(netdev, bpf);
	default:
		return -EINVAL;
	}
	return 0;
}

static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < adapter->num_io_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}

static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = first_index; i < first_index + count; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX common ring state */
		ena_init_io_rings_common(adapter, txr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->requested_tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
		txr->disable_meta_caching = adapter->disable_meta_caching;
		spin_lock_init(&txr->xdp_tx_lock);

		/* Don't init RX queues for xdp queues */
		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			/* RX common ring state */
			ena_init_io_rings_common(adapter, rxr, i);

			/* RX specific ring state */
			rxr->ring_size = adapter->requested_rx_ring_size;
			rxr->rx_copybreak = adapter->rx_copybreak;
			rxr->sgl_size = adapter->max_rx_sgl_size;
			rxr->smoothed_interval =
				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
			rxr->empty_rx_queue = 0;
			adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}
	}
}

/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_tx_free_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
err_tx_free_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}
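
/*
 * Allocation note: the vzalloc_node()/vzalloc() pairs above first try the NUMA
 * node of the queue's IRQ CPU and only then fall back to any node, so a failed
 * node-local allocation is not treated as an error. The same pattern is
 * repeated for the RX resources below.
 */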

/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}

static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					    int first_index,
					    int count)
{
	int i, rc = 0;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:
	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (first_index < i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
						  int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		ena_free_tx_resources(adapter, i);
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	ena_free_all_io_tx_resources_in_range(adapter,
					      0,
					      adapter->xdp_num_queues +
					      adapter->num_io_queues);
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_ids);
	rx_ring->free_ids = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:
	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}

static int ena_alloc_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	int headroom = rx_ring->rx_headroom;
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* restore page offset value in case it has been changed by device */
	rx_info->page_offset = headroom;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
				  &rx_ring->syncp);
		return -ENOMEM;
	}

	/* To enable NIC-side port-mirroring, AKA SPAN port,
	 * we make the buffer readable from the nic as well
	 */
	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
				  &rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "Allocate page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma + headroom;
	ena_buf->len = ENA_PAGE_SIZE - headroom;

	return 0;
}

static void ena_unmap_rx_buff(struct ena_ring *rx_ring,
			      struct ena_rx_buffer *rx_info)
{
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
		       ENA_PAGE_SIZE, DMA_BIDIRECTIONAL);
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	ena_unmap_rx_buff(rx_ring, rx_info);

	__free_page(page);
	rx_info->page = NULL;
}

static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_ids[next_to_use];

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "Failed to allocate buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "Failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1,
				  &rx_ring->syncp);
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Refilled rx qid %d with only %d buffers (from %d)\n",
			   rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}
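
/*
 * The return value is the number of buffers actually posted, which may be
 * smaller than @num under memory pressure; ena_refill_all_rx_bufs() below
 * compares it against the requested count and only warns, leaving the ring
 * partially filled until the next refill attempt.
 */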

static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "Refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring whose buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
				     "Free uncompleted tx skb qid %d idx 0x%x\n",
				     tx_ring->qid, i);
			print_once = false;
		} else {
			netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
				  "Free uncompleted tx skb qid %d idx 0x%x\n",
				  tx_ring->qid, i);
		}

		ena_unmap_tx_buff(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}

static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
				 struct ena_tx_buffer *tx_info, bool is_xdp)
{
	if (tx_info)
		netif_err(ring->adapter,
			  tx_done, ring->netdev,
			  "tx_info doesn't have valid %s",
			  is_xdp ? "xdp frame" : "skb");
	else
		netif_err(ring->adapter,
			  tx_done, ring->netdev,
			  "Invalid req_id: %hu\n",
			  req_id);

	ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);

	/* Trigger device reset */
	ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}

static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < xdp_ring->ring_size)) {
		tx_info = &xdp_ring->tx_buffer_info[req_id];
		if (likely(tx_info->xdpf))
			return 0;
	}

	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}

static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_buff(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh &&
		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
			netif_tx_wake_queue(txq);
			ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
					  &tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	return tx_pkts;
}
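
/*
 * Wake-up note: the queue-stopped check is performed twice on purpose - once
 * lockless as a fast path, then again under __netif_tx_lock() - so that a
 * concurrent transmit path stopping the queue cannot race with the wake-up
 * and leave the queue stopped forever. The smp_mb() above pairs with the
 * stop/space check on the transmit side.
 */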

static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
	struct sk_buff *skb;

	if (frags)
		skb = napi_get_frags(rx_ring->napi);
	else
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);

	if (unlikely(!skb)) {
		ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
				  &rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. frags: %d\n", frags);
	}

	return skb;
}

static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info;
	u16 len, req_id, buf = 0;
	void *va;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;

	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;

	prefetch(va);

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, false);
		if (unlikely(!skb))
			return NULL;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "RX allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = ena_alloc_skb(rx_ring, true);
	if (unlikely(!skb))
		return NULL;

	do {
		ena_unmap_rx_buff(rx_ring, rx_info);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, ENA_PAGE_SIZE);

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "RX skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;

		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;

		rx_info = &rx_ring->rx_buffer_info[req_id];
	} while (1);

	return skb;
}

/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: structure containing adapter specific data
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static void ena_rx_checksum(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
				  &rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
					  &rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		if (likely(ena_rx_ctx->l4_csum_checked)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			ena_increase_stat(&rx_ring->rx_stats.csum_good, 1,
					  &rx_ring->syncp);
		} else {
			ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1,
					  &rx_ring->syncp);
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
}

static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}

static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	struct ena_rx_buffer *rx_info;
	int ret;

	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
	xdp->data = page_address(rx_info->page) + rx_info->page_offset;
	xdp_set_data_meta_invalid(xdp);
	xdp->data_hard_start = page_address(rx_info->page);
	xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
	/* If for some reason we received a bigger packet than
	 * we expect, then we simply drop it
	 */
	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
		return XDP_DROP;

	ret = ena_xdp_execute(rx_ring, xdp);

	/* The xdp program might expand the headers */
	if (ret == XDP_PASS) {
		rx_info->page_offset = xdp->data - xdp->data_hard_start;
		rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
	}

	return ret;
}

/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_rx_buffer *rx_info;
	struct ena_adapter *adapter;
	u32 res_budget, work_done;
	int rx_copybreak_pkt = 0;
	int refill_threshold;
	struct sk_buff *skb;
	int refill_required;
	struct xdp_buff xdp;
	int xdp_flags = 0;
	int total_len = 0;
	int xdp_verdict;
	int rc = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;
	xdp.rxq = &rx_ring->xdp_rxq;
	xdp.frame_sz = ENA_PAGE_SIZE;

	do {
		xdp_verdict = XDP_PASS;
		skb = NULL;
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		ena_rx_ctx.pkt_offset = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		/* First descriptor might have an offset set by the device */
		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
		rx_info->page_offset += ena_rx_ctx.pkt_offset;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		if (ena_xdp_present_ring(rx_ring))
			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);

		/* allocate skb and fill it */
		if (xdp_verdict == XDP_PASS)
			skb = ena_rx_skb(rx_ring,
					 rx_ring->ena_bufs,
					 ena_rx_ctx.descs,
					 &next_to_clean);

		if (unlikely(!skb)) {
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				int req_id = rx_ring->ena_bufs[i].req_id;

				rx_ring->free_ids[next_to_clean] = req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);

				/* The packet was passed for transmission,
				 * unmap it from the RX side.
				 */
				if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) {
					ena_unmap_rx_buff(rx_ring,
							  &rx_ring->rx_buffer_info[req_id]);
					rx_ring->rx_buffer_info[req_id].page = NULL;
				}
			}
			if (xdp_verdict != XDP_PASS) {
				xdp_flags |= xdp_verdict;
				res_budget--;
				continue;
			}
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
	refill_threshold =
		min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
		      ENA_RX_REFILL_THRESH_PACKET);

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	if (xdp_flags & XDP_REDIRECT)
		xdp_do_flush_map();

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	if (rc == -ENOSPC) {
		ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
				  &rx_ring->syncp);
		adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	} else {
		ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
				  &rx_ring->syncp);
		adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	}

	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}

static void ena_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder cur_moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);

	ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
	dim->state = DIM_START_MEASURE;
}

static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
{
	struct dim_sample dim_sample;
	struct ena_ring *rx_ring = ena_napi->rx_ring;

	if (!rx_ring->per_napi_packets)
		return;

	rx_ring->non_empty_napi_events++;

	dim_update_sample(rx_ring->non_empty_napi_events,
			  rx_ring->rx_stats.cnt,
			  rx_ring->rx_stats.bytes,
			  &dim_sample);

	net_dim(&ena_napi->dim, dim_sample);

	rx_ring->per_napi_packets = 0;
}
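
/*
 * DIM flow: net_dim() may schedule ena_dim_work() (defined above) when the
 * sample suggests a different moderation profile; that work item updates
 * rx_ring->smoothed_interval, which ena_unmask_interrupt() below then writes
 * into the device's interrupt register on the next unmask.
 */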

static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring)
{
	struct ena_eth_io_intr_reg intr_reg;
	u32 rx_interval = 0;
	/* Rx ring can be NULL for XDP tx queues which don't have an
	 * accompanying rx_ring pair.
	 */
	if (rx_ring)
		rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
			rx_ring->smoothed_interval :
			ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_interval,
				tx_ring->smoothed_interval,
				true);

	ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1,
			  &tx_ring->syncp);

	/* It is a shared MSI-X.
	 * Tx and Rx CQ have pointer to it.
	 * So we use one of them to reach the intr reg
	 * The Tx ring is used because the rx_ring is NULL for XDP queues
	 */
	ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}

static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		if (rx_ring)
			ena_com_update_numa_node(rx_ring->ena_com_io_cq,
						 numa_node);
	}

	tx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}
*xdp_ring
, u32 budget
)
1864 if (unlikely(!xdp_ring
))
1866 next_to_clean
= xdp_ring
->next_to_clean
;
1868 while (tx_pkts
< budget
) {
1869 struct ena_tx_buffer
*tx_info
;
1870 struct xdp_frame
*xdpf
;
1872 rc
= ena_com_tx_comp_req_id_get(xdp_ring
->ena_com_io_cq
,
1877 rc
= validate_xdp_req_id(xdp_ring
, req_id
);
1881 tx_info
= &xdp_ring
->tx_buffer_info
[req_id
];
1882 xdpf
= tx_info
->xdpf
;
1884 tx_info
->xdpf
= NULL
;
1885 tx_info
->last_jiffies
= 0;
1886 ena_unmap_tx_buff(xdp_ring
, tx_info
);
1888 netif_dbg(xdp_ring
->adapter
, tx_done
, xdp_ring
->netdev
,
1889 "tx_poll: q %d skb %p completed\n", xdp_ring
->qid
,
1892 tx_bytes
+= xdpf
->len
;
1894 total_done
+= tx_info
->tx_descs
;
1896 xdp_return_frame(xdpf
);
1897 xdp_ring
->free_ids
[next_to_clean
] = req_id
;
1898 next_to_clean
= ENA_TX_RING_IDX_NEXT(next_to_clean
,
1899 xdp_ring
->ring_size
);
1902 xdp_ring
->next_to_clean
= next_to_clean
;
1903 ena_com_comp_ack(xdp_ring
->ena_com_io_sq
, total_done
);
1904 ena_com_update_dev_comp_head(xdp_ring
->ena_com_io_cq
);
1906 netif_dbg(xdp_ring
->adapter
, tx_done
, xdp_ring
->netdev
,
1907 "tx_poll: q %d done. total pkts: %d\n",
1908 xdp_ring
->qid
, tx_pkts
);

static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;
	int tx_work_done;
	int rx_work_done = 0;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_ring->first_interrupt = ena_napi->first_interrupt;
	rx_ring->first_interrupt = ena_napi->first_interrupt;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	/* On netpoll the budget is zero and the handler should only clean the
	 * tx completions.
	 */
	if (likely(budget))
		rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or down, avoid unmask
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;

	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa and unmask the interrupt only when schedule
		 * from the interrupt context (vs from sk_busy_loop)
		 */
		if (napi_complete_done(napi, rx_work_done) &&
		    READ_ONCE(ena_napi->interrupts_masked)) {
			smp_rmb(); /* make sure interrupts_masked is read */
			WRITE_ONCE(ena_napi->interrupts_masked, false);
			/* We apply adaptive moderation on Rx path only.
			 * Tx uses static interrupt moderation.
			 */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_adaptive_rx_intr_moderation(ena_napi);

			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ena_update_ring_numa_node(tx_ring, rx_ring);

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	return ret;
}

static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}

/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	ena_napi->first_interrupt = true;

	WRITE_ONCE(ena_napi->interrupts_masked, true);
	smp_wmb(); /* write interrupts_masked before calling napi */

	napi_schedule_irqoff(&ena_napi->napi);

	return IRQ_HANDLED;
}
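
/*
 * The WRITE_ONCE()/smp_wmb() here pairs with the READ_ONCE()/smp_rmb() on
 * interrupts_masked in ena_io_poll(): the poller only re-enables (unmasks)
 * the interrupt when it observes that this handler actually ran, which keeps
 * busy-poll invocations from unmasking prematurely.
 */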

/* Reserve a single MSI-X vector for management (admin + aenq).
 * plus reserve one vector for each potential io queue.
 * the number of potential io queues is the minimum of what the device
 * supports and the number of vCPUs.
 */
static int ena_enable_msix(struct ena_adapter *adapter)
{
	int msix_vecs, irq_cnt;

	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Error, MSI-X is already enabled\n");
		return -EPERM;
	}

	/* Reserved the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
	netif_dbg(adapter, probe, adapter->netdev,
		  "Trying to enable MSI-X, vectors %d\n", msix_vecs);

	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_cnt < 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
		return -ENOSPC;
	}

	if (irq_cnt != msix_vecs) {
		netif_notice(adapter, probe, adapter->netdev,
			     "Enable only %d MSI-X (out of %d), reduce the number of queues\n",
			     irq_cnt, msix_vecs);
		adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
	}

	if (ena_init_rx_cpu_rmap(adapter))
		netif_warn(adapter, probe, adapter->netdev,
			   "Failed to map IRQs to CPUs\n");

	adapter->msix_vecs = irq_cnt;
	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

	return 0;
}
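
/*
 * Rough vector accounting (assuming ENA_MAX_MSIX_VEC(x) expands to x plus the
 * single management vector, i.e. ENA_ADMIN_MSIX_VEC == 1): with 8 I/O queues
 * the driver asks for 9 vectors; if the PCI core only grants, say, 5, the code
 * above trims num_io_queues to 5 - 1 = 4.
 */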

static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}

static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;
	int io_queue_count;

	netdev = adapter->netdev;
	io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;

	for (i = 0; i < io_queue_count; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			pci_irq_vector(adapter->pdev, irq_idx);
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}

static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
			 irq->data);
	if (rc) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to request admin irq\n");
		return rc;
	}

	netif_dbg(adapter, probe, adapter->netdev,
		  "Set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
		  irq->affinity_hint_mask.bits[0], irq->vector);

	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return rc;
}

static int ena_request_io_irq(struct ena_adapter *adapter)
{
	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc = 0, i, k;

	if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to request I/O IRQ: MSI-X is not enabled\n");
		return -EINVAL;
	}

	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
		irq = &adapter->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 irq->data);
		if (rc) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to request I/O IRQ. index %d rc %d\n",
				  i, rc);
			goto err;
		}

		netif_dbg(adapter, ifup, adapter->netdev,
			  "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
			  i, irq->affinity_hint_mask.bits[0], irq->vector);

		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
	}

	return rc;

err:
	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
		irq = &adapter->irq_tbl[k];
		free_irq(irq->vector, irq->data);
	}

	return rc;
}

static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	synchronize_irq(irq->vector);
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}
*adapter
)
2185 u32 io_queue_count
= adapter
->num_io_queues
+ adapter
->xdp_num_queues
;
2186 struct ena_irq
*irq
;
2189 #ifdef CONFIG_RFS_ACCEL
2190 if (adapter
->msix_vecs
>= 1) {
2191 free_irq_cpu_rmap(adapter
->netdev
->rx_cpu_rmap
);
2192 adapter
->netdev
->rx_cpu_rmap
= NULL
;
2194 #endif /* CONFIG_RFS_ACCEL */
2196 for (i
= ENA_IO_IRQ_FIRST_IDX
; i
< ENA_MAX_MSIX_VEC(io_queue_count
); i
++) {
2197 irq
= &adapter
->irq_tbl
[i
];
2198 irq_set_affinity_hint(irq
->vector
, NULL
);
2199 free_irq(irq
->vector
, irq
->data
);

static void ena_disable_msix(struct ena_adapter *adapter)
{
	if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
		pci_free_irq_vectors(adapter->pdev);
}

static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	int i;

	if (!netif_running(adapter->netdev))
		return;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
		synchronize_irq(adapter->irq_tbl[i].vector);
}

static void ena_del_napi_in_range(struct ena_adapter *adapter,
				  int first_index,
				  int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++) {
		netif_napi_del(&adapter->ena_napi[i].napi);

		WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
			adapter->ena_napi[i].xdp_ring);
	}
}

static void ena_init_napi_in_range(struct ena_adapter *adapter,
				   int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++) {
		struct ena_napi *napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev,
			       &napi->napi,
			       ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
			       ENA_NAPI_BUDGET);

		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			napi->rx_ring = &adapter->rx_ring[i];
			napi->tx_ring = &adapter->tx_ring[i];
		} else {
			napi->xdp_ring = &adapter->tx_ring[i];
		}
	}
}
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index,
				      int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}

static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index,
				     int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}
/* Configure the Rx forwarding */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EOPNOTSUPP)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EOPNOTSUPP))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	return 0;
}
static int ena_up_complete(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_rss_configure(adapter);
	if (rc)
		return rc;

	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

	ena_refill_all_rx_bufs(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	ena_napi_enable_in_range(adapter,
				 0,
				 adapter->xdp_num_queues + adapter->num_io_queues);

	return 0;
}
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_create_io_ctx ctx;
	struct ena_com_dev *ena_dev;
	struct ena_ring *tx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	tx_ring = &adapter->tx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_TXQ_IDX(qid);

	memset(&ctx, 0x0, sizeof(ctx));

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = tx_ring->ring_size;
	ctx.numa_node = cpu_to_node(tx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O TX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &tx_ring->ena_com_io_sq,
				     &tx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
}
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					    int first_index, int count)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_create_io_tx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i-- > first_index)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return rc;
}
static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_dev *ena_dev;
	struct ena_com_create_io_ctx ctx;
	struct ena_ring *rx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	rx_ring = &adapter->rx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_RXQ_IDX(qid);

	memset(&ctx, 0x0, sizeof(ctx));

	ctx.qid = ena_qid;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = rx_ring->ring_size;
	ctx.numa_node = cpu_to_node(rx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O RX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &rx_ring->ena_com_io_sq,
				     &rx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
			  qid, rc);
		goto err;
	}

	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
err:
	ena_com_destroy_io_queue(ena_dev, ena_qid);
	return rc;
}
static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_create_io_rx_queue(adapter, i);
		if (rc)
			goto create_err;
		INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
	}

	return 0;

create_err:
	while (i--) {
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
	}

	return rc;
}
static void set_io_rings_size(struct ena_adapter *adapter,
			      int new_tx_size,
			      int new_rx_size)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		adapter->tx_ring[i].ring_size = new_tx_size;
		adapter->rx_ring[i].ring_size = new_rx_size;
	}
}
/* This function allows queue allocation to backoff when the system is
 * low on memory. If there is not enough memory to allocate io queues
 * the driver will try to allocate smaller queues.
 *
 * The backoff algorithm is as follows:
 *  1. Try to allocate TX and RX and if successful.
 *  1.1. return success
 *
 *  2. Divide by 2 the size of the larger of RX and TX queues (or both if their size is the same).
 *
 *  3. If TX or RX is smaller than 256
 *  3.1. return failure.
 *  4. else
 *  4.1. go back to 1.
 */
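/* Illustrative example (not taken from the device spec): with requested sizes
 * TX=1024/RX=1024 and repeated -ENOMEM failures, the retry sequence is
 * 1024/1024 -> 512/512 -> 256/256; a further failure gives up, since halving
 * again would cross ENA_MIN_RING_SIZE.
 */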
static int create_queues_with_size_backoff(struct ena_adapter *adapter)
{
	int rc, cur_rx_ring_size, cur_tx_ring_size;
	int new_rx_ring_size, new_tx_ring_size;

	/* current queue sizes might be set to smaller than the requested
	 * ones due to past queue allocation failures.
	 */
	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
			  adapter->requested_rx_ring_size);

	while (1) {
		if (ena_xdp_present(adapter)) {
			rc = ena_setup_and_create_all_xdp_queues(adapter);

			if (rc)
				goto err_setup_tx;
		}
		rc = ena_setup_tx_resources_in_range(adapter,
						     0,
						     adapter->num_io_queues);
		if (rc)
			goto err_setup_tx;

		rc = ena_create_io_tx_queues_in_range(adapter,
						      0,
						      adapter->num_io_queues);
		if (rc)
			goto err_create_tx_queues;

		rc = ena_setup_all_rx_resources(adapter);
		if (rc)
			goto err_setup_rx;

		rc = ena_create_all_io_rx_queues(adapter);
		if (rc)
			goto err_create_rx_queues;

		return 0;

err_create_rx_queues:
		ena_free_all_io_rx_resources(adapter);
err_setup_rx:
		ena_destroy_all_tx_queues(adapter);
err_create_tx_queues:
		ena_free_all_io_tx_resources(adapter);
err_setup_tx:
		if (rc != -ENOMEM) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Queue creation failed with error code %d\n",
				  rc);
			return rc;
		}

		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
		cur_rx_ring_size = adapter->rx_ring[0].ring_size;

		netif_err(adapter, ifup, adapter->netdev,
			  "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
			  cur_tx_ring_size, cur_rx_ring_size);

		new_tx_ring_size = cur_tx_ring_size;
		new_rx_ring_size = cur_rx_ring_size;

		/* Decrease the size of the larger queue, or
		 * decrease both if they are the same size.
		 */
		if (cur_rx_ring_size <= cur_tx_ring_size)
			new_tx_ring_size = cur_tx_ring_size / 2;
		if (cur_rx_ring_size >= cur_tx_ring_size)
			new_rx_ring_size = cur_rx_ring_size / 2;

		if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
		    new_rx_ring_size < ENA_MIN_RING_SIZE) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
				  ENA_MIN_RING_SIZE);
			return rc;
		}

		netif_err(adapter, ifup, adapter->netdev,
			  "Retrying queue creation with sizes TX=%d, RX=%d\n",
			  new_tx_ring_size,
			  new_rx_ring_size);

		set_io_rings_size(adapter, new_tx_ring_size,
				  new_rx_ring_size);
	}
}
static int ena_up(struct ena_adapter *adapter)
{
	int io_queue_count, rc, i;

	netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);

	io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	ena_setup_io_intr(adapter);

	/* napi poll functions should be initialized before running
	 * request_irq(), to handle a rare condition where there is a pending
	 * interrupt, causing the ISR to fire immediately while the poll
	 * function wasn't set yet, causing a null dereference
	 */
	ena_init_napi_in_range(adapter, 0, io_queue_count);

	rc = ena_request_io_irq(adapter);
	if (rc)
		goto err_req_irq;

	rc = create_queues_with_size_backoff(adapter);
	if (rc)
		goto err_create_queues_with_backoff;

	rc = ena_up_complete(adapter);
	if (rc)
		goto err_up;

	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	ena_increase_stat(&adapter->dev_stats.interface_up, 1,
			  &adapter->syncp);

	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	/* Enable completion queues interrupt */
	for (i = 0; i < adapter->num_io_queues; i++)
		ena_unmask_interrupt(&adapter->tx_ring[i],
				     &adapter->rx_ring[i]);

	/* schedule napi in case we had pending packets
	 * from the last time we disable napi
	 */
	for (i = 0; i < io_queue_count; i++)
		napi_schedule(&adapter->ena_napi[i].napi);

	return rc;

err_up:
	ena_destroy_all_tx_queues(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_destroy_all_rx_queues(adapter);
	ena_free_all_io_rx_resources(adapter);
err_create_queues_with_backoff:
	ena_free_io_irq(adapter);
err_req_irq:
	ena_del_napi_in_range(adapter, 0, io_queue_count);

	return rc;
}
static void ena_down(struct ena_adapter *adapter)
{
	int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;

	netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);

	clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	ena_increase_stat(&adapter->dev_stats.interface_down, 1,
			  &adapter->syncp);

	netif_carrier_off(adapter->netdev);
	netif_tx_disable(adapter->netdev);

	/* After this point the napi handler won't enable the tx queue */
	ena_napi_disable_in_range(adapter, 0, io_queue_count);

	/* After destroy the queue there won't be any new interrupts */

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
		int rc;

		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
		if (rc)
			netif_err(adapter, ifdown, adapter->netdev,
				  "Device reset failed\n");
		/* stop submitting admin commands on a device that was reset */
		ena_com_set_admin_running_state(adapter->ena_dev, false);
	}

	ena_destroy_all_io_queues(adapter);

	ena_disable_io_intr_sync(adapter);
	ena_free_io_irq(adapter);
	ena_del_napi_in_range(adapter, 0, io_queue_count);

	ena_free_all_tx_bufs(adapter);
	ena_free_all_rx_bufs(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_free_all_io_rx_resources(adapter);
}
/* ena_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int ena_open(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* Notify the stack of the actual queue counts. */
	rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
		return rc;
	}

	rc = ena_up(adapter);
	if (rc)
		return rc;

	return rc;
}
/* ena_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int ena_close(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);

	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		return 0;

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	/* Check for device status and issue reset if needed */
	check_for_admin_com_state(adapter);
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, ifdown, adapter->netdev,
			  "Destroy failure, restarting device\n");
		ena_dump_stats_to_dmesg(adapter);
		/* rtnl lock already obtained in dev_ioctl() layer */
		ena_destroy_device(adapter, false);
		ena_restore_device(adapter);
	}

	return 0;
}
int ena_update_queue_sizes(struct ena_adapter *adapter,
			   u32 new_tx_size,
			   u32 new_rx_size)
{
	bool dev_was_up;

	dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	ena_close(adapter->netdev);
	adapter->requested_tx_ring_size = new_tx_size;
	adapter->requested_rx_ring_size = new_rx_size;
	ena_init_io_rings(adapter,
			  0,
			  adapter->xdp_num_queues +
			  adapter->num_io_queues);
	return dev_was_up ? ena_up(adapter) : 0;
}
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int prev_channel_count;
	bool dev_was_up;

	dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	ena_close(adapter->netdev);
	prev_channel_count = adapter->num_io_queues;
	adapter->num_io_queues = new_channel_count;
	if (ena_xdp_present(adapter) &&
	    ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
		adapter->xdp_first_ring = new_channel_count;
		adapter->xdp_num_queues = new_channel_count;
		if (prev_channel_count > new_channel_count)
			ena_xdp_exchange_program_rx_in_range(adapter,
							     NULL,
							     new_channel_count,
							     prev_channel_count);
		else
			ena_xdp_exchange_program_rx_in_range(adapter,
							     adapter->xdp_bpf_prog,
							     prev_channel_count,
							     new_channel_count);
	}

	/* We need to destroy the rss table so that the indirection
	 * table will be reinitialized by ena_up()
	 */
	ena_com_rss_destroy(ena_dev);
	ena_init_io_rings(adapter,
			  0,
			  adapter->xdp_num_queues +
			  adapter->num_io_queues);
	return dev_was_up ? ena_open(adapter->netdev) : 0;
}
static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx,
			struct sk_buff *skb,
			bool disable_meta_caching)
{
	u32 mss = skb_shinfo(skb)->gso_size;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
	u8 l4_protocol = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
		ena_tx_ctx->l4_csum_enable = 1;
		if (mss) {
			ena_tx_ctx->tso_enable = 1;
			ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
			ena_tx_ctx->l4_csum_partial = 0;
		} else {
			ena_tx_ctx->tso_enable = 0;
			ena_meta->l4_hdr_len = 0;
			ena_tx_ctx->l4_csum_partial = 1;
		}

		switch (ip_hdr(skb)->version) {
		case IPVERSION:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
			if (ip_hdr(skb)->frag_off & htons(IP_DF))
				ena_tx_ctx->df = 1;
			if (mss)
				ena_tx_ctx->l3_csum_enable = 1;
			l4_protocol = ip_hdr(skb)->protocol;
			break;
		case 6:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
			l4_protocol = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			break;
		}

		if (l4_protocol == IPPROTO_TCP)
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
		else
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;

		ena_meta->mss = mss;
		ena_meta->l3_hdr_len = skb_network_header_len(skb);
		ena_meta->l3_hdr_offset = skb_network_offset(skb);
		ena_tx_ctx->meta_valid = 1;
	} else if (disable_meta_caching) {
		memset(ena_meta, 0, sizeof(*ena_meta));
		ena_tx_ctx->meta_valid = 1;
	} else {
		ena_tx_ctx->meta_valid = 0;
	}
}
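/* Note on ena_tx_csum(): a non-zero gso_size selects TSO (full L4 checksum by
 * the device), while CHECKSUM_PARTIAL without TSO requests a partial checksum.
 * When the device disables metadata caching, an explicit (zeroed) metadata
 * descriptor is sent with every packet, which is what the
 * disable_meta_caching branch handles.
 */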
static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
				       struct sk_buff *skb)
{
	int num_frags, header_len, rc;

	num_frags = skb_shinfo(skb)->nr_frags;
	header_len = skb_headlen(skb);

	if (num_frags < tx_ring->sgl_size)
		return 0;

	if ((num_frags == tx_ring->sgl_size) &&
	    (header_len < tx_ring->tx_max_header_size))
		return 0;

	ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp);

	rc = skb_linearize(skb);
	if (unlikely(rc)) {
		ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1,
				  &tx_ring->syncp);
	}

	return rc;
}
static int ena_tx_map_skb(struct ena_ring *tx_ring,
			  struct ena_tx_buffer *tx_info,
			  struct sk_buff *skb,
			  void **push_hdr,
			  u16 *header_len)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma;
	u32 skb_head_len, frag_len, last_frag;
	u16 push_len = 0;
	u16 delta = 0;
	int i = 0;

	skb_head_len = skb_headlen(skb);
	tx_info->skb = skb;
	ena_buf = tx_info->bufs;

	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* When the device is in LLQ mode, the driver will copy
		 * the header into the device memory space.
		 * The ena_com layer assumes the header is in a linear
		 * memory space.
		 * This assumption might be wrong since part of the header
		 * can be in the fragmented buffers.
		 * Use skb_header_pointer to make sure the header is in a
		 * linear memory space.
		 */

		push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
		*push_hdr = skb_header_pointer(skb, 0, push_len,
					       tx_ring->push_buf_intermediate_buf);
		*header_len = push_len;
		if (unlikely(skb->data != *push_hdr)) {
			ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1,
					  &tx_ring->syncp);

			delta = push_len - skb_head_len;
		}
	} else {
		*push_hdr = NULL;
		*header_len = min_t(u32, skb_head_len,
				    tx_ring->tx_max_header_size);
	}

	netif_dbg(adapter, tx_queued, adapter->netdev,
		  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
		  *push_hdr, push_len);

	/* map skb linear data */
	if (skb_head_len > push_len) {
		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
				     skb_head_len - push_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = skb_head_len - push_len;

		ena_buf++;
		tx_info->num_of_bufs++;
		tx_info->map_linear_data = 1;
	} else {
		tx_info->map_linear_data = 0;
	}

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		frag_len = skb_frag_size(frag);

		if (unlikely(delta >= frag_len)) {
			delta -= frag_len;
			continue;
		}

		dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
				       frag_len - delta, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = frag_len - delta;
		ena_buf++;
		tx_info->num_of_bufs++;
		delta = 0;
	}

	return 0;

error_report_dma_error:
	ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
			  &tx_ring->syncp);
	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");

	tx_info->skb = NULL;

	tx_info->num_of_bufs += i;
	ena_unmap_tx_buff(tx_ring, tx_info);

	return -EINVAL;
}
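/* Note on ena_tx_map_skb(): in LLQ (device placement) mode the first push_len
 * bytes are written directly to device memory via the push header, so only
 * the remainder of the linear data and the page fragments are DMA-mapped;
 * delta skips over header bytes that already live in the fragments.
 */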
/* Called with netif_tx_lock. */
static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_tx_buffer *tx_info;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_ring *tx_ring;
	struct netdev_queue *txq;
	void *push_hdr;
	u16 next_to_use, req_id, header_len;
	int qid, rc;

	netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
	/* Determine which tx ring we will be placed on */
	qid = skb_get_queue_mapping(skb);
	tx_ring = &adapter->tx_ring[qid];
	txq = netdev_get_tx_queue(dev, qid);

	rc = ena_check_and_linearize_skb(tx_ring, skb);
	if (unlikely(rc))
		goto error_drop_packet;

	skb_tx_timestamp(skb);

	next_to_use = tx_ring->next_to_use;
	req_id = tx_ring->free_ids[next_to_use];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);

	rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
	if (unlikely(rc))
		goto error_drop_packet;

	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = header_len;

	/* set flags and meta data */
	ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);

	rc = ena_xmit_common(dev,
			     tx_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     skb->len);
	if (rc)
		goto error_unmap_dma;

	netdev_tx_sent_queue(txq, skb->len);

	/* stop the queue when no more space available, the packet can have up
	 * to sgl_size + 2. one for the meta descriptor and one for header
	 * (if the header is larger than tx_max_header_size).
	 */
	if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						   tx_ring->sgl_size + 2))) {
		netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
			  __func__, qid);

		netif_tx_stop_queue(txq);
		ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1,
				  &tx_ring->syncp);

		/* There is a rare condition where this function decides to
		 * stop the queue but meanwhile clean_tx_irq updates
		 * next_to_completion and terminates.
		 * The queue will remain stopped forever.
		 * To solve this issue add a mb() to make sure that
		 * netif_tx_stop_queue() write is visible before checking if
		 * there is additional space in the queue.
		 */
		smp_mb();

		if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						 ENA_TX_WAKEUP_THRESH)) {
			netif_tx_wake_queue(txq);
			ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
					  &tx_ring->syncp);
		}
	}

	if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
		/* trigger the dma engine. ena_com_write_sq_doorbell()
		 * has a mb
		 */
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
		ena_increase_stat(&tx_ring->tx_stats.doorbells, 1,
				  &tx_ring->syncp);
	}

	return NETDEV_TX_OK;

error_unmap_dma:
	ena_unmap_tx_buff(tx_ring, tx_info);
	tx_info->skb = NULL;

error_drop_packet:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
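/* Doorbell batching: when the stack indicates more packets are coming
 * (netdev_xmit_more()), the doorbell write is deferred so its MMIO cost is
 * amortized across several descriptors; a stopped queue always rings the
 * doorbell so nothing is left pending.
 */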
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	u16 qid;
	/* we suspect that this is good for in-kernel network services that
	 * want to loop incoming skb rx to tx in normal user generated traffic,
	 * most probably we will not get to this
	 */
	if (skb_rx_queue_recorded(skb))
		qid = skb_get_rx_queue(skb);
	else
		qid = netdev_pick_tx(dev, skb, NULL);

	return qid;
}
static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		dev_err(dev, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
	host_info->os_type = ENA_ADMIN_OS_LINUX;
	host_info->kernel_ver = LINUX_VERSION_CODE;
	strlcpy(host_info->kernel_ver_str, utsname()->version,
		sizeof(host_info->kernel_ver_str) - 1);
	host_info->os_dist = 0;
	strncpy(host_info->os_dist_str, utsname()->release,
		sizeof(host_info->os_dist_str) - 1);
	host_info->driver_version =
		(DRV_MODULE_GEN_MAJOR) |
		(DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
		("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
	host_info->num_cpus = num_online_cpus();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
		ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
		ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			dev_warn(dev, "Cannot set host attributes\n");
		else
			dev_err(dev, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}
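/* The driver_version field packs major/minor/sub-minor numbers plus a
 * module-type tag ('K' for the in-kernel driver) into a single word via the
 * ENA_ADMIN_HOST_INFO_*_SHIFT offsets, presumably so the device can include
 * the host driver build in its own diagnostics.
 */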
static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
	if (ss_count <= 0) {
		netif_err(adapter, drv, adapter->netdev,
			  "SS count is negative\n");
		return;
	}

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
	if (rc) {
		netif_err(adapter, drv, adapter->netdev,
			  "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(adapter->ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			netif_warn(adapter, drv, adapter->netdev,
				   "Cannot set host attributes\n");
		else
			netif_err(adapter, drv, adapter->netdev,
				  "Cannot set host attributes\n");

		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(adapter->ena_dev);
}
int ena_update_hw_stats(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
	if (rc) {
		dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n");
		return rc;
	}

	return 0;
}
static void ena_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_ring *rx_ring, *tx_ring;
	unsigned int start;
	u64 rx_drops;
	u64 tx_drops;
	int i;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_io_queues; i++) {
		u64 bytes, packets;

		tx_ring = &adapter->tx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->tx_stats.cnt;
			bytes = tx_ring->tx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;

		rx_ring = &adapter->rx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->rx_stats.cnt;
			bytes = rx_ring->rx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	do {
		start = u64_stats_fetch_begin_irq(&adapter->syncp);
		rx_drops = adapter->dev_stats.rx_drops;
		tx_drops = adapter->dev_stats.tx_drops;
	} while (u64_stats_fetch_retry_irq(&adapter->syncp, start));

	stats->rx_dropped = rx_drops;
	stats->tx_dropped = tx_drops;

	stats->multicast = 0;
	stats->collisions = 0;

	stats->rx_length_errors = 0;
	stats->rx_crc_errors = 0;
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = 0;
	stats->rx_missed_errors = 0;
	stats->tx_window_errors = 0;

	stats->rx_errors = 0;
	stats->tx_errors = 0;
}
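/* The u64_stats_fetch_begin_irq()/retry_irq() loops above re-read each
 * counter pair until no writer touched the seqcount in between, which keeps
 * 64-bit packet/byte counters consistent on 32-bit machines without taking a
 * lock in the datapath.
 */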
static const struct net_device_ops ena_netdev_ops = {
	.ndo_open		= ena_open,
	.ndo_stop		= ena_close,
	.ndo_start_xmit		= ena_start_xmit,
	.ndo_select_queue	= ena_select_queue,
	.ndo_get_stats64	= ena_get_stats64,
	.ndo_tx_timeout		= ena_tx_timeout,
	.ndo_change_mtu		= ena_change_mtu,
	.ndo_set_mac_address	= NULL,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_bpf		= ena_xdp,
	.ndo_xdp_xmit		= ena_xdp_xmit,
};
static int ena_device_validate_params(struct ena_adapter *adapter,
				      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
			      adapter->mac_addr);
	if (!rc) {
		netif_err(adapter, drv, netdev,
			  "Error, mac addresses differ\n");
		return -EINVAL;
	}

	if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
		netif_err(adapter, drv, netdev,
			  "Error, device max mtu is smaller than netdev MTU\n");
		return -EINVAL;
	}

	return 0;
}
static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
{
	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
	llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
	llq_config->llq_ring_entry_size_value = 128;
}
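/* These defaults request 128-byte LLQ list entries with the packet header
 * pushed inline and two descriptors before the header, which is the layout
 * ENA devices typically negotiate; when LLQ is unsupported,
 * ena_set_queues_placement_policy() falls back to host-memory placement.
 */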
static int ena_set_queues_placement_policy(struct pci_dev *pdev,
					   struct ena_com_dev *ena_dev,
					   struct ena_admin_feature_llq_desc *llq,
					   struct ena_llq_configurations *llq_default_configurations)
{
	int rc;
	u32 llq_feature_mask;

	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
	if (!(ena_dev->supported_features & llq_feature_mask)) {
		dev_warn(&pdev->dev,
			 "LLQ is not supported. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
	if (unlikely(rc)) {
		dev_err(&pdev->dev,
			"Failed to configure the device mode. Fallback to host mode policy.\n");
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	}

	return 0;
}
static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
			       int bars)
{
	bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));

	if (!has_mem_bar) {
		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			dev_err(&pdev->dev,
				"ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
			ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		}

		return 0;
	}

	ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
					   pci_resource_start(pdev, ENA_MEM_BAR),
					   pci_resource_len(pdev, ENA_MEM_BAR));

	if (!ena_dev->mem_bar)
		return -EFAULT;

	return 0;
}
static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state)
{
	struct ena_llq_configurations llq_config;
	struct device *dev = &pdev->dev;
	bool readless_supported;
	u32 aenq_groups;
	int dma_width;
	int rc;

	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc) {
		dev_err(dev, "Failed to init mmio read less\n");
		return rc;
	}

	/* The PCIe configuration space revision id indicates if mmio reg
	 * read is disabled
	 */
	readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
	if (rc) {
		dev_err(dev, "Can not reset device\n");
		goto err_mmio_read_less;
	}

	rc = ena_com_validate_version(ena_dev);
	if (rc) {
		dev_err(dev, "Device version is too low\n");
		goto err_mmio_read_less;
	}

	dma_width = ena_com_get_dma_width(ena_dev);
	if (dma_width < 0) {
		dev_err(dev, "Invalid dma width value %d", dma_width);
		rc = dma_width;
		goto err_mmio_read_less;
	}

	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
	if (rc) {
		dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
		goto err_mmio_read_less;
	}

	/* ENA admin level init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
	if (rc) {
		dev_err(dev,
			"Can not initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}

	/* To enable the msix interrupts the driver needs to know the number
	 * of queues. So the driver uses polling mode to retrieve this
	 * information
	 */
	ena_com_set_admin_polling_mode(ena_dev, true);

	ena_config_host_info(ena_dev, pdev);

	/* Get Device Attributes */
	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
	if (rc) {
		dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
		goto err_admin_init;
	}

	/* Try to turn all the available aenq groups */
	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
		      BIT(ENA_ADMIN_FATAL_ERROR) |
		      BIT(ENA_ADMIN_WARNING) |
		      BIT(ENA_ADMIN_NOTIFICATION) |
		      BIT(ENA_ADMIN_KEEP_ALIVE);

	aenq_groups &= get_feat_ctx->aenq.supported_groups;

	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
	if (rc) {
		dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
		goto err_admin_init;
	}

	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));

	set_default_llq_configurations(&llq_config);

	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
					     &llq_config);
	if (rc) {
		dev_err(dev, "ENA device init failed\n");
		goto err_admin_init;
	}

	return 0;

err_admin_init:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
	ena_com_mmio_reg_read_request_destroy(ena_dev);

	return rc;
}
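/* Admin-queue completions are polled in ena_device_init() because MSI-X is
 * not yet configured at that point; ena_enable_msix_and_set_admin_interrupts()
 * switches the admin queue back to interrupt mode once the management vector
 * has been requested.
 */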
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	int rc;

	rc = ena_enable_msix(adapter);
	if (rc) {
		dev_err(dev, "Can not reserve msix vectors\n");
		return rc;
	}

	ena_setup_mgmnt_intr(adapter);

	rc = ena_request_mgmnt_irq(adapter);
	if (rc) {
		dev_err(dev, "Can not setup management interrupts\n");
		goto err_disable_msix;
	}

	ena_com_set_admin_polling_mode(ena_dev, false);

	ena_com_admin_aenq_enable(ena_dev);

	return 0;

err_disable_msix:
	ena_disable_msix(adapter);

	return rc;
}
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
	struct net_device *netdev = adapter->netdev;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	bool dev_up;

	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		return;

	netif_carrier_off(netdev);

	del_timer_sync(&adapter->timer_service);

	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	adapter->dev_up_before_reset = dev_up;
	if (!graceful)
		ena_com_set_admin_running_state(ena_dev, false);

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	/* Stop the device from sending AENQ events (in case reset flag is set
	 * and device is up, ena_down() already reset the device.
	 */
	if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);

	ena_free_mgmnt_irq(adapter);

	ena_disable_msix(adapter);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	/* return reset reason to default value */
	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
}
static int ena_restore_device(struct ena_adapter *adapter)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct pci_dev *pdev = adapter->pdev;
	bool wd_state;
	int rc;

	set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "Can not initialize device\n");
		goto err;
	}
	adapter->wd_state = wd_state;

	rc = ena_device_validate_params(adapter, &get_feat_ctx);
	if (rc) {
		dev_err(&pdev->dev, "Validation of device parameters failed\n");
		goto err_device_destroy;
	}

	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (rc) {
		dev_err(&pdev->dev, "Enable MSI-X failed\n");
		goto err_device_destroy;
	}
	/* If the interface was up before the reset bring it up */
	if (adapter->dev_up_before_reset) {
		rc = ena_up(adapter);
		if (rc) {
			dev_err(&pdev->dev, "Failed to create I/O queues\n");
			goto err_disable_msix;
		}
	}

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
	adapter->last_keep_alive_jiffies = jiffies;

	dev_err(&pdev->dev, "Device reset completed successfully\n");

	return rc;
err_disable_msix:
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_device_destroy:
	ena_com_abort_admin_commands(ena_dev);
	ena_com_wait_for_abort_completion(ena_dev);
	ena_com_admin_destroy(ena_dev);
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
	ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
	clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
	dev_err(&pdev->dev,
		"Reset attempt failed. Can not reset the device\n");

	return rc;
}
static void ena_fw_reset_device(struct work_struct *work)
{
	struct ena_adapter *adapter =
		container_of(work, struct ena_adapter, reset_task);

	rtnl_lock();

	if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		ena_destroy_device(adapter, false);
		ena_restore_device(adapter);
	}

	rtnl_unlock();
}
static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
					struct ena_ring *rx_ring)
{
	if (likely(rx_ring->first_interrupt))
		return 0;

	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
		return 0;

	rx_ring->no_interrupt_event_cnt++;

	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
		netif_err(adapter, rx_err, adapter->netdev,
			  "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
			  rx_ring->qid);
		adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
		smp_mb__before_atomic();
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		return -EIO;
	}

	return 0;
}
static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
					  struct ena_ring *tx_ring)
{
	struct ena_tx_buffer *tx_buf;
	unsigned long last_jiffies;
	u32 missed_tx = 0;
	int i, rc = 0;

	for (i = 0; i < tx_ring->ring_size; i++) {
		tx_buf = &tx_ring->tx_buffer_info[i];
		last_jiffies = tx_buf->last_jiffies;

		if (last_jiffies == 0)
			/* no pending Tx at this location */
			continue;

		if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
			     2 * adapter->missing_tx_completion_to))) {
			/* If after graceful period interrupt is still not
			 * received, we schedule a reset
			 */
			netif_err(adapter, tx_err, adapter->netdev,
				  "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
				  tx_ring->qid);
			adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
			smp_mb__before_atomic();
			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
			return -EIO;
		}

		if (unlikely(time_is_before_jiffies(last_jiffies +
			     adapter->missing_tx_completion_to))) {
			if (!tx_buf->print_once)
				netif_notice(adapter, tx_err, adapter->netdev,
					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
					     tx_ring->qid, i);

			tx_buf->print_once = 1;
			missed_tx++;
		}
	}

	if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
		netif_err(adapter, tx_err, adapter->netdev,
			  "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
			  missed_tx,
			  adapter->missing_tx_completion_threshold);
		adapter->reset_reason =
			ENA_REGS_RESET_MISS_TX_CMPL;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		rc = -EIO;
	}

	ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx,
			  &tx_ring->syncp);

	return rc;
}
static void check_for_missing_completions(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	struct ena_ring *rx_ring;
	int i, budget, rc;
	int io_queue_count;

	io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
	/* Make sure the driver doesn't turn the device in other process */
	smp_rmb();

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	budget = ENA_MONITORED_TX_QUEUES;

	for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
		tx_ring = &adapter->tx_ring[i];
		rx_ring = &adapter->rx_ring[i];

		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
		if (unlikely(rc))
			return;

		rc = !ENA_IS_XDP_INDEX(adapter, i) ?
			check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
		if (unlikely(rc))
			return;

		budget--;
		if (!budget)
			break;
	}

	adapter->last_monitored_tx_qid = i % io_queue_count;
}
/* trigger napi schedule after 2 consecutive detections */
#define EMPTY_RX_REFILL 2

/* For the rare case where the device runs out of Rx descriptors and the
 * napi handler failed to refill new Rx descriptors (due to a lack of memory
 * for example).
 * This case will lead to a deadlock:
 * The device won't send interrupts since all the new Rx packets will be dropped
 * The napi handler won't allocate new Rx descriptors so the device won't be
 * able to send new packets.
 *
 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
 * It is recommended to have at least 512MB, with a minimum of 128MB for
 * constrained environments.
 *
 * When such a situation is detected - Reschedule napi
 */
static void check_for_empty_rx_ring(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, refill_required;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];

		refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
			rx_ring->empty_rx_queue++;

			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
				ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1,
						  &rx_ring->syncp);

				netif_err(adapter, drv, adapter->netdev,
					  "Trigger refill for ring %d\n", i);

				napi_schedule(rx_ring->napi);
				rx_ring->empty_rx_queue = 0;
			}
		} else {
			rx_ring->empty_rx_queue = 0;
		}
	}
}
/* Check for keep alive expiration */
static void check_for_missing_keep_alive(struct ena_adapter *adapter)
{
	unsigned long keep_alive_expired;

	if (!adapter->wd_state)
		return;

	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	keep_alive_expired = adapter->last_keep_alive_jiffies +
			     adapter->keep_alive_timeout;
	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Keep alive watchdog timeout.\n");
		ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
				  &adapter->syncp);
		adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
}
static void check_for_admin_com_state(struct ena_adapter *adapter)
{
	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
		netif_err(adapter, drv, adapter->netdev,
			  "ENA admin queue is not in running state!\n");
		ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
				  &adapter->syncp);
		adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
}
static void ena_update_hints(struct ena_adapter *adapter,
			     struct ena_admin_ena_hw_hints *hints)
{
	struct net_device *netdev = adapter->netdev;

	if (hints->admin_completion_tx_timeout)
		adapter->ena_dev->admin_queue.completion_timeout =
			hints->admin_completion_tx_timeout * 1000;

	if (hints->mmio_read_timeout)
		/* convert to usec */
		adapter->ena_dev->mmio_read.reg_read_to =
			hints->mmio_read_timeout * 1000;

	if (hints->missed_tx_completion_count_threshold_to_reset)
		adapter->missing_tx_completion_threshold =
			hints->missed_tx_completion_count_threshold_to_reset;

	if (hints->missing_tx_completion_timeout) {
		if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
		else
			adapter->missing_tx_completion_to =
				msecs_to_jiffies(hints->missing_tx_completion_timeout);
	}

	if (hints->netdev_wd_timeout)
		netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);

	if (hints->driver_watchdog_timeout) {
		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
		else
			adapter->keep_alive_timeout =
				msecs_to_jiffies(hints->driver_watchdog_timeout);
	}
}
static void ena_update_host_info(struct ena_admin_host_info *host_info,
				 struct net_device *netdev)
{
	host_info->supported_network_features[0] =
		netdev->features & GENMASK_ULL(31, 0);
	host_info->supported_network_features[1] =
		(netdev->features & GENMASK_ULL(63, 32)) >> 32;
}
static void ena_timer_service(struct timer_list *t)
{
	struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
	u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
	struct ena_admin_host_info *host_info =
		adapter->ena_dev->host_attr.host_info;

	check_for_missing_keep_alive(adapter);

	check_for_admin_com_state(adapter);

	check_for_missing_completions(adapter);

	check_for_empty_rx_ring(adapter);

	if (debug_area)
		ena_dump_stats_to_buf(adapter, debug_area);

	if (host_info)
		ena_update_host_info(host_info, adapter->netdev);

	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Trigger reset is on\n");
		ena_dump_stats_to_dmesg(adapter);
		queue_work(ena_wq, &adapter->reset_task);
		return;
	}

	/* Reset the timer */
	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
}
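/* The timer service runs roughly once per second (round_jiffies(jiffies + HZ))
 * and is the driver's watchdog path: keep-alive, admin-queue state, missing Tx
 * completions and empty Rx rings are all checked here, and a detected failure
 * is handed off to the reset worker rather than handled in timer context.
 */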
static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
				     struct ena_com_dev *ena_dev,
				     struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&get_feat_ctx->max_queue_ext.max_queue_ext;
		io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
				  max_queue_ext->max_rx_cq_num);

		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&get_feat_ctx->max_queues;
		io_tx_sq_num = max_queues->max_sq_num;
		io_tx_cq_num = max_queues->max_cq_num;
		io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
	}

	/* In case of LLQ use the llq fields for the tx SQ/CQ */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;

	max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
	max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
	max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
	max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
	/* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
	max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
	if (unlikely(!max_num_io_queues)) {
		dev_err(&pdev->dev, "The device doesn't have io queues\n");
		return -EFAULT;
	}

	return max_num_io_queues;
}
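/* The effective queue count is therefore the minimum of: online CPUs, the
 * driver cap (ENA_MAX_NUM_IO_QUEUES), the device's Rx and Tx SQ/CQ limits (or
 * the LLQ limit in device-placement mode), and the available MSI-X vectors
 * minus the one reserved for management.
 */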
static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
				 struct net_device *netdev)
{
	netdev_features_t dev_features = 0;

	/* Set offload features */
	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
		dev_features |= NETIF_F_IP_CSUM;

	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
		dev_features |= NETIF_F_IPV6_CSUM;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
		dev_features |= NETIF_F_TSO;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
		dev_features |= NETIF_F_TSO6;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
		dev_features |= NETIF_F_TSO_ECN;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	netdev->features =
		dev_features |
		NETIF_F_SG |
		NETIF_F_RXHASH |
		NETIF_F_HIGHDMA;

	netdev->hw_features |= netdev->features;
	netdev->vlan_features |= netdev->features;
}
static void ena_set_conf_feat_params(struct ena_adapter *adapter,
				     struct ena_com_dev_get_features_ctx *feat)
{
	struct net_device *netdev = adapter->netdev;

	/* Copy mac address */
	if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	} else {
		ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
		ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	}

	/* Set offload features */
	ena_set_dev_offloads(feat, netdev);

	adapter->max_mtu = feat->dev_attr.max_mtu;
	netdev->max_mtu = adapter->max_mtu;
	netdev->min_mtu = ENA_MIN_MTU;
}
static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	int rc, i;
	u32 val;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		dev_err(dev, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != -EOPNOTSUPP))) {
			dev_err(dev, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
		dev_err(dev, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP))) {
		dev_err(dev, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}
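/* The default RSS setup spreads flows across the enabled Rx queues using the
 * standard ethtool indirection pattern and a Toeplitz hash; user space can
 * later override the key and indirection table through the ethtool RSS
 * interface.
 */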
static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
	int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;

	pci_release_selected_regions(pdev, release_bars);
}
static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
	u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
	u32 max_tx_queue_size;
	u32 max_rx_queue_size;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
					  max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  llq->max_llq_depth);
		else
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  max_queue_ext->max_tx_sq_depth);

		ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
					     max_queue_ext->max_per_packet_tx_descs);
		ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
					     max_queue_ext->max_per_packet_rx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
					  max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  llq->max_llq_depth);
		else
			max_tx_queue_size = min_t(u32, max_tx_queue_size,
						  max_queues->max_sq_depth);

		ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
					     max_queues->max_packet_tx_descs);
		ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
					     max_queues->max_packet_rx_descs);
	}

	max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
	max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);

	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
				  max_tx_queue_size);
	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
				  max_rx_queue_size);

	tx_queue_size = rounddown_pow_of_two(tx_queue_size);
	rx_queue_size = rounddown_pow_of_two(rx_queue_size);

	ctx->max_tx_queue_size = max_tx_queue_size;
	ctx->max_rx_queue_size = max_rx_queue_size;
	ctx->tx_queue_size = tx_queue_size;
	ctx->rx_queue_size = rx_queue_size;

	return 0;
}
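/* Queue sizes therefore start from ENA_DEFAULT_RING_SIZE, are clamped between
 * ENA_MIN_RING_SIZE and the device-reported depth limits (including the LLQ
 * depth when device placement is used), and are finally rounded down to a
 * power of two.
 */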
4171 /* ena_probe - Device Initialization Routine
4172 * @pdev: PCI device information struct
4173 * @ent: entry in ena_pci_tbl
4175 * Returns 0 on success, negative on failure
4177 * ena_probe initializes an adapter identified by a pci_dev structure.
4178 * The OS initialization, configuring of the adapter private structure,
4179 * and a hardware reset occur.
4181 static int ena_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
4183 struct ena_calc_queue_size_ctx calc_queue_ctx
= {};
4184 struct ena_com_dev_get_features_ctx get_feat_ctx
;
4185 struct ena_com_dev
*ena_dev
= NULL
;
4186 struct ena_adapter
*adapter
;
4187 struct net_device
*netdev
;
4188 static int adapters_found
;
4189 u32 max_num_io_queues
;
4193 dev_dbg(&pdev
->dev
, "%s\n", __func__
);
4195 rc
= pci_enable_device_mem(pdev
);
4197 dev_err(&pdev
->dev
, "pci_enable_device_mem() failed!\n");
4201 rc
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS
));
4203 dev_err(&pdev
->dev
, "dma_set_mask_and_coherent failed %d\n", rc
);
4204 goto err_disable_device
;
4207 pci_set_master(pdev
);
4209 ena_dev
= vzalloc(sizeof(*ena_dev
));
4212 goto err_disable_device
;
4215 bars
= pci_select_bars(pdev
, IORESOURCE_MEM
) & ENA_BAR_MASK
;
4216 rc
= pci_request_selected_regions(pdev
, bars
, DRV_MODULE_NAME
);
4218 dev_err(&pdev
->dev
, "pci_request_selected_regions failed %d\n",
4220 goto err_free_ena_dev
;
4223 ena_dev
->reg_bar
= devm_ioremap(&pdev
->dev
,
4224 pci_resource_start(pdev
, ENA_REG_BAR
),
4225 pci_resource_len(pdev
, ENA_REG_BAR
));
4226 if (!ena_dev
->reg_bar
) {
4227 dev_err(&pdev
->dev
, "Failed to remap regs bar\n");
4229 goto err_free_region
;
4232 ena_dev
->ena_min_poll_delay_us
= ENA_ADMIN_POLL_DELAY_US
;
4234 ena_dev
->dmadev
= &pdev
->dev
;
4236 netdev
= alloc_etherdev_mq(sizeof(struct ena_adapter
), ENA_MAX_RINGS
);
4238 dev_err(&pdev
->dev
, "alloc_etherdev_mq failed\n");
4240 goto err_free_region
;
4243 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
4244 adapter
= netdev_priv(netdev
);
4245 adapter
->ena_dev
= ena_dev
;
4246 adapter
->netdev
= netdev
;
4247 adapter
->pdev
= pdev
;
4248 adapter
->msg_enable
= netif_msg_init(debug
, DEFAULT_MSG_ENABLE
);
4250 ena_dev
->net_device
= netdev
;
4252 pci_set_drvdata(pdev
, adapter
);
4254 rc
= ena_device_init(ena_dev
, pdev
, &get_feat_ctx
, &wd_state
);
4256 dev_err(&pdev
->dev
, "ENA device init failed\n");
4259 goto err_netdev_destroy
;
4262 rc
= ena_map_llq_mem_bar(pdev
, ena_dev
, bars
);
4264 dev_err(&pdev
->dev
, "ENA llq bar mapping failed\n");
4265 goto err_device_destroy
;
4268 calc_queue_ctx
.ena_dev
= ena_dev
;
4269 calc_queue_ctx
.get_feat_ctx
= &get_feat_ctx
;
4270 calc_queue_ctx
.pdev
= pdev
;
4272 /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
4273 * Updated during device initialization with the real granularity
4275 ena_dev
->intr_moder_tx_interval
= ENA_INTR_INITIAL_TX_INTERVAL_USECS
;
4276 ena_dev
->intr_moder_rx_interval
= ENA_INTR_INITIAL_RX_INTERVAL_USECS
;
4277 ena_dev
->intr_delay_resolution
= ENA_DEFAULT_INTR_DELAY_RESOLUTION
;
4278 max_num_io_queues
= ena_calc_max_io_queue_num(pdev
, ena_dev
, &get_feat_ctx
);
4279 rc
= ena_calc_io_queue_size(&calc_queue_ctx
);
4280 if (rc
|| !max_num_io_queues
) {
4282 goto err_device_destroy
;
4285 ena_set_conf_feat_params(adapter
, &get_feat_ctx
);
4287 adapter
->reset_reason
= ENA_REGS_RESET_NORMAL
;
4289 adapter
->requested_tx_ring_size
= calc_queue_ctx
.tx_queue_size
;
4290 adapter
->requested_rx_ring_size
= calc_queue_ctx
.rx_queue_size
;
4291 adapter
->max_tx_ring_size
= calc_queue_ctx
.max_tx_queue_size
;
4292 adapter
->max_rx_ring_size
= calc_queue_ctx
.max_rx_queue_size
;
4293 adapter
->max_tx_sgl_size
= calc_queue_ctx
.max_tx_sgl_size
;
4294 adapter
->max_rx_sgl_size
= calc_queue_ctx
.max_rx_sgl_size
;
4296 adapter
->num_io_queues
= max_num_io_queues
;
4297 adapter
->max_num_io_queues
= max_num_io_queues
;
4298 adapter
->last_monitored_tx_qid
= 0;
4300 adapter
->xdp_first_ring
= 0;
4301 adapter
->xdp_num_queues
= 0;
4303 adapter
->rx_copybreak
= ENA_DEFAULT_RX_COPYBREAK
;
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		adapter->disable_meta_caching =
			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
			   BIT(ENA_ADMIN_DISABLE_META_CACHING));

	adapter->wd_state = wd_state;

	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);

	rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to query interrupt moderation feature\n");
		goto err_device_destroy;
	}
	ena_init_io_rings(adapter,
			  0,
			  adapter->xdp_num_queues +
			  adapter->num_io_queues);

	netdev->netdev_ops = &ena_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	ena_set_ethtool_ops(netdev);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	u64_stats_init(&adapter->syncp);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to enable and set the admin interrupts\n");
		goto err_worker_destroy;
	}
	rc = ena_rss_init_default(adapter);
	if (rc && (rc != -EOPNOTSUPP)) {
		dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
		goto err_free_msix;
	}

	ena_config_debug_area(adapter);
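
	/* Probe for ENI stats support: a successful query marks the extended
	 * statistics as available on this device.
	 */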
	if (!ena_update_hw_stats(adapter))
		adapter->eni_stats_supported = true;
	else
		adapter->eni_stats_supported = false;

	memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);

	netif_carrier_off(netdev);

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto err_rss;
	}

	INIT_WORK(&adapter->reset_task, ena_fw_reset_device);

	adapter->last_keep_alive_jiffies = jiffies;
	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
	adapter->missing_tx_completion_to = TX_TIMEOUT;
	adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;

	ena_update_hints(adapter, &get_feat_ctx.hw_hints);

	timer_setup(&adapter->timer_service, ena_timer_service, 0);
	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

	dev_info(&pdev->dev,
		 "%s found at mem %lx, mac addr %pM\n",
		 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
		 netdev->dev_addr);

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	adapters_found++;

	return 0;
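
	/* Error unwind: release resources in the reverse order of their
	 * acquisition above.
	 */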
err_rss:
	ena_com_delete_debug_area(ena_dev);
	ena_com_rss_destroy(ena_dev);
err_free_msix:
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
	/* stop submitting admin commands on a device that was reset */
	ena_com_set_admin_running_state(ena_dev, false);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_worker_destroy:
	del_timer(&adapter->timer_service);
err_device_destroy:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_netdev_destroy:
	free_netdev(netdev);
err_free_region:
	ena_release_bars(ena_dev, pdev);
err_free_ena_dev:
	vfree(ena_dev);
err_disable_device:
	pci_disable_device(pdev);
	return rc;
}

/*****************************************************************************/

/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
 * @pdev: PCI device information struct
 * @shutdown: Is it a shutdown operation? If false, means it is a removal
 *
 * __ena_shutoff is a helper routine that does the real work on the shutdown
 * and removal paths; the difference between those paths is whether the
 * netdevice is detached or unregistered.
 */
static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	struct ena_com_dev *ena_dev;
	struct net_device *netdev;

	ena_dev = adapter->ena_dev;
	netdev = adapter->netdev;

#ifdef CONFIG_RFS_ACCEL
	if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
		free_irq_cpu_rmap(netdev->rx_cpu_rmap);
		netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	/* Make sure timer and reset routine won't be called after
	 * freeing device resources.
	 */
	del_timer_sync(&adapter->timer_service);
	cancel_work_sync(&adapter->reset_task);

	rtnl_lock(); /* lock released inside the below if-else block */
	adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
	ena_destroy_device(adapter, true);
	if (shutdown) {
		netif_device_detach(netdev);
		dev_close(netdev);
		rtnl_unlock();
	} else {
		rtnl_unlock();
		unregister_netdev(netdev);
		free_netdev(netdev);
	}

	ena_com_rss_destroy(ena_dev);

	ena_com_delete_debug_area(ena_dev);

	ena_com_delete_host_info(ena_dev);

	ena_release_bars(ena_dev, pdev);

	pci_disable_device(pdev);
}

/* ena_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ena_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */

static void ena_remove(struct pci_dev *pdev)
{
	__ena_shutoff(pdev, false);
}

/* ena_shutdown - Device Shutdown Routine
 * @pdev: PCI device information struct
 *
 * ena_shutdown is called by the PCI subsystem to alert the driver that
 * a shutdown/reboot (or kexec) is happening and device must be disabled.
 */

static void ena_shutdown(struct pci_dev *pdev)
{
	__ena_shutoff(pdev, true);
}

/* ena_suspend - PM suspend callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct ena_adapter *adapter = pci_get_drvdata(pdev);

	ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp);

	rtnl_lock();
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"Ignoring device reset request as the device is being suspended\n");
		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
	ena_destroy_device(adapter, true);
	rtnl_unlock();
	return 0;
}

/* ena_resume - PM resume callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_resume(struct device *dev_d)
{
	struct ena_adapter *adapter = dev_get_drvdata(dev_d);
	int rc;

	ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp);

	rtnl_lock();
	rc = ena_restore_device(adapter);
	rtnl_unlock();

	return rc;
}

static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);
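
/* PCI driver glue: probe/remove/shutdown callbacks plus PM ops and simple
 * SR-IOV configuration.
 */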
static struct pci_driver ena_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= ena_pci_tbl,
	.probe		= ena_probe,
	.remove		= ena_remove,
	.shutdown	= ena_shutdown,
	.driver.pm	= &ena_pm_ops,
	.sriov_configure = pci_sriov_configure_simple,
};
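
/* ena_init - Module entry point: create the driver workqueue and register
 * the PCI driver.
 */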
static int __init ena_init(void)
{
	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
	if (!ena_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&ena_pci_driver);
}
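
/* ena_cleanup - Module exit point: unregister the PCI driver and destroy
 * the workqueue.
 */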
static void __exit ena_cleanup(void)
{
	pci_unregister_driver(&ena_pci_driver);

	if (ena_wq) {
		destroy_workqueue(ena_wq);
		ena_wq = NULL;
	}
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc =
		(struct ena_admin_aenq_link_change_desc *)aenq_e;
	int status = aenq_desc->flags &
		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

	if (status) {
		netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
			netif_carrier_on(adapter->netdev);
	} else {
		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_off(adapter->netdev);
	}
}
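
/* ena_keep_alive_wd - Keep-alive AENQ handler: refresh the watchdog
 * timestamp and record the RX/TX drop counters reported by the device.
 */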
static void ena_keep_alive_wd(void *adapter_data,
			      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_keep_alive_desc *desc;
	u64 rx_drops;
	u64 tx_drops;

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	adapter->last_keep_alive_jiffies = jiffies;

	rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
	tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;

	u64_stats_update_begin(&adapter->syncp);
	/* These stats are accumulated by the device, so the counters indicate
	 * all drops since last reset.
	 */
	adapter->dev_stats.rx_drops = rx_drops;
	adapter->dev_stats.tx_drops = tx_drops;
	u64_stats_update_end(&adapter->syncp);
}
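
/* ena_notification - Notification AENQ handler. Only ENA_ADMIN_UPDATE_HINTS
 * is currently handled, by applying the hardware hints carried in the
 * descriptor.
 */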
static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	     "Invalid group(%x) expected %x\n",
	     aenq_e->aenq_common_desc.group,
	     ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrome) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid aenq notification link state %d\n",
			  aenq_e->aenq_common_desc.syndrome);
	}
}

/* This handler will be called for an unknown event group or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	netif_err(adapter, drv, adapter->netdev,
		  "Unknown event was received or event with unimplemented handler\n");
}
static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

module_init(ena_init);
module_exit(ena_cleanup);