// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */
#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "tx_common.h"
#include "workarounds.h"
#include "ef10_regs.h"

#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */
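
/* Per-queue copy buffers: each insert index maps to a (1 << EFX_TX_CB_ORDER)
 * byte slot within the queue's cb_page pages, so short or fragmented packets
 * can be copied into pre-mapped DMA memory instead of being mapped per packet.
 */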
static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
					 struct efx_tx_buffer *buffer)
{
	unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
	struct efx_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
	unsigned int offset =
		((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	if (unlikely(!page_buf->addr) &&
	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;

	buffer->dma_addr = page_buf->dma_addr + offset;
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}
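
/* Length-checked variant: returns NULL if len will not fit in one copy-buffer
 * slot, so the caller can fall back to another transmit path.
 */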
u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
				   struct efx_tx_buffer *buffer, size_t len)
{
	if (len > EFX_TX_CB_SIZE)
		return NULL;
	return efx_tx_get_copy_buffer(tx_queue, buffer);
}
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = READ_ONCE(txq1->read_count);
	txq2->old_read_count = READ_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}
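
/* Copy an entire (short) skb into the queue's copy buffer so that it can be
 * sent from pre-mapped memory without a per-packet DMA mapping.
 */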
static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int copy_len = skb->len;
	struct efx_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

	buffer = efx_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	EFX_WARN_ON_PARANOID(rc);
	buffer->len = copy_len;

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB;

	++tx_queue->insert_count;
	return rc;
}
#ifdef EFX_USE_PIO

struct efx_short_copy_buffer {
	int used;
	u8 buf[L1_CACHE_BYTES];
};
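
/* The bounce buffer above holds exactly one cache line of data, so every
 * write it makes to the PIO aperture is cache-line sized; this keeps
 * write-combining effective (see efx_enqueue_skb_pio() below).
 */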
/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
				    u8 *data, int len,
				    struct efx_short_copy_buffer *copy_buf)
{
	int block_len = len & ~(sizeof(copy_buf->buf) - 1);

	__iowrite64_copy(*piobuf, data, block_len >> 3);
	*piobuf += block_len;
	len -= block_len;

	if (len) {
		data += block_len;
		BUG_ON(copy_buf->used);
		BUG_ON(len > sizeof(copy_buf->buf));
		memcpy(copy_buf->buf, data, len);
		copy_buf->used = len;
	}
}
/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
				       u8 *data, int len,
				       struct efx_short_copy_buffer *copy_buf)
{
	if (copy_buf->used) {
		/* if the copy buffer is partially full, fill it up and write */
		int copy_to_buf =
			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
		copy_buf->used += copy_to_buf;

		/* if we didn't fill it up then we're done for now */
		if (copy_buf->used < sizeof(copy_buf->buf))
			return;

		__iowrite64_copy(*piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
		*piobuf += sizeof(copy_buf->buf);
		data += copy_to_buf;
		len -= copy_to_buf;
		copy_buf->used = 0;
	}

	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}
static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
				  struct efx_short_copy_buffer *copy_buf)
{
	/* if there's anything in it, write the whole buffer, including junk */
	if (copy_buf->used)
		__iowrite64_copy(piobuf, copy_buf->buf,
				 sizeof(copy_buf->buf) >> 3);
}
/* Traverse skb structure and copy fragments in to PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
				     u8 __iomem **piobuf,
				     struct efx_short_copy_buffer *copy_buf)
{
	int i;

	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
				copy_buf);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		vaddr = kmap_atomic(skb_frag_page(f));

		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
					   skb_frag_size(f), copy_buf);
		kunmap_atomic(vaddr);
	}

	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}
static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	u8 __iomem *piobuf = tx_queue->piobuf;

	/* Copy to PIO buffer. Ensure the writes are padded to the end
	 * of a cache line, as this is required for write-combining to be
	 * effective on at least x86.
	 */

	if (skb_shinfo(skb)->nr_frags) {
		/* The size of the copy buffer will ensure all writes
		 * are the size of a cache line.
		 */
		struct efx_short_copy_buffer copy_buf;

		copy_buf.used = 0;

		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
					 &piobuf, &copy_buf);
		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
	} else {
		/* Pad the write to the size of a cache line.
		 * We can do this because we know the skb_shared_info struct is
		 * after the source, and the destination buffer is big enough.
		 */
		BUILD_BUG_ON(L1_CACHE_BYTES >
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
		__iowrite64_copy(tx_queue->piobuf, skb->data,
				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
	}

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;

	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
			     ESF_DZ_TX_PIO_CONT, 0,
			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
			     ESF_DZ_TX_PIO_BUF_ADDR,
			     tx_queue->piobuf_offset);
	++tx_queue->insert_count;
	return 0;
}
#endif /* EFX_USE_PIO */
/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);

	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
	}

	return 0;
}
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	unsigned int old_insert_count = tx_queue->insert_count;
	bool xmit_more = netdev_xmit_more();
	bool data_mapped = false;
	unsigned int segments;
	unsigned int skb_len;
	int rc;

	skb_len = skb->len;
	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
	if (segments == 1)
		segments = 0; /* Don't use TSO for a single segment. */

	/* Handle TSO first - it's *possible* (although unlikely) that we might
	 * be passed a packet to segment that's smaller than the copybreak/PIO
	 * size limit.
	 */
	if (segments) {
		EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
		rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
		if (rc == -EINVAL) {
			rc = efx_tx_tso_fallback(tx_queue, skb);
			tx_queue->tso_fallbacks++;
			if (rc == 0)
				return 0;
		}
		if (rc)
			goto err;
#ifdef EFX_USE_PIO
	} else if (skb_len <= efx_piobuf_size && !xmit_more &&
		   efx_nic_may_tx_pio(tx_queue)) {
		/* Use PIO for short packets with an empty queue. */
		if (efx_enqueue_skb_pio(tx_queue, skb))
			goto err;
		tx_queue->pio_packets++;
		data_mapped = true;
#endif
	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (efx_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
		goto err;

	efx_tx_maybe_stop_queue(tx_queue);

	/* Pass off to hardware */
	if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		/* There could be packets left on the partner queue if
		 * xmit_more was set. If we do not push those they
		 * could be left for a long time and cause a netdev watchdog.
		 */
		if (txq2->xmit_more_available)
			efx_nic_push_buffers(txq2);

		efx_nic_push_buffers(tx_queue);
	} else {
		tx_queue->xmit_more_available = xmit_more;
	}

	if (segments) {
		tx_queue->tso_bursts++;
		tx_queue->tso_packets += segments;
		tx_queue->tx_packets  += segments;
	} else {
		tx_queue->tx_packets++;
	}

	return NETDEV_TX_OK;

err:
	efx_enqueue_unwind(tx_queue, old_insert_count);
	dev_kfree_skb_any(skb);

	/* If we're not expecting another transmit and we had something to push
	 * on this queue or a partner queue then we need to push here to get the
	 * previous packets out.
	 */
	if (!xmit_more) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		if (txq2->xmit_more_available)
			efx_nic_push_buffers(txq2);

		efx_nic_push_buffers(tx_queue);
	}

	return NETDEV_TX_OK;
}
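
/* Give back to the XDP core any frames that could not be queued for transmit. */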
static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs)
{
	int i;

	for (i = 0; i < n; i++)
		xdp_return_frame_rx_napi(xdpfs[i]);
}
/* Transmit a packet from an XDP buffer
 *
 * Returns number of packets sent on success, error code otherwise.
 * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
 * (for XDP redirect).
 */
int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
		       bool flush)
{
	struct efx_tx_buffer *tx_buffer;
	struct efx_tx_queue *tx_queue;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	unsigned int len;
	int space;
	int cpu;
	int i;

	cpu = raw_smp_processor_id();

	if (!efx->xdp_tx_queue_count ||
	    unlikely(cpu >= efx->xdp_tx_queue_count))
		return -EINVAL;

	tx_queue = efx->xdp_tx_queues[cpu];
	if (unlikely(!tx_queue))
		return -EINVAL;

	if (unlikely(n && !xdpfs))
		return -EINVAL;

	if (!n)
		return 0;

	/* Check for available space. We should never need multiple
	 * descriptors per frame.
	 */
	space = efx->txq_entries +
		tx_queue->read_count - tx_queue->insert_count;

	for (i = 0; i < n; i++) {
		xdpf = xdpfs[i];

		if (i >= space)
			break;

		/* We'll want a descriptor for this tx. */
		prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));

		len = xdpf->len;

		/* Map for DMA. */
		dma_addr = dma_map_single(&efx->pci_dev->dev,
					  xdpf->data, len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
			break;

		/* Create descriptor and set up for unmapping DMA. */
		tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
		tx_buffer->xdpf = xdpf;
		tx_buffer->flags = EFX_TX_BUF_XDP |
				   EFX_TX_BUF_MAP_SINGLE;
		tx_buffer->dma_offset = 0;
		tx_buffer->unmap_len = len;
		tx_queue->tx_packets++;
	}

	/* Pass mapped frames to hardware. */
	if (flush && i > 0)
		efx_nic_push_buffers(tx_queue);

	if (i == 0)
		return -EIO;

	efx_xdp_return_frames(n - i, xdpfs + i);

	return i;
}
/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		return efx_ptp_tx(efx, skb);
	}

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}
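
/* mqprio offload: each traffic class is mapped onto its own band of
 * per-channel TX queues; high-priority queues are probed and initialised
 * the first time an extra class requires them.
 */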
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc, num_tc;
	int rc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	num_tc = mqprio->num_tc;

	if (num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}
;