/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "workarounds.h"
#include "ef10_regs.h"
#ifdef EFX_USE_PIO

#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;

#endif /* EFX_USE_PIO */
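/* The default PIO copy threshold above is 256 bytes rounded up to the L1
 * cache line size (typically 64 bytes, leaving it at 256).  Packets no
 * longer than efx_piobuf_size may take the PIO fast path in
 * efx_enqueue_skb() instead of being DMA mapped.
 */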
static inline unsigned int
efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
{
	return tx_queue->insert_count & tx_queue->ptr_mask;
}
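/* The ring size is always a power of two, so masking with ptr_mask is
 * equivalent to a modulo: with a 512-entry ring, ptr_mask is 0x1ff and an
 * insert_count of 513 wraps to slot 1.
 */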
static inline struct efx_tx_buffer *
__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
{
	return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
}
static inline struct efx_tx_buffer *
efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer =
		__efx_tx_queue_get_insert_buffer(tx_queue);

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	return buffer;
}
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_kfree_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_HEAP) {
		kfree(buffer->heap_buf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb);
static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
{
	/* Depending on the NIC revision, we can use descriptor
	 * lengths up to 8K or 8K-1.  However, since PCI Express
	 * devices must split read requests at 4K boundaries, there is
	 * little benefit from using descriptors that cross those
	 * boundaries and we keep things simple by not doing so.
	 */
	unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

	/* Work around hardware bug for unaligned buffers. */
	if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
		len = min_t(unsigned, len, 512 - (dma_addr & 0xf));

	return len;
}
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround,
	 * or for option descriptors
	 */
	if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}
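/* Rough sizing example, assuming EFX_TSO_MAX_SEGS is 100 and MAX_SKB_FRAGS
 * is 17: the base estimate is 100 * 2 + 17 = 217 descriptors, and an EF10
 * (Huntington) NIC adds one option descriptor per segment for a worst case
 * of 317.
 */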
/* Get partner of a TX queue, seen as part of the same net core queue */
static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
{
	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
	else
		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
}
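/* Each core netdev queue is backed by a pair of hardware queues, one with
 * checksum offload and one without, adjacent in the tx_queue array.
 * Assuming EFX_TXQ_TYPE_OFFLOAD is the low bit of the queue number, the
 * partner is simply the other queue of the pair, one step away in either
 * direction.
 */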
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
	struct efx_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}
#ifdef EFX_USE_PIO

struct efx_short_copy_buffer {
	int used;
	u8 buf[L1_CACHE_BYTES];
};
/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
				    u8 *data, int len,
				    struct efx_short_copy_buffer *copy_buf)
{
	int block_len = len & ~(sizeof(copy_buf->buf) - 1);

	memcpy_toio(*piobuf, data, block_len);
	*piobuf += block_len;
	len -= block_len;

	if (len) {
		data += block_len;
		BUG_ON(copy_buf->used);
		BUG_ON(len > sizeof(copy_buf->buf));
		memcpy(copy_buf->buf, data, len);
		copy_buf->used = len;
	}
}
/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
 * Advances piobuf pointer. Leaves additional data in the copy buffer.
 */
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
				       u8 *data, int len,
				       struct efx_short_copy_buffer *copy_buf)
{
	if (copy_buf->used) {
		/* if the copy buffer is partially full, fill it up and write */
		int copy_to_buf =
			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);

		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
		copy_buf->used += copy_to_buf;

		/* if we didn't fill it up then we're done for now */
		if (copy_buf->used < sizeof(copy_buf->buf))
			return;

		memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
		*piobuf += sizeof(copy_buf->buf);
		data += copy_to_buf;
		len -= copy_to_buf;
		copy_buf->used = 0;
	}

	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}
static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
				  struct efx_short_copy_buffer *copy_buf)
{
	/* if there's anything in it, write the whole buffer, including junk */
	if (copy_buf->used)
		memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
}
/* Traverse skb structure and copy fragments in to PIO buffer.
 * Advances piobuf pointer.
 */
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
				     u8 __iomem **piobuf,
				     struct efx_short_copy_buffer *copy_buf)
{
	int i;

	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
				copy_buf);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		vaddr = kmap_atomic(skb_frag_page(f));

		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
					   skb_frag_size(f), copy_buf);
		kunmap_atomic(vaddr);
	}

	EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
}
static struct efx_tx_buffer *
efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	u8 __iomem *piobuf = tx_queue->piobuf;

	/* Copy to PIO buffer. Ensure the writes are padded to the end
	 * of a cache line, as this is required for write-combining to be
	 * effective on at least x86.
	 */

	if (skb_shinfo(skb)->nr_frags) {
		/* The size of the copy buffer will ensure all writes
		 * are the size of a cache line.
		 */
		struct efx_short_copy_buffer copy_buf;

		copy_buf.used = 0;

		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
					 &piobuf, &copy_buf);
		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
	} else {
		/* Pad the write to the size of a cache line.
		 * We can do this because we know the skb_shared_info struct is
		 * after the source, and the destination buffer is big enough.
		 */
		BUILD_BUG_ON(L1_CACHE_BYTES >
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
		memcpy_toio(tx_queue->piobuf, skb->data,
			    ALIGN(skb->len, L1_CACHE_BYTES));
	}

	EFX_POPULATE_QWORD_5(buffer->option,
			     ESF_DZ_TX_DESC_IS_OPT, 1,
			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
			     ESF_DZ_TX_PIO_CONT, 0,
			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
			     ESF_DZ_TX_PIO_BUF_ADDR,
			     tx_queue->piobuf_offset);
	++tx_queue->pio_packets;
	++tx_queue->insert_count;

	return buffer;
}
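/* Note on when this path is taken: efx_enqueue_skb() below only uses PIO
 * for packets no longer than efx_piobuf_size, and only when both hardware
 * queues of the pair are currently empty, so a PIO option descriptor is
 * only ever pushed onto an otherwise idle queue.
 */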
322 #endif /* EFX_USE_PIO */
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	struct efx_tx_buffer *buffer;
	skb_frag_t *fragment;
	unsigned int len, unmap_len = 0;
	dma_addr_t dma_addr, unmap_addr = 0;
	unsigned int dma_len;
	unsigned short dma_flags;
	int i = 0;

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	if (skb_shinfo(skb)->gso_size)
		return efx_enqueue_skb_tso(tx_queue, skb);

	/* Get size of the initial fragment */
	len = skb_headlen(skb);

	/* Pad if necessary */
	if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
		EFX_BUG_ON_PARANOID(skb->data_len);
		len = 32 + 1;
		if (skb_pad(skb, len - skb->len))
			return NETDEV_TX_OK;
	}

	/* Consider using PIO for short packets */
#ifdef EFX_USE_PIO
	if (skb->len <= efx_piobuf_size && tx_queue->piobuf &&
	    efx_nic_tx_is_empty(tx_queue) &&
	    efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) {
		buffer = efx_enqueue_skb_pio(tx_queue, skb);
		dma_flags = EFX_TX_BUF_OPTION;
		goto finish_packet;
	}
#endif

	/* Map for DMA.  Use dma_map_single rather than dma_map_page
	 * since this is more efficient on machines with sparse
	 * memory.
	 */
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);

	/* Process all fragments */
	while (1) {
		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			goto dma_err;

		/* Store fields for marking in the per-fragment final
		 * descriptor */
		unmap_len = len;
		unmap_addr = dma_addr;

		/* Add to TX queue, splitting across DMA boundaries */
		do {
			buffer = efx_tx_queue_get_insert_buffer(tx_queue);

			dma_len = efx_max_tx_len(efx, dma_addr);
			if (likely(dma_len >= len))
				dma_len = len;

			/* Fill out per descriptor fields */
			buffer->len = dma_len;
			buffer->dma_addr = dma_addr;
			buffer->flags = EFX_TX_BUF_CONT;
			len -= dma_len;
			dma_addr += dma_len;
			++tx_queue->insert_count;
		} while (len);

		/* Transfer ownership of the unmapping to the final buffer */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;
		unmap_len = 0;

		/* Get address and size of next fragment */
		if (i >= skb_shinfo(skb)->nr_frags)
			break;
		fragment = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(fragment);
		i++;
		/* Map for DMA */
		dma_flags = 0;
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
	}

	/* Transfer ownership of the skb to the final buffer */
#ifdef EFX_USE_PIO
finish_packet:
#endif
	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB | dma_flags;

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	return NETDEV_TX_OK;

 dma_err:
	netif_err(efx, tx_err, efx->net_dev,
		  " TX queue %d could not map skb with %d bytes %d "
		  "fragments for DMA\n", tx_queue->queue, skb->len,
		  skb_shinfo(skb)->nr_frags + 1);

	/* Mark the packet as transmitted, and free the SKB ourselves */
	dev_kfree_skb_any(skb);

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}

	/* Free the fragment we were mid-way through pushing */
	if (unmap_len) {
		if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
				       DMA_TO_DEVICE);
	}

	return NETDEV_TX_OK;
}
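/* Descriptor layout example for efx_enqueue_skb(): an skb with a linear
 * header and two page fragments typically produces three DMA mappings, each
 * split further only where it crosses an EFX_PAGE_SIZE boundary.  Every
 * descriptor starts out as EFX_TX_BUF_CONT; the last descriptor of each
 * mapping takes over the unmap information, and the very last one also gets
 * EFX_TX_BUF_SKB so that completion frees the skb exactly once.
 */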
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}
/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		return efx_ptp_tx(efx, skb);
	}

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = efx_get_tx_queue(efx, index, type);

	return efx_enqueue_skb(tx_queue, skb);
}
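/* Mapping example: with n_tx_channels == 4, core queues 0-3 select the
 * normal-priority hardware queues of channels 0-3, while core queues 4-7
 * wrap back to channels 0-3 with EFX_TXQ_TYPE_HIGHPRI set; a
 * CHECKSUM_PARTIAL skb additionally ORs in EFX_TXQ_TYPE_OFFLOAD to pick the
 * checksum-offloading queue of the pair.  efx_init_tx_queue_core_txq()
 * below applies the inverse mapping.
 */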
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EFX_TXQ_TYPES +
				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}
int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned tc;
	int rc;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		efx_for_each_channel(channel, efx) {
			efx_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = efx_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					efx_init_tx_queue(tx_queue);
				efx_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to efx_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct efx_nic *efx = tx_queue->efx;
	struct efx_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = efx_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}
/* Size of page-based TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE	128
#define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an skb.
 */
static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
}
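/* Worked example: with 4K pages TSOH_PER_PAGE is 4096 / 128 = 32, so a
 * 1024-entry TX ring needs DIV_ROUND_UP(1024, 2 * 32) = 16 header pages.
 */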
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
		tx_queue->tsoh_page =
			kcalloc(efx_tsoh_page_count(tx_queue),
				sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
		if (!tx_queue->tsoh_page) {
			rc = -ENOMEM;
			goto fail1;
		}
	}

	/* Allocate hardware ring */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->tsoh_page);
	tx_queue->tsoh_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	netdev_tx_reset_queue(tx_queue->core_txq);
}
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->tsoh_page) {
		for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
			efx_nic_free_buffer(tx_queue->efx,
					    &tx_queue->tsoh_page[i]);
		kfree(tx_queue->tsoh_page);
		tx_queue->tsoh_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}
/* Efx TCP segmentation acceleration.
 *
 * Why?  Because by doing it here in the driver we can go significantly
 * faster than the GSO.
 *
 * Requires TX checksum offload support.
 */

/* Number of bytes inserted at the start of a TSO header buffer,
 * similar to NET_IP_ALIGN.
 */
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define TSOH_OFFSET	0
#else
#define TSOH_OFFSET	NET_IP_ALIGN
#endif

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
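/* On architectures without efficient unaligned access, TSOH_OFFSET is
 * NET_IP_ALIGN (typically 2), which offsets the copied Ethernet header so
 * that the IP header following it lands on a 4-byte boundary within the
 * header buffer, mirroring what NET_IP_ALIGN does for received frames.
 */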
/**
 * struct tso_state - TSO state for an SKB
 * @out_len: Remaining length in current segment
 * @seqnum: Current sequence number
 * @ipv4_id: Current IPv4 ID, host endian
 * @packet_space: Remaining space in current packet
 * @dma_addr: DMA address of current position
 * @in_len: Remaining length in current SKB fragment
 * @unmap_len: Length of SKB fragment
 * @unmap_addr: DMA address of SKB fragment
 * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
 * @protocol: Network protocol (after any VLAN header)
 * @ip_off: Offset of IP header
 * @tcp_off: Offset of TCP header
 * @header_len: Number of bytes of header
 * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
 * @header_dma_addr: Header DMA address, when using option descriptors
 * @header_unmap_len: Header DMA mapped length, or 0 if not using option
 *	descriptors
 *
 * The state used during segmentation.  It is put into this data structure
 * just to make it easy to pass into inline functions.
 */
struct tso_state {
	/* Output position */
	unsigned out_len;
	unsigned seqnum;
	u16 ipv4_id;
	unsigned packet_space;

	/* Input position */
	dma_addr_t dma_addr;
	unsigned in_len;
	unsigned unmap_len;
	dma_addr_t unmap_addr;
	unsigned short dma_flags;

	__be16 protocol;
	unsigned int ip_off;
	unsigned int tcp_off;
	unsigned header_len;
	unsigned int ip_base_len;
	dma_addr_t header_dma_addr;
	unsigned int header_unmap_len;
};
/*
 * Verify that our various assumptions about sk_buffs and the conditions
 * under which TSO will be attempted hold true.  Return the protocol number.
 */
static __be16 efx_tso_check_protocol(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
			    protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (protocol == htons(ETH_P_IP)) {
		EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	} else {
		EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
		EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	}

	EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
			     + (tcp_hdr(skb)->doff << 2u)) >
			    skb_headlen(skb));

	return protocol;
}
static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer, unsigned int len)
{
	u8 *result;

	EFX_BUG_ON_PARANOID(buffer->len);
	EFX_BUG_ON_PARANOID(buffer->flags);
	EFX_BUG_ON_PARANOID(buffer->unmap_len);

	if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
		unsigned index =
			(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
		struct efx_buffer *page_buf =
			&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
		unsigned offset =
			TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;

		if (unlikely(!page_buf->addr) &&
		    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
					 GFP_ATOMIC))
			return NULL;

		result = (u8 *)page_buf->addr + offset;
		buffer->dma_addr = page_buf->dma_addr + offset;
		buffer->flags = EFX_TX_BUF_CONT;
	} else {
		tx_queue->tso_long_headers++;

		buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
		if (unlikely(!buffer->heap_buf))
			return NULL;
		result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
	}

	buffer->len = len;

	return result;
}
/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * @tx_queue:		Efx TX queue
 * @dma_addr:		DMA address of fragment
 * @len:		Length of fragment
 * @final_buffer:	The final buffer inserted into the queue
 *
 * Push descriptors onto the TX queue.
 */
static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
				dma_addr_t dma_addr, unsigned len,
				struct efx_tx_buffer **final_buffer)
{
	struct efx_tx_buffer *buffer;
	struct efx_nic *efx = tx_queue->efx;
	unsigned dma_len;

	EFX_BUG_ON_PARANOID(len <= 0);

	while (1) {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		++tx_queue->insert_count;

		EFX_BUG_ON_PARANOID(tx_queue->insert_count -
				    tx_queue->read_count >=
				    efx->txq_entries);

		buffer->dma_addr = dma_addr;

		dma_len = efx_max_tx_len(efx, dma_addr);

		/* If there is enough space to send then do so */
		if (dma_len >= len)
			break;

		buffer->len = dma_len;
		buffer->flags = EFX_TX_BUF_CONT;
		dma_addr += dma_len;
		len -= dma_len;
	}

	EFX_BUG_ON_PARANOID(!len);
	buffer->len = len;
	*final_buffer = buffer;
}
/*
 * Put a TSO header into the TX queue.
 *
 * This is special-cased because we know that it is small enough to fit in
 * a single fragment, and we know it doesn't cross a page boundary.  It
 * also allows us to not worry about end-of-packet etc.
 */
static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
			      struct efx_tx_buffer *buffer, u8 *header)
{
	if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
		buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
						  header, buffer->len,
						  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
					       buffer->dma_addr))) {
			kfree(buffer->heap_buf);
			buffer->len = 0;
			buffer->flags = 0;
			return -ENOMEM;
		}
		buffer->unmap_len = buffer->len;
		buffer->dma_offset = 0;
		buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
	}

	++tx_queue->insert_count;
	return 0;
}
/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}
/* Parse the SKB header and initialise state. */
static int tso_start(struct tso_state *st, struct efx_nic *efx,
		     const struct sk_buff *skb)
{
	bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int header_len, in_len;
	dma_addr_t dma_addr;

	st->ip_off = skb_network_header(skb) - skb->data;
	st->tcp_off = skb_transport_header(skb) - skb->data;
	header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
	in_len = skb_headlen(skb) - header_len;
	st->header_len = header_len;
	st->in_len = in_len;
	if (st->protocol == htons(ETH_P_IP)) {
		st->ip_base_len = st->header_len - st->ip_off;
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	} else {
		st->ip_base_len = st->header_len - st->tcp_off;
		st->ipv4_id = 0;
	}
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);

	st->out_len = skb->len - header_len;

	if (!use_options) {
		st->header_unmap_len = 0;

		if (likely(in_len == 0)) {
			st->dma_flags = 0;
			st->unmap_len = 0;
			return 0;
		}

		dma_addr = dma_map_single(dma_dev, skb->data + header_len,
					  in_len, DMA_TO_DEVICE);
		st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
		st->dma_addr = dma_addr;
		st->unmap_addr = dma_addr;
		st->unmap_len = in_len;
	} else {
		dma_addr = dma_map_single(dma_dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
		st->header_dma_addr = dma_addr;
		st->header_unmap_len = skb_headlen(skb);
		st->dma_flags = 0;
		st->dma_addr = dma_addr + header_len;
		st->unmap_len = 0;
	}

	return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
}
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
			    skb_frag_t *frag)
{
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->dma_flags = 0;
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
}
/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
					  const struct sk_buff *skb,
					  struct tso_state *st)
{
	struct efx_tx_buffer *buffer;
	int n;

	if (st->in_len == 0)
		return;
	if (st->packet_space == 0)
		return;

	EFX_BUG_ON_PARANOID(st->in_len <= 0);
	EFX_BUG_ON_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

	if (st->out_len == 0) {
		/* Transfer ownership of the skb */
		buffer->skb = skb;
		buffer->flags = EFX_TX_BUF_SKB;
	} else if (st->packet_space != 0) {
		buffer->flags = EFX_TX_BUF_CONT;
	}

	if (st->in_len == 0) {
		/* Transfer ownership of the DMA mapping */
		buffer->unmap_len = st->unmap_len;
		buffer->dma_offset = buffer->unmap_len - buffer->len;
		buffer->flags |= st->dma_flags;
		st->unmap_len = 0;
	}

	st->dma_addr += n;
}
/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 * @st:			TSO state
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or -%ENOMEM if failed to alloc header.
 */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
				const struct sk_buff *skb,
				struct tso_state *st)
{
	struct efx_tx_buffer *buffer =
		efx_tx_queue_get_insert_buffer(tx_queue);
	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
	u8 tcp_flags_clear;

	if (!is_last) {
		st->packet_space = skb_shinfo(skb)->gso_size;
		tcp_flags_clear = 0x09; /* mask out FIN and PSH */
	} else {
		st->packet_space = st->out_len;
		tcp_flags_clear = 0x00;
	}

	if (!st->header_unmap_len) {
		/* Allocate and insert a DMA-mapped header buffer. */
		struct tcphdr *tsoh_th;
		unsigned ip_length;
		u8 *header;
		int rc;

		header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
		if (!header)
			return -ENOMEM;

		tsoh_th = (struct tcphdr *)(header + st->tcp_off);

		/* Copy and update the headers. */
		memcpy(header, skb->data, st->header_len);

		tsoh_th->seq = htonl(st->seqnum);
		((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;

		ip_length = st->ip_base_len + st->packet_space;

		if (st->protocol == htons(ETH_P_IP)) {
			struct iphdr *tsoh_iph =
				(struct iphdr *)(header + st->ip_off);

			tsoh_iph->tot_len = htons(ip_length);
			tsoh_iph->id = htons(st->ipv4_id);
		} else {
			struct ipv6hdr *tsoh_iph =
				(struct ipv6hdr *)(header + st->ip_off);

			tsoh_iph->payload_len = htons(ip_length);
		}

		rc = efx_tso_put_header(tx_queue, buffer, header);
		if (unlikely(rc))
			return rc;
	} else {
		/* Send the original headers with a TSO option descriptor
		 * in front
		 */
		u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;

		buffer->flags = EFX_TX_BUF_OPTION;
		buffer->len = 0;
		buffer->unmap_len = 0;
		EFX_POPULATE_QWORD_5(buffer->option,
				     ESF_DZ_TX_DESC_IS_OPT, 1,
				     ESF_DZ_TX_OPTION_TYPE,
				     ESE_DZ_TX_OPTION_DESC_TSO,
				     ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
				     ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
				     ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
		++tx_queue->insert_count;

		/* We mapped the headers in tso_start().  Unmap them
		 * when the last segment is completed.
		 */
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
		buffer->dma_addr = st->header_dma_addr;
		buffer->len = st->header_len;
		if (is_last) {
			buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
			buffer->unmap_len = st->header_unmap_len;
			buffer->dma_offset = 0;
			/* Ensure we only unmap them once in case of a
			 * later DMA mapping error and rollback
			 */
			st->header_unmap_len = 0;
		} else {
			buffer->flags = EFX_TX_BUF_CONT;
			buffer->unmap_len = 0;
		}
		++tx_queue->insert_count;
	}

	st->seqnum += skb_shinfo(skb)->gso_size;

	/* Linux leaves suitable gaps in the IP ID space for us to fill. */
	++st->ipv4_id;

	++tx_queue->tso_packets;

	return 0;
}
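/* Segmentation example: a 9000-byte TCP payload with gso_size 1460 becomes
 * seven packets.  Each call here advances seqnum by 1460, so the Nth segment
 * (counting from zero) carries sequence number initial_seq + N * 1460, and
 * the IPv4 ID goes up by one per segment; only the final 240-byte segment
 * keeps FIN/PSH unmasked.
 */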
/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * @tx_queue:		Efx TX queue
 * @skb:		Socket buffer
 *
 * Context: You must hold netif_tx_lock() to call this function.
 *
 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
 * @skb was not enqueued.  In all cases @skb is consumed.  Return
 * %NETDEV_TX_OK.
 */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
			       struct sk_buff *skb)
{
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc;
	struct tso_state state;

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	rc = tso_start(&state, efx, skb);
	if (rc)
		goto mem_err;

	if (likely(state.in_len == 0)) {
		/* Grab the first payload fragment. */
		EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
		frag_i = 0;
		rc = tso_get_fragment(&state, efx,
				      skb_shinfo(skb)->frags + frag_i);
		if (rc)
			goto mem_err;
	} else {
		/* Payload starts in the header area. */
		frag_i = -1;
	}

	if (tso_start_new_packet(tx_queue, skb, &state) < 0)
		goto mem_err;

	while (1) {
		tso_fill_packet_with_fragment(tx_queue, skb, &state);

		/* Move onto the next fragment? */
		if (state.in_len == 0) {
			if (++frag_i >= skb_shinfo(skb)->nr_frags)
				/* End of payload reached. */
				break;
			rc = tso_get_fragment(&state, efx,
					      skb_shinfo(skb)->frags + frag_i);
			if (rc)
				goto mem_err;
		}

		/* Start at new packet? */
		if (state.packet_space == 0 &&
		    tso_start_new_packet(tx_queue, skb, &state) < 0)
			goto mem_err;
	}

	netdev_tx_sent_queue(tx_queue->core_txq, skb->len);

	/* Pass off to hardware */
	efx_nic_push_buffers(tx_queue);

	efx_tx_maybe_stop_queue(tx_queue);

	tx_queue->tso_bursts++;
	return NETDEV_TX_OK;

 mem_err:
	netif_err(efx, tx_err, efx->net_dev,
		  "Out of memory for TSO headers, or DMA mapping error\n");
	dev_kfree_skb_any(skb);

	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len) {
		if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
					 state.unmap_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
				       state.unmap_len, DMA_TO_DEVICE);
	}

	/* Free the header DMA mapping, if using option descriptors */
	if (state.header_unmap_len)
		dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
				 state.header_unmap_len, DMA_TO_DEVICE);

	efx_enqueue_unwind(tx_queue);
	return NETDEV_TX_OK;
}