/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "workarounds.h"
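
/* Return the copy buffer backing a given insert position, allocating the
 * page of copy buffers on first use.  Short packets are copied into these
 * buffers so they can be padded and sent from a single descriptor.
 */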
static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,
					 struct ef4_tx_buffer *buffer)
{
	unsigned int index = ef4_tx_queue_get_insert_index(tx_queue);
	struct ef4_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)];
	unsigned int offset =
		((index << EF4_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	if (unlikely(!page_buf->addr) &&
	    ef4_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;
	buffer->dma_addr = page_buf->dma_addr + offset;
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}
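
/* As ef4_tx_get_copy_buffer(), but fail if the data does not fit in one
 * copy buffer.
 */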
u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
				   struct ef4_tx_buffer *buffer, size_t len)
{
	if (len > EF4_TX_CB_SIZE)
		return NULL;
	return ef4_tx_get_copy_buffer(tx_queue, buffer);
}
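
/* Release a descriptor: undo any DMA mapping it owns, free the skb on a
 * packet's final descriptor, and update the caller's completion counters.
 */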
static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,
			       struct ef4_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EF4_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EF4_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}

	buffer->len = 0;
	buffer->flags = 0;
}
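
/* Worst-case number of descriptors that one SKB may require; used to size
 * the queue stop/wake thresholds conservatively.
 */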
unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx)
{
	/* This is probably too much since we don't have any TSO support;
	 * it's a left-over from when we had Software TSO.  But it's safer
	 * to leave it as-is than try to determine a new bound.
	 */

	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EF4_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround,
	 * or for option descriptors
	 */
	if (EF4_WORKAROUND_5391(efx))
		max_descs += EF4_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EF4_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EF4_PAGE_SIZE));

	return max_descs;
}
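
/* Stop the core netdev queue when either of the paired hardware queues is
 * close to filling its descriptor ring.
 */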
static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(txq1);
	struct ef4_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
	txq2->old_read_count = ACCESS_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EF4_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}
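
/* Transmit a packet by copying it into a copy buffer, padding it to the
 * queue's minimum transmit size if necessary.  Only used for packets that
 * fit within EF4_TX_CB_SIZE.
 */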
static int ef4_enqueue_skb_copy(struct ef4_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int min_len = tx_queue->tx_min_size;
	unsigned int copy_len = skb->len;
	struct ef4_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	EF4_BUG_ON_PARANOID(copy_len > EF4_TX_CB_SIZE);

	buffer = ef4_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = ef4_tx_get_copy_buffer(tx_queue, buffer);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	EF4_WARN_ON_PARANOID(rc);
	if (unlikely(copy_len < min_len)) {
		memset(copy_buffer + copy_len, 0, min_len - copy_len);
		buffer->len = min_len;
	} else {
		buffer->len = copy_len;
	}

	buffer->skb = skb;
	buffer->flags = EF4_TX_BUF_SKB;

	++tx_queue->insert_count;
	return 0;
}
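
/* Write one or more continuation descriptors for a DMA-contiguous chunk,
 * splitting it wherever the NIC's descriptor length limit requires, and
 * return the last descriptor so the caller can adjust its flags.
 */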
static struct ef4_tx_buffer *ef4_tx_map_chunk(struct ef4_tx_queue *tx_queue,
					      dma_addr_t dma_addr,
					      size_t len)
{
	const struct ef4_nic_type *nic_type = tx_queue->efx->type;
	struct ef4_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = ef4_tx_queue_get_insert_buffer(tx_queue);
		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EF4_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}
/* Map all data from an SKB for DMA and create descriptors on the queue.
 */
static int ef4_tx_map_data(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct ef4_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EF4_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	/* Add descriptors for each fragment. */
	do {
		struct ef4_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = ef4_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EF4_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EF4_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment,
					    0, len, DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}
/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void ef4_enqueue_unwind(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = __ef4_tx_queue_get_insert_buffer(tx_queue);
		ef4_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}
/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from ef4_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
{
	bool data_mapped = false;
	unsigned int skb_len;

	skb_len = skb->len;
	EF4_WARN_ON_PARANOID(skb_is_gso(skb));

	if (skb_len < tx_queue->tx_min_size ||
	    (skb->data_len && skb_len <= EF4_TX_CB_SIZE)) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (ef4_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (ef4_tx_map_data(tx_queue, skb)))
		goto err;

	/* Update BQL */
	netdev_tx_sent_queue(tx_queue->core_txq, skb_len);

	/* Pass off to hardware */
	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
		struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);

		/* There could be packets left on the partner queue if those
		 * SKBs had skb->xmit_more set.  If we do not push those they
		 * could be left for a long time and cause a netdev watchdog.
		 */
		if (txq2->xmit_more_available)
			ef4_nic_push_buffers(txq2);

		ef4_nic_push_buffers(tx_queue);
	} else {
		tx_queue->xmit_more_available = skb->xmit_more;
	}

	tx_queue->tx_packets++;

	ef4_tx_maybe_stop_queue(tx_queue);

	return NETDEV_TX_OK;


err:
	ef4_enqueue_unwind(tx_queue);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void ef4_dequeue_buffers(struct ef4_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct ef4_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EF4_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			ef4_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		ef4_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}
/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_tx_queue *tx_queue;
	unsigned index, type;

	EF4_WARN_ON_PARANOID(!netif_device_present(net_dev));

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EF4_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EF4_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = ef4_get_tx_queue(efx, index, type);

	return ef4_enqueue_skb(tx_queue, skb);
}
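
/* Associate a hardware TX queue with its core netdev TX queue. */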
void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in ef4_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EF4_TXQ_TYPES +
				    ((tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}
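
/* ndo_setup_tc handler: map traffic classes onto the normal and
 * high-priority TX queues, creating high-priority queues on first use.
 */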
int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
		 struct tc_to_netdev *ntc)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	unsigned tc, num_tc;
	int rc;

	if (ntc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	num_tc = ntc->tc;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC)
		return -EINVAL;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		ef4_for_each_channel(channel, efx) {
			ef4_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = ef4_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					ef4_init_tx_queue(tx_queue);
				ef4_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to ef4_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}
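
/* Handle TX completions up to and including @index: release the completed
 * descriptors, wake the core queue if it has drained enough, and note when
 * the hardware queue becomes empty.
 */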
void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct ef4_nic *efx = tx_queue->efx;
	struct ef4_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EF4_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	ef4_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * ef4_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = ef4_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EF4_EMPTY_COUNT_VALID;
		}
	}
}
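
/* Number of pages needed to hold one copy buffer per ring entry. */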
static unsigned int ef4_tx_cb_page_count(struct ef4_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EF4_TX_CB_ORDER);
}
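
/* Allocate the software ring, copy buffer pointers and hardware ring for a
 * TX queue.
 */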
int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EF4_MIN_DMAQ_SIZE);
	EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(ef4_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = ef4_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}
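
/* Reset a TX queue's counters and (re)initialise its descriptor ring. */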
void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EF4_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;

	/* Some older hardware requires Tx writes larger than 32. */
	tx_queue->tx_min_size = EF4_WORKAROUND_15592(efx) ? 33 : 0;

	/* Set up TX descriptor ring */
	ef4_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}
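
/* Free any buffers still outstanding on the queue and reset the core
 * queue's byte-queue accounting.
 */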
void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		ef4_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}
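
/* Free the hardware ring, copy buffer pages and software ring. */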
void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	ef4_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < ef4_tx_cb_page_count(tx_queue); i++)
			ef4_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}