/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <net/vxlan.h>

#include "xgbe.h"
#include "xgbe-common.h"

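/* ECC error accounting tunables: the "sec" counters below track corrected
 * (single-bit) errors and the "ded" counters track detected-but-uncorrected
 * errors, per the parameter descriptions that follow.  Each counter is
 * evaluated against its threshold within a rolling period; see
 * xgbe_ecc_sec()/xgbe_ecc_ded() for how exceeding a threshold disables the
 * affected ECC reporting or stops the device.
 */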
static unsigned int ecc_sec_info_threshold = 10;
static unsigned int ecc_sec_warn_threshold = 10000;
static unsigned int ecc_sec_period = 600;
static unsigned int ecc_ded_threshold = 2;
static unsigned int ecc_ded_period = 600;

#ifdef CONFIG_AMD_XGBE_HAVE_ECC
/* Only expose the ECC parameters if supported */
module_param(ecc_sec_info_threshold, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_sec_info_threshold,
		 " ECC corrected error informational threshold setting");

module_param(ecc_sec_warn_threshold, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_sec_warn_threshold,
		 " ECC corrected error warning threshold setting");

module_param(ecc_sec_period, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");

module_param(ecc_ded_threshold, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");

module_param(ecc_ded_period, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
#endif

static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static void xgbe_stop(struct xgbe_prv_data *);

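/* Allocate zeroed memory on the node the device is attached to when
 * possible, falling back to a normal allocation if the node-local attempt
 * fails.
 */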
static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}

static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) {
		if (!pdata->channel[i])
			continue;

		kfree(pdata->channel[i]->rx_ring);
		kfree(pdata->channel[i]->tx_ring);
		kfree(pdata->channel[i]);

		pdata->channel[i] = NULL;
	}

	pdata->channel_count = 0;
}

static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	unsigned int count, i;
	unsigned int cpu;
	int node;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
	for (i = 0; i < count; i++) {
		/* Attempt to use a CPU on the node the device is on */
		cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));

		/* Set the allocation node based on the returned CPU */
		node = cpu_to_node(cpu);

		channel = xgbe_alloc_node(sizeof(*channel), node);
		if (!channel)
			goto err_mem;
		pdata->channel[i] = channel;

		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);
		channel->node = node;
		cpumask_set_cpu(cpu, &channel->affinity_mask);

		if (pdata->per_channel_irq)
			channel->dma_irq = pdata->channel_irq[i];

		if (i < pdata->tx_ring_count) {
			ring = xgbe_alloc_node(sizeof(*ring), node);
			if (!ring)
				goto err_mem;

			spin_lock_init(&ring->lock);
			ring->node = node;

			channel->tx_ring = ring;
		}

		if (i < pdata->rx_ring_count) {
			ring = xgbe_alloc_node(sizeof(*ring), node);
			if (!ring)
				goto err_mem;

			spin_lock_init(&ring->lock);
			ring->node = node;

			channel->rx_ring = ring;
		}

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: cpu=%u, node=%d\n", channel->name, cpu, node);

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
			  channel->name, channel->dma_regs, channel->dma_irq,
			  channel->tx_ring, channel->rx_ring);
	}

	pdata->channel_count = count;

	return 0;

err_mem:
	xgbe_free_channels(pdata);

	return -ENOMEM;
}

static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
	return (ring->cur - ring->dirty);
}

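/* Note: ring->cur and ring->dirty are unsigned indices that only move
 * forward, so the subtractions above stay correct across wraparound thanks
 * to unsigned arithmetic.
 */
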
static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring, unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}

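/* If descriptors were queued but the doorbell was deferred by xmit_more
 * batching, the tx_start_xmit() call above pushes them to the hardware so
 * they are not stranded while the queue is stopped.
 */
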
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

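/* Example: with a standard 1500-byte MTU the buffer works out to
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522 bytes,
 * clamped to at least XGBE_RX_MIN_BUF_SIZE and then rounded up to the next
 * XGBE_RX_BUF_ALIGN boundary.
 */
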
static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
				  struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->enable_int(channel, int_id);
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		xgbe_enable_rx_tx_int(pdata, pdata->channel[i]);
}

static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
				   struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->disable_int(channel, int_id);
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
}

static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_sec_period * HZ);
		*count = 1;
	}

	if (*count > ecc_sec_info_threshold)
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed informational threshold\n",
			      area);

	if (*count > ecc_sec_warn_threshold) {
		dev_warn_once(pdata->dev,
			      "%s ECC corrected errors exceed warning threshold\n",
			      area);
		return true;
	}

	return false;
}

static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;
	} else {
		*period = jiffies + (ecc_ded_period * HZ);
		*count = 1;
	}

	if (*count > ecc_ded_threshold) {
		netdev_alert(pdata->netdev,
			     "%s ECC detected errors exceed threshold\n",
			     area);
		return true;
	}

	return false;
}

static void xgbe_ecc_isr_task(unsigned long data)
{
	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
	unsigned int ecc_isr;
	bool stop = false;

	/* Mask status with only the interrupts we care about */
	ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
	ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
	netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
				     &pdata->tx_ded_count, "TX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
				     &pdata->rx_ded_count, "RX fifo");
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
		stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
				     &pdata->desc_ded_count,
				     "descriptor cache");
	}

	if (stop) {
		pdata->hw_if.disable_ecc_ded(pdata);
		schedule_work(&pdata->stopdev_work);
		goto out;
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
		if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
				 &pdata->tx_sec_count, "TX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
	}

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
				 &pdata->rx_sec_count, "RX fifo"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);

	if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
		if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
				 &pdata->desc_sec_count, "descriptor cache"))
			pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);

out:
	/* Clear all ECC interrupts */
	XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

	/* Reissue interrupt if status is not clear */
	if (pdata->vdata->irq_reissue_support)
		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 1);
}

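/* The XP_INT_REISSUE_EN write above re-arms the interrupt for the ECC
 * source; bit 1 appears to select ECC here, while xgbe_isr_task() below
 * uses bit 0 (plus the per-channel bits) for the device interrupt.
 */
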
static irqreturn_t xgbe_ecc_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;

	if (pdata->isr_as_tasklet)
		tasklet_schedule(&pdata->tasklet_ecc);
	else
		xgbe_ecc_isr_task((unsigned long)pdata);

	return IRQ_HANDLED;
}

static void xgbe_isr_task(unsigned long data)
{
	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr, mac_mdioisr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel[i];

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		} else {
			/* Don't clear Rx/Tx status if doing per channel DMA
			 * interrupts, these will be cleared by the ISR for
			 * per channel DMA interrupts.
			 */
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
			  mac_isr);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_TSSR=%#010x\n", mac_tssr);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				queue_work(pdata->dev_workqueue,
					   &pdata->tx_tstamp_work);
			}
		}

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
			mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);

			netif_dbg(pdata, intr, pdata->netdev,
				  "MAC_MDIOISR=%#010x\n", mac_mdioisr);

			if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
					   SNGLCOMPINT))
				complete(&pdata->mdio_complete);
		}
	}

isr_done:
	/* If there is not a separate AN irq, handle it here */
	if (pdata->dev_irq == pdata->an_irq)
		pdata->phy_if.an_isr(pdata);

	/* If there is not a separate ECC irq, handle it here */
	if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
		xgbe_ecc_isr_task((unsigned long)pdata);

	/* If there is not a separate I2C irq, handle it here */
	if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
		pdata->i2c_if.i2c_isr(pdata);

	/* Reissue interrupt if status is not clear */
	if (pdata->vdata->irq_reissue_support) {
		unsigned int reissue_mask;

		reissue_mask = 1 << 0;
		if (!pdata->per_channel_irq)
			reissue_mask |= 0xffff << 4;

		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
	}
}

static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;

	if (pdata->isr_as_tasklet)
		tasklet_schedule(&pdata->tasklet_dev);
	else
		xgbe_isr_task((unsigned long)pdata);

	return IRQ_HANDLED;
}

static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
	struct xgbe_channel *channel = data;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned int dma_status;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_disable_rx_tx_int(pdata, channel);
		else
			disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	/* Clear Tx/Rx signals */
	dma_status = 0;
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);

	return IRQ_HANDLED;
}

static void xgbe_tx_timer(struct timer_list *t)
{
	struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
	struct xgbe_prv_data *pdata = channel->pdata;
	struct napi_struct *napi;

	DBGPR("-->xgbe_tx_timer\n");

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			if (pdata->channel_irq_mode)
				xgbe_disable_rx_tx_int(pdata, channel);
			else
				disable_irq_nosync(channel->dma_irq);
		else
			xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;

	DBGPR("<--xgbe_tx_timer\n");
}

static void xgbe_service(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   service_work);

	pdata->phy_if.phy_status(pdata);
}

static void xgbe_service_timer(struct timer_list *t)
{
	struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);

	queue_work(pdata->dev_workqueue, &pdata->service_work);

	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	timer_setup(&pdata->service_timer, xgbe_service_timer, 0);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			break;

		timer_setup(&channel->tx_timer, xgbe_tx_timer, 0);
	}
}

static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
	mod_timer(&pdata->service_timer, jiffies + HZ);
}

static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	del_timer_sync(&pdata->service_timer);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}

void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
	hw_feat->vxn         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);

	if (netif_msg_probe(pdata)) {
		dev_dbg(pdata->dev, "Hardware features:\n");

		/* Hardware feature register 0 */
		dev_dbg(pdata->dev, "  1GbE support              : %s\n",
			hw_feat->gmii ? "yes" : "no");
		dev_dbg(pdata->dev, "  VLAN hash filter          : %s\n",
			hw_feat->vlhash ? "yes" : "no");
		dev_dbg(pdata->dev, "  MDIO interface            : %s\n",
			hw_feat->sma ? "yes" : "no");
		dev_dbg(pdata->dev, "  Wake-up packet support    : %s\n",
			hw_feat->rwk ? "yes" : "no");
		dev_dbg(pdata->dev, "  Magic packet support      : %s\n",
			hw_feat->mgk ? "yes" : "no");
		dev_dbg(pdata->dev, "  Management counters       : %s\n",
			hw_feat->mmc ? "yes" : "no");
		dev_dbg(pdata->dev, "  ARP offload               : %s\n",
			hw_feat->aoe ? "yes" : "no");
		dev_dbg(pdata->dev, "  IEEE 1588-2008 Timestamp  : %s\n",
			hw_feat->ts ? "yes" : "no");
		dev_dbg(pdata->dev, "  Energy Efficient Ethernet : %s\n",
			hw_feat->eee ? "yes" : "no");
		dev_dbg(pdata->dev, "  TX checksum offload       : %s\n",
			hw_feat->tx_coe ? "yes" : "no");
		dev_dbg(pdata->dev, "  RX checksum offload       : %s\n",
			hw_feat->rx_coe ? "yes" : "no");
		dev_dbg(pdata->dev, "  Additional MAC addresses  : %u\n",
			hw_feat->addn_mac);
		dev_dbg(pdata->dev, "  Timestamp source          : %s\n",
			(hw_feat->ts_src == 1) ? "internal" :
			(hw_feat->ts_src == 2) ? "external" :
			(hw_feat->ts_src == 3) ? "internal/external" : "n/a");
		dev_dbg(pdata->dev, "  SA/VLAN insertion         : %s\n",
			hw_feat->sa_vlan_ins ? "yes" : "no");
		dev_dbg(pdata->dev, "  VXLAN/NVGRE support       : %s\n",
			hw_feat->vxn ? "yes" : "no");

		/* Hardware feature register 1 */
		dev_dbg(pdata->dev, "  RX fifo size              : %u\n",
			hw_feat->rx_fifo_size);
		dev_dbg(pdata->dev, "  TX fifo size              : %u\n",
			hw_feat->tx_fifo_size);
		dev_dbg(pdata->dev, "  IEEE 1588 high word       : %s\n",
			hw_feat->adv_ts_hi ? "yes" : "no");
		dev_dbg(pdata->dev, "  DMA width                 : %u\n",
			hw_feat->dma_width);
		dev_dbg(pdata->dev, "  Data Center Bridging      : %s\n",
			hw_feat->dcb ? "yes" : "no");
		dev_dbg(pdata->dev, "  Split header              : %s\n",
			hw_feat->sph ? "yes" : "no");
		dev_dbg(pdata->dev, "  TCP Segmentation Offload  : %s\n",
			hw_feat->tso ? "yes" : "no");
		dev_dbg(pdata->dev, "  Debug memory interface    : %s\n",
			hw_feat->dma_debug ? "yes" : "no");
		dev_dbg(pdata->dev, "  Receive Side Scaling      : %s\n",
			hw_feat->rss ? "yes" : "no");
		dev_dbg(pdata->dev, "  Traffic Class count       : %u\n",
			hw_feat->tc_cnt);
		dev_dbg(pdata->dev, "  Hash table size           : %u\n",
			hw_feat->hash_table_size);
		dev_dbg(pdata->dev, "  L3/L4 Filters             : %u\n",
			hw_feat->l3l4_filter_num);

		/* Hardware feature register 2 */
		dev_dbg(pdata->dev, "  RX queue count            : %u\n",
			hw_feat->rx_q_cnt);
		dev_dbg(pdata->dev, "  TX queue count            : %u\n",
			hw_feat->tx_q_cnt);
		dev_dbg(pdata->dev, "  RX DMA channel count      : %u\n",
			hw_feat->rx_ch_cnt);
		dev_dbg(pdata->dev, "  TX DMA channel count      : %u\n",
			hw_feat->tx_ch_cnt);
		dev_dbg(pdata->dev, "  PPS outputs               : %u\n",
			hw_feat->pps_out_num);
		dev_dbg(pdata->dev, "  Auxiliary snapshot inputs : %u\n",
			hw_feat->aux_snap_num);
	}
}

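/* The raw register fields read above are encodings, not final values: a
 * fifo size field of 7, for example, translates to 1 << (7 + 7) = 16384
 * bytes, and the queue/channel/TC counts are stored zero-based, so a raw
 * value of 0 means one instance.
 */
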
static void xgbe_disable_vxlan_offloads(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	if (!pdata->vxlan_offloads_set)
		return;

	netdev_info(netdev, "disabling VXLAN offloads\n");

	netdev->hw_enc_features &= ~(NETIF_F_SG |
				     NETIF_F_IP_CSUM |
				     NETIF_F_IPV6_CSUM |
				     NETIF_F_RXCSUM |
				     NETIF_F_TSO |
				     NETIF_F_TSO6 |
				     NETIF_F_GRO |
				     NETIF_F_GSO_UDP_TUNNEL |
				     NETIF_F_GSO_UDP_TUNNEL_CSUM);

	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL |
			      NETIF_F_GSO_UDP_TUNNEL_CSUM);

	pdata->vxlan_offloads_set = 0;
}

static void xgbe_disable_vxlan_hw(struct xgbe_prv_data *pdata)
{
	if (!pdata->vxlan_port_set)
		return;

	pdata->hw_if.disable_vxlan(pdata);

	pdata->vxlan_port_set = 0;
	pdata->vxlan_port = 0;
}

static void xgbe_disable_vxlan_accel(struct xgbe_prv_data *pdata)
{
	xgbe_disable_vxlan_offloads(pdata);

	xgbe_disable_vxlan_hw(pdata);
}

static void xgbe_enable_vxlan_offloads(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	if (pdata->vxlan_offloads_set)
		return;

	netdev_info(netdev, "enabling VXLAN offloads\n");

	netdev->hw_enc_features |= NETIF_F_SG |
				   NETIF_F_IP_CSUM |
				   NETIF_F_IPV6_CSUM |
				   NETIF_F_RXCSUM |
				   NETIF_F_TSO |
				   NETIF_F_TSO6 |
				   NETIF_F_GRO |
				   pdata->vxlan_features;

	netdev->features |= pdata->vxlan_features;

	pdata->vxlan_offloads_set = 1;
}

static void xgbe_enable_vxlan_hw(struct xgbe_prv_data *pdata)
{
	struct xgbe_vxlan_data *vdata;

	if (pdata->vxlan_port_set)
		return;

	if (list_empty(&pdata->vxlan_ports))
		return;

	vdata = list_first_entry(&pdata->vxlan_ports,
				 struct xgbe_vxlan_data, list);

	pdata->vxlan_port_set = 1;
	pdata->vxlan_port = be16_to_cpu(vdata->port);

	pdata->hw_if.enable_vxlan(pdata);
}

static void xgbe_enable_vxlan_accel(struct xgbe_prv_data *pdata)
{
	/* VXLAN acceleration desired? */
	if (!pdata->vxlan_features)
		return;

	/* VXLAN acceleration possible? */
	if (pdata->vxlan_force_disable)
		return;

	xgbe_enable_vxlan_hw(pdata);

	xgbe_enable_vxlan_offloads(pdata);
}

static void xgbe_reset_vxlan_accel(struct xgbe_prv_data *pdata)
{
	xgbe_disable_vxlan_hw(pdata);

	if (pdata->vxlan_features)
		xgbe_enable_vxlan_offloads(pdata);

	pdata->vxlan_force_disable = 0;
}

static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xgbe_one_poll, NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xgbe_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}

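/* The add/del arguments let callers enable or disable NAPI without
 * re-registering it: the powerdown/powerup paths pass 0 so the NAPI
 * instances stay registered across a power management cycle, while
 * xgbe_start()/xgbe_stop() pass 1 to add and delete them.
 */
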
static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	tasklet_init(&pdata->tasklet_dev, xgbe_isr_task, (unsigned long)pdata);
	tasklet_init(&pdata->tasklet_ecc, xgbe_ecc_isr_task,
		     (unsigned long)pdata);

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
			       netdev_name(netdev), pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
		ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
				       0, pdata->ecc_name, pdata);
		if (ret) {
			netdev_alert(netdev, "error requesting ecc irq %d\n",
				     pdata->ecc_irq);
			goto err_dev_irq;
		}
	}

	if (!pdata->per_channel_irq)
		return 0;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xgbe_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_dma_irq;
		}

		irq_set_affinity_hint(channel->dma_irq,
				      &channel->affinity_mask);
	}

	return 0;

err_dma_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--; i < pdata->channel_count; i--) {
		channel = pdata->channel[i];

		irq_set_affinity_hint(channel->dma_irq, NULL);
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
	}

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

err_dev_irq:
	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}

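/* Note on the unwind loop above: 'i' is unsigned, so decrementing it past
 * zero wraps to UINT_MAX, which fails the 'i < pdata->channel_count' test
 * and terminates the loop once the first channel has been cleaned up.
 */
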
static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
		devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];

		irq_set_affinity_hint(channel->dma_irq, NULL);
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
	}
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	for (i = 0; i < pdata->channel_count; i++) {
		ring = pdata->channel[i]->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	for (i = 0; i < pdata->channel_count; i++) {
		ring = pdata->channel[i]->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}

static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;

	return pdata->phy_if.phy_reset(pdata);
}

int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	xgbe_napi_disable(pdata, 0);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	xgbe_napi_enable(pdata, 0);

	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}

static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct net_device *netdev = pdata->netdev;
	int ret;

	DBGPR("-->xgbe_start\n");

	ret = hw_if->init(pdata);
	if (ret)
		return ret;

	xgbe_napi_enable(pdata, 1);

	ret = xgbe_request_irqs(pdata);
	if (ret)
		goto err_napi;

	ret = phy_if->phy_start(pdata);
	if (ret)
		goto err_irqs;

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	udp_tunnel_get_rx_info(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);
	queue_work(pdata->dev_workqueue, &pdata->service_work);

	clear_bit(XGBE_STOPPED, &pdata->dev_state);

	DBGPR("<--xgbe_start\n");

	return 0;

err_irqs:
	xgbe_free_irqs(pdata);

err_napi:
	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	return ret;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned int i;

	DBGPR("-->xgbe_stop\n");

	if (test_bit(XGBE_STOPPED, &pdata->dev_state))
		return;

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	xgbe_reset_vxlan_accel(pdata);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	phy_if->phy_stop(pdata);

	xgbe_free_irqs(pdata);

	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}

	set_bit(XGBE_STOPPED, &pdata->dev_state);

	DBGPR("<--xgbe_stop\n");
}

static void xgbe_stopdev(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   stopdev_work);

	rtnl_lock();

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	rtnl_unlock();

	netdev_alert(pdata->netdev, "device stopped\n");
}

static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata);

	rtnl_unlock();
}

static void xgbe_tx_tstamp(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   tx_tstamp_work);
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;
	unsigned long flags;

	spin_lock_irqsave(&pdata->tstamp_lock, flags);
	if (!pdata->tx_tstamp_skb)
		goto unlock;

	if (pdata->tx_tstamp) {
		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
					    pdata->tx_tstamp);

		memset(&hwtstamps, 0, sizeof(hwtstamps));
		hwtstamps.hwtstamp = ns_to_ktime(nsec);
		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
	}

	dev_kfree_skb_any(pdata->tx_tstamp_skb);

	pdata->tx_tstamp_skb = NULL;

unlock:
	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}

static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
			 sizeof(pdata->tstamp_config)))
		return -EFAULT;

	return 0;
}

static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	struct hwtstamp_config config;
	unsigned int mac_tscr;

	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
		return -EFAULT;

	mac_tscr = 0;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;

	case HWTSTAMP_TX_ON:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;

	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	pdata->hw_if.config_tstamp(pdata, mac_tscr);

	memcpy(&pdata->tstamp_config, &config, sizeof(config));

	return 0;
}

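/* The PTP v2 cases above intentionally fall through into the corresponding
 * v1 cases: v2 filtering only needs TSVER2ENA set on top of the v1
 * configuration bits.
 */
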
static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
				struct sk_buff *skb,
				struct xgbe_packet_data *packet)
{
	unsigned long flags;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
		spin_lock_irqsave(&pdata->tstamp_lock, flags);
		if (pdata->tx_tstamp_skb) {
			/* Another timestamp in progress, ignore this one */
			XGMAC_SET_BITS(packet->attributes,
				       TX_PACKET_ATTRIBUTES, PTP, 0);
		} else {
			pdata->tx_tstamp_skb = skb_get(skb);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
	}

	skb_tx_timestamp(skb);
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (skb_vlan_tag_present(skb))
		packet->vlan_ctag = skb_vlan_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
		packet->header_len = skb_inner_transport_offset(skb) +
				     inner_tcp_hdrlen(skb);
		packet->tcp_header_len = inner_tcp_hdrlen(skb);
	} else {
		packet->header_len = skb_transport_offset(skb) +
				     tcp_hdrlen(skb);
		packet->tcp_header_len = tcp_hdrlen(skb);
	}
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;

	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	packet->tx_packets = skb_shinfo(skb)->gso_segs;
	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;

	return 0;
}

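/* Example of the accounting above: a TSO skb with gso_segs = 4 and a
 * header_len of 66 bytes is reported as 4 tx_packets, with 3 * 66 extra
 * header bytes added on top of skb->len, since each additional segment
 * repeats the headers on the wire.
 */
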
static bool xgbe_is_vxlan(struct xgbe_prv_data *pdata, struct sk_buff *skb)
{
	struct xgbe_vxlan_data *vdata;

	if (pdata->vxlan_force_disable)
		return false;

	if (!skb->encapsulation)
		return false;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (ip_hdr(skb)->protocol != IPPROTO_UDP)
			return false;
		break;

	case htons(ETH_P_IPV6):
		if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
			return false;
		break;

	default:
		return false;
	}

	/* See if we have the UDP port in our list */
	list_for_each_entry(vdata, &pdata->vxlan_ports, list) {
		if ((skb->protocol == htons(ETH_P_IP)) &&
		    (vdata->sa_family == AF_INET) &&
		    (vdata->port == udp_hdr(skb)->dest))
			return true;
		else if ((skb->protocol == htons(ETH_P_IPV6)) &&
			 (vdata->sa_family == AF_INET6) &&
			 (vdata->port == udp_hdr(skb)->dest))
			return true;
	}

	return false;
}

static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR("  TSO packet to be processed\n");

	return 1;
}

static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	context_desc = 0;
	packet->rdesc_count = 0;

	packet->tx_packets = 1;
	packet->tx_bytes = skb->len;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (xgbe_is_vxlan(pdata, skb))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VXLAN, 1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}

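/* Example descriptor count: a simple non-TSO, non-VLAN skb with a linear
 * buffer under XGBE_TX_MAX_BUF_SIZE and no fragments ends up with
 * rdesc_count = 1.  TSO adds one descriptor for the header (plus one more
 * when the MSS changes), and a VLAN tag change may add a context descriptor
 * unless it can share the TSO context descriptor.
 */
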
static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_open\n");

	/* Create the various names based on netdev name */
	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
		 netdev_name(netdev));

	snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
		 netdev_name(netdev));

	snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
		 netdev_name(netdev));

	/* Create workqueues */
	pdata->dev_workqueue =
		create_singlethread_workqueue(netdev_name(netdev));
	if (!pdata->dev_workqueue) {
		netdev_err(netdev, "device workqueue creation failed\n");
		return -ENOMEM;
	}

	pdata->an_workqueue =
		create_singlethread_workqueue(pdata->an_name);
	if (!pdata->an_workqueue) {
		netdev_err(netdev, "phy workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_dev_wq;
	}

	/* Reset the phy settings */
	ret = xgbe_phy_reset(pdata);
	if (ret)
		goto err_an_wq;

	/* Enable the clocks */
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		goto err_an_wq;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
		goto err_sysclk;
	}

	/* Calculate the Rx buffer size before allocating rings */
	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		goto err_ptpclk;
	pdata->rx_buf_size = ret;

	/* Allocate the channel and ring structures */
	ret = xgbe_alloc_channels(pdata);
	if (ret)
		goto err_ptpclk;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_channels;

	INIT_WORK(&pdata->service_work, xgbe_service);
	INIT_WORK(&pdata->restart_work, xgbe_restart);
	INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
	xgbe_init_timers(pdata);

	ret = xgbe_start(pdata);
	if (ret)
		goto err_rings;

	clear_bit(XGBE_DOWN, &pdata->dev_state);

	DBGPR("<--xgbe_open\n");

	return 0;

err_rings:
	desc_if->free_ring_resources(pdata);

err_channels:
	xgbe_free_channels(pdata);

err_ptpclk:
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

err_an_wq:
	destroy_workqueue(pdata->an_workqueue);

err_dev_wq:
	destroy_workqueue(pdata->dev_workqueue);

	return ret;
}

static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	DBGPR("-->xgbe_close\n");

	/* Stop the device */
	xgbe_stop(pdata);

	/* Free the ring descriptors and buffers */
	desc_if->free_ring_resources(pdata);

	/* Free the channel and ring structures */
	xgbe_free_channels(pdata);

	/* Disable the clocks */
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);

	flush_workqueue(pdata->an_workqueue);
	destroy_workqueue(pdata->an_workqueue);

	flush_workqueue(pdata->dev_workqueue);
	destroy_workqueue(pdata->dev_workqueue);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	DBGPR("<--xgbe_close\n");

	return 0;
}

static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	struct netdev_queue *txq;
	int ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel[skb->queue_mapping];
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(pdata, ring, skb, packet);

	/* Check that there are enough descriptors available */
	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
	if (ret)
		goto tx_netdev_return;

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	xgbe_prep_tx_tstamp(pdata, skb, packet);

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, packet->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_if->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xgbe_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);

	ret = NETDEV_TX_OK;

tx_netdev_return:
	return ret;
}

static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_set_rx_mode\n");

	hw_if->config_rx_mode(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}

static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}

static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
		break;

	case SIOCSHWTSTAMP:
		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}

2109 static void xgbe_tx_timeout(struct net_device
*netdev
)
2111 struct xgbe_prv_data
*pdata
= netdev_priv(netdev
);
2113 netdev_warn(netdev
, "tx timeout, device restarting\n");
2114 schedule_work(&pdata
->restart_work
);
static void xgbe_get_stats64(struct net_device *netdev,
			     struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);
}
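
/*
 * Statistics note: the MMC "_gb" counters count good and bad frames while the
 * "_g" counters count good frames only (the usual XGMAC MMC naming, assumed
 * here).  rx_errors is therefore derived as
 *   rxframecount_gb - (rxbroadcastframes_g + rxmulticastframes_g +
 *			rxunicastframes_g),
 * i.e. total frames minus all good frames, and tx_errors is
 *   txframecount_gb - txframecount_g.
 */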
static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_poll_controller\n");

	if (pdata->per_channel_irq) {
		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];
			xgbe_dma_isr(channel->dma_irq, channel);
		}
	} else {
		disable_irq(pdata->dev_irq);
		xgbe_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */
static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			 void *type_data)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 tc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	tc = mqprio->num_tc;

	if (tc > pdata->hw_feat.tc_cnt)
		return -EINVAL;

	pdata->num_tcs = tc;
	pdata->hw_if.config_tc(pdata);

	return 0;
}
static netdev_features_t xgbe_fix_features(struct net_device *netdev,
					   netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	netdev_features_t vxlan_base, vxlan_mask;

	vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT;
	vxlan_mask = vxlan_base | NETIF_F_GSO_UDP_TUNNEL_CSUM;

	pdata->vxlan_features = features & vxlan_mask;

	/* Only fix VXLAN-related features */
	if (!pdata->vxlan_features)
		return features;

	/* If VXLAN isn't supported then clear any features:
	 *   This is needed because NETIF_F_RX_UDP_TUNNEL_PORT gets
	 *   automatically set if ndo_udp_tunnel_add is set.
	 */
	if (!pdata->hw_feat.vxn)
		return features & ~vxlan_mask;

	/* VXLAN CSUM requires VXLAN base */
	if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) &&
	    !(features & NETIF_F_GSO_UDP_TUNNEL)) {
		netdev_notice(netdev,
			      "forcing tx udp tunnel support\n");
		features |= NETIF_F_GSO_UDP_TUNNEL;
	}

	/* Can't do one without doing the other */
	if ((features & vxlan_base) != vxlan_base) {
		netdev_notice(netdev,
			      "forcing both tx and rx udp tunnel support\n");
		features |= vxlan_base;
	}

	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (!(features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) {
			netdev_notice(netdev,
				      "forcing tx udp tunnel checksumming on\n");
			features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
	} else {
		if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) {
			netdev_notice(netdev,
				      "forcing tx udp tunnel checksumming off\n");
			features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
		}
	}

	pdata->vxlan_features = features & vxlan_mask;

	/* Adjust UDP Tunnel based on current state */
	if (pdata->vxlan_force_disable) {
		netdev_notice(netdev,
			      "VXLAN acceleration disabled, turning off udp tunnel features\n");
		features &= ~vxlan_mask;
	}

	return features;
}
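
/*
 * Feature-fixup note: the checks above enforce a dependency chain for the
 * VXLAN offloads -- NETIF_F_GSO_UDP_TUNNEL_CSUM implies
 * NETIF_F_GSO_UDP_TUNNEL, the Tx (GSO_UDP_TUNNEL) and Rx
 * (RX_UDP_TUNNEL_PORT) halves are only offered together, and the tunnel
 * checksum offload follows the plain IP/IPv6 checksum setting.  When the
 * hardware lacks the VXLAN feature, or acceleration has been force-disabled,
 * every bit in vxlan_mask is cleared instead.
 */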
static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	netdev_features_t udp_tunnel;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
	udp_tunnel = pdata->netdev_features & NETIF_F_GSO_UDP_TUNNEL;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_if->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_if->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	if ((features & NETIF_F_GSO_UDP_TUNNEL) && !udp_tunnel)
		xgbe_enable_vxlan_accel(pdata);
	else if (!(features & NETIF_F_GSO_UDP_TUNNEL) && udp_tunnel)
		xgbe_disable_vxlan_accel(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}
static void xgbe_udp_tunnel_add(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_vxlan_data *vdata;

	if (!pdata->hw_feat.vxn)
		return;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	pdata->vxlan_port_count++;

	netif_dbg(pdata, drv, netdev,
		  "adding VXLAN tunnel, family=%hx/port=%hx\n",
		  ti->sa_family, be16_to_cpu(ti->port));

	if (pdata->vxlan_force_disable)
		return;

	vdata = kzalloc(sizeof(*vdata), GFP_ATOMIC);
	if (!vdata) {
		/* Can no longer properly track VXLAN ports */
		pdata->vxlan_force_disable = 1;
		netif_dbg(pdata, drv, netdev,
			  "internal error, disabling VXLAN accelerations\n");

		xgbe_disable_vxlan_accel(pdata);

		return;
	}
	vdata->sa_family = ti->sa_family;
	vdata->port = ti->port;

	list_add_tail(&vdata->list, &pdata->vxlan_ports);

	/* First port added? */
	if (pdata->vxlan_port_count == 1) {
		xgbe_enable_vxlan_accel(pdata);

		return;
	}
}
static void xgbe_udp_tunnel_del(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_vxlan_data *vdata;

	if (!pdata->hw_feat.vxn)
		return;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	netif_dbg(pdata, drv, netdev,
		  "deleting VXLAN tunnel, family=%hx/port=%hx\n",
		  ti->sa_family, be16_to_cpu(ti->port));

	/* Don't need safe version since loop terminates with deletion */
	list_for_each_entry(vdata, &pdata->vxlan_ports, list) {
		if (vdata->sa_family != ti->sa_family)
			continue;

		if (vdata->port != ti->port)
			continue;

		list_del(&vdata->list);
		kfree(vdata);

		break;
	}

	pdata->vxlan_port_count--;
	if (!pdata->vxlan_port_count) {
		xgbe_reset_vxlan_accel(pdata);

		return;
	}

	if (pdata->vxlan_force_disable)
		return;

	/* See if VXLAN tunnel id needs to be changed */
	vdata = list_first_entry(&pdata->vxlan_ports,
				 struct xgbe_vxlan_data, list);
	if (pdata->vxlan_port == be16_to_cpu(vdata->port))
		return;

	pdata->vxlan_port = be16_to_cpu(vdata->port);
	pdata->hw_if.set_vxlan_id(pdata);
}
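
/*
 * Tunnel removal note: once the matching entry has been dropped from
 * vxlan_ports, acceleration is reset when no ports remain; otherwise the
 * hardware VXLAN port identifier is re-programmed from the first remaining
 * list entry if it differs from the currently configured pdata->vxlan_port.
 */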
static netdev_features_t xgbe_features_check(struct sk_buff *skb,
					     struct net_device *netdev,
					     netdev_features_t features)
{
	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	return features;
}
static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open		= xgbe_open,
	.ndo_stop		= xgbe_close,
	.ndo_start_xmit		= xgbe_xmit,
	.ndo_set_rx_mode	= xgbe_set_rx_mode,
	.ndo_set_mac_address	= xgbe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xgbe_ioctl,
	.ndo_change_mtu		= xgbe_change_mtu,
	.ndo_tx_timeout		= xgbe_tx_timeout,
	.ndo_get_stats64	= xgbe_get_stats64,
	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xgbe_poll_controller,
#endif
	.ndo_setup_tc		= xgbe_setup_tc,
	.ndo_fix_features	= xgbe_fix_features,
	.ndo_set_features	= xgbe_set_features,
	.ndo_udp_tunnel_add	= xgbe_udp_tunnel_add,
	.ndo_udp_tunnel_del	= xgbe_udp_tunnel_del,
	.ndo_features_check	= xgbe_features_check,
};

const struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return &xgbe_netdev_ops;
}
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	while (ring->dirty != ring->cur) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);

		/* Reset rdata values */
		desc_if->unmap_rdata(pdata, rdata);

		if (desc_if->map_rx_buffer(pdata, ring, rdata))
			break;

		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
}
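
/*
 * Ring refresh note: ring->dirty trails ring->cur; the entries in between
 * have already been consumed by the driver and need fresh Rx buffers.  The
 * wmb() above orders the descriptor writes against the tail-pointer register
 * write so the DMA engine never sees a tail update before the descriptors it
 * covers are valid.
 */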
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
				       struct napi_struct *napi,
				       struct xgbe_ring_data *rdata,
				       unsigned int len)
{
	struct sk_buff *skb;
	u8 *packet;

	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Pull in the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
				      rdata->rx.hdr.dma_off,
				      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);

	packet = page_address(rdata->rx.hdr.pa.pages) +
		 rdata->rx.hdr.pa.pages_offset;
	skb_copy_to_linear_data(skb, packet, len);
	skb_put(skb, len);

	return skb;
}
static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet)
{
	/* Always zero if not the first descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
		return 0;

	/* First descriptor with split header, return header length */
	if (rdata->rx.hdr_len)
		return rdata->rx.hdr_len;

	/* First descriptor but not the last descriptor and no split header,
	 * so the full buffer was used
	 */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.hdr.dma_len;

	/* First descriptor and last descriptor and no split header, so
	 * calculate how much of the buffer was used
	 */
	return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
}
static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
				     struct xgbe_packet_data *packet,
				     unsigned int len)
{
	/* Always the full buffer if not the last descriptor */
	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
		return rdata->rx.buf.dma_len;

	/* Last descriptor so calculate how much of the buffer was used
	 * for the last bit of data
	 */
	return rdata->rx.len - len;
}
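
/*
 * Buffer length example (illustrative numbers, not taken from the hardware):
 * with a 256-byte header buffer (rx.hdr.dma_len = 256), no split header and
 * a single-descriptor 1500-byte frame (rx.len = 1500), xgbe_rx_buf1_len()
 * returns min(256, 1500) = 256 and xgbe_rx_buf2_len() then returns
 * 1500 - 256 = 1244, so the header buffer becomes the skb linear data and
 * the remainder is attached as a page fragment in xgbe_rx_poll().
 */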
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int tx_packets = 0, tx_bytes = 0;
	unsigned int cur;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

		/* Make sure descriptor fields are read after reading the OWN
		 * bit */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_if->is_last_desc(rdesc)) {
			tx_packets += rdata->tx.packets;
			tx_bytes += rdata->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_rdata(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	return processed;
}
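
/*
 * Tx completion note: netdev_tx_completed_queue() above pairs with the
 * netdev_tx_sent_queue() call in xgbe_xmit() for byte queue limits, and the
 * stopped queue is only woken once more than XGBE_TX_DESC_MIN_FREE
 * descriptors are available again, presumably to avoid bouncing the queue on
 * every reclaimed descriptor.
 */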
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int last, error, context_next, context;
	unsigned int len, buf1_len, buf2_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	last = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
		if (!received && rdata->state_saved) {
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;

		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				      LAST);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((!last || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Get the data length in the descriptor buffers */
			buf1_len = xgbe_rx_buf1_len(rdata, packet);
			len += buf1_len;
			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
			len += buf2_len;

			if (!skb) {
				skb = xgbe_create_skb(pdata, napi, rdata,
						      buf1_len);
				if (!skb) {
					error = 1;
					goto skip_data;
				}
			}

			if (buf2_len) {
				dma_sync_single_range_for_cpu(pdata->dev,
							rdata->rx.buf.dma_base,
							rdata->rx.buf.dma_off,
							rdata->rx.buf.dma_len,
							DMA_FROM_DEVICE);

				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						buf2_len,
						rdata->rx.buf.dma_len);
				rdata->rx.buf.pa.pages = NULL;
			}
		}

skip_data:
		if (!last || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xgbe_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, TNP)) {
			skb->encapsulation = 1;

			if (XGMAC_GET_BITS(packet->attributes,
					   RX_PACKET_ATTRIBUTES, TNPCSUM_DONE))
				skb->csum_level = 1;
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RSS_HASH))
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (!last || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}
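
/*
 * Rx state note: a packet can span several descriptors and may be followed
 * by a context descriptor, so when the NAPI budget runs out mid-packet
 * (!last || context_next) the partially built skb, the accumulated length
 * and the error flag are parked in the ring data (state_saved) and restored
 * at the top of the next poll.
 */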
static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
						    napi);
	struct xgbe_prv_data *pdata = channel->pdata;
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if ((processed < budget) && napi_complete_done(napi, processed)) {
		/* Enable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_enable_rx_tx_int(pdata, channel);
		else
			enable_irq(channel->dma_irq);
	}

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}
static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		for (i = 0; i < pdata->channel_count; i++) {
			channel = pdata->channel[i];

			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if ((processed < budget) && napi_complete_done(napi, processed)) {
		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}
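
/*
 * Budget example (illustrative numbers): with budget = 64 and
 * rx_ring_count = 4, each ring is polled with ring_budget = 16 per pass;
 * the do/while loop above repeats until either the whole budget is consumed
 * or a full pass makes no progress (processed == last_processed).
 */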
void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx, unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		netdev_dbg(pdata->netdev,
			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			   le32_to_cpu(rdesc->desc0),
			   le32_to_cpu(rdesc->desc1),
			   le32_to_cpu(rdesc->desc2),
			   le32_to_cpu(rdesc->desc3));
		idx++;
	}
}
void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	rdata = XGBE_GET_DESC_DATA(ring, idx);
	rdesc = rdata->rdesc;
	netdev_dbg(pdata->netdev,
		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char buffer[128];
	unsigned int i;

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");

	netdev_dbg(netdev, "%s packet of %d bytes\n",
		   (tx_rx ? "TX" : "RX"), skb->len);

	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));

	for (i = 0; i < skb->len; i += 32) {
		unsigned int len = min(skb->len - i, 32U);

		hex_dump_to_buffer(&skb->data[i], len, 32, 1,
				   buffer, sizeof(buffer), false);
		netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
	}

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");