/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 * Inc. unless otherwise expressly agreed to in writing between Synopsys
 * and you.
 *
 * The Software IS NOT an item of Licensed Software or Licensed Product
 * under any End User Software License Agreement or Agreement for Licensed
 * Product with Synopsys or any supplement thereto.  Permission is hereby
 * granted, free of charge, to any person obtaining a copy of this software
 * annotated with this license and the Software, to deal in the Software
 * without restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is furnished
 * to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 * Inc. unless otherwise expressly agreed to in writing between Synopsys
 * and you.
 *
 * The Software IS NOT an item of Licensed Software or Licensed Product
 * under any End User Software License Agreement or Agreement for Licensed
 * Product with Synopsys or any supplement thereto.  Permission is hereby
 * granted, free of charge, to any person obtaining a copy of this software
 * annotated with this license and the Software, to deal in the Software
 * without restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is furnished
 * to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>

#include "xgbe.h"
#include "xgbe-common.h"
static int xgbe_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}
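/* Note on the ring accounting used throughout this file: ring->cur advances
 * as descriptors are handed to the hardware and ring->dirty advances as they
 * are reclaimed, so (cur - dirty) is the number of descriptors still owned
 * by the device and the remainder of rdesc_count is what is free for new
 * transmit requests.
 */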
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	if (rx_buf_size < RX_MIN_BUF_SIZE)
		rx_buf_size = RX_MIN_BUF_SIZE;
	rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1);

	return rx_buf_size;
}
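/* Example of the sizing above: for the default MTU of 1500 the buffer must
 * hold 1500 + 14 (Ethernet header) + 4 (FCS) + 4 (VLAN tag) = 1522 bytes,
 * which is then rounded up to the next RX_BUF_ALIGN boundary. RX_MIN_BUF_SIZE
 * and RX_BUF_ALIGN are constants from the driver's private header.
 */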
static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring)
			hw_if->enable_int(channel, XGMAC_INT_DMA_CH_SR_TI);
		if (channel->rx_ring)
			hw_if->enable_int(channel, XGMAC_INT_DMA_CH_SR_RI);
	}
}
static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring)
			hw_if->disable_int(channel, XGMAC_INT_DMA_CH_SR_TI);
		if (channel->rx_ring)
			hw_if->disable_int(channel, XGMAC_INT_DMA_CH_SR_RI);
	}
}
static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	DBGPR("-->xgbe_isr\n");

	DBGPR("  DMA_ISR = %08x\n", dma_isr);
	DBGPR("  DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
	DBGPR("  DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel + i;

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		    XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule(&pdata->napi);
			}
		}

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear all interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);
	}

	DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));

	DBGPR("<--xgbe_isr\n");

isr_done:
	return IRQ_HANDLED;
}
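/* The interrupt handler above and the NAPI poll routine later in this file
 * hand off to each other: when a channel reports Tx or Rx work, the ISR masks
 * the per-channel Tx/Rx interrupts and schedules NAPI, and xgbe_poll()
 * re-enables them once it has cleaned the rings within its budget.
 */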
static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
{
	struct xgbe_channel *channel = container_of(timer,
						    struct xgbe_channel,
						    tx_timer);
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned long flags;

	DBGPR("-->xgbe_tx_timer\n");

	spin_lock_irqsave(&ring->lock, flags);

	if (napi_schedule_prep(&pdata->napi)) {
		/* Disable Tx and Rx interrupts */
		xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(&pdata->napi);
	}

	channel->tx_timer_active = 0;

	spin_unlock_irqrestore(&ring->lock, flags);

	DBGPR("<--xgbe_tx_timer\n");

	return HRTIMER_NORESTART;
}
static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_init_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR("  %s adding tx timer\n", channel->name);
		hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		channel->tx_timer.function = xgbe_tx_timer;
	}

	DBGPR("<--xgbe_init_tx_timers\n");
}
static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_stop_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR("  %s deleting tx timer\n", channel->name);
		channel->tx_timer_active = 0;
		hrtimer_cancel(&channel->tx_timer);
	}

	DBGPR("<--xgbe_stop_tx_timers\n");
}
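/* The per-channel hrtimer above acts as a transmit-completion fallback: when
 * it fires, xgbe_tx_timer() schedules NAPI (with interrupts masked) so that
 * xgbe_tx_poll() reclaims any descriptors the hardware has finished with.
 * The timer is armed outside this file, presumably by the hardware-specific
 * descriptor setup code when Tx interrupt coalescing defers the completion
 * interrupt; here it is only initialized and cancelled.
 */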
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	DBGPR("-->xgbe_get_all_hw_features\n");

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	/* Hardware feature register 0 */
	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size    = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  RXFIFOSIZE);
	hw_feat->tx_fifo_size    = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  TXFIFOSIZE);
	hw_feat->dcb             = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph             = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso             = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug       = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* The Queue and Channel counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;

	DBGPR("<--xgbe_get_all_hw_features\n");
}
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	if (add)
		netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
			       NAPI_POLL_WEIGHT);
	napi_enable(&pdata->napi);
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
{
	napi_disable(&pdata->napi);
}
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}
void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}
static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_skbuff\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = GET_DESC_DATA(ring, j);
			desc_if->unmap_skb(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_skbuff\n");
}
static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_skbuff\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = GET_DESC_DATA(ring, j);
			desc_if->unmap_skb(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_skbuff\n");
}
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	phy_stop(pdata->phydev);

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);
	xgbe_napi_disable(pdata);

	/* Powerdown Tx/Rx */
	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}
int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	phy_start(pdata->phydev);

	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	xgbe_napi_enable(pdata, 0);
	netif_tx_start_all_queues(netdev);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}
static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct net_device *netdev = pdata->netdev;

	DBGPR("-->xgbe_start\n");

	xgbe_set_rx_mode(netdev);

	phy_start(pdata->phydev);

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	xgbe_init_tx_timers(pdata);

	xgbe_napi_enable(pdata, 1);
	netif_tx_start_all_queues(netdev);

	DBGPR("<--xgbe_start\n");

	return 0;
}
static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct net_device *netdev = pdata->netdev;

	DBGPR("-->xgbe_stop\n");

	phy_stop(pdata->phydev);

	netif_tx_stop_all_queues(netdev);
	xgbe_napi_disable(pdata);

	xgbe_stop_tx_timers(pdata);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	DBGPR("<--xgbe_stop\n");
}
static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);
	synchronize_irq(pdata->irq_number);

	xgbe_free_tx_skbuff(pdata);
	xgbe_free_rx_skbuff(pdata);

	/* Issue software reset to device if requested */
	if (reset)
		hw_if->exit(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}
static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	xgbe_restart_dev(pdata, 1);
}
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (vlan_tx_tag_present(skb))
		packet->vlan_ctag = vlan_tx_tag_get(skb);
}
static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	packet->tcp_header_len = tcp_hdrlen(skb);
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;
	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	return 0;
}
static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR("  TSO packet to be processed\n");

	return 1;
}
static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	context_desc = 0;
	packet->rdesc_count = 0;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra desriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra desriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (vlan_tx_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc)
				packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
		}
	}
}
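/* Worked example of the descriptor count computed above: a TSO packet whose
 * gso_size differs from the ring's current MSS, with a 300 byte linear area
 * and two page fragments, needs 1 context descriptor + 1 for the TSO header
 * + 1 for the linear data + 2 for the fragments = 5, plus one more per
 * TX_MAX_BUF_SIZE chunk if any single buffer exceeds that limit.
 */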
static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_open\n");

	/* Enable the clock */
	ret = clk_prepare_enable(pdata->sysclock);
	if (ret) {
		netdev_alert(netdev, "clk_prepare_enable failed\n");
		return ret;
	}

	/* Calculate the Rx buffer size before allocating rings */
	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		goto err_clk;
	pdata->rx_buf_size = ret;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_clk;

	/* Initialize the device restart work struct */
	INIT_WORK(&pdata->restart_work, xgbe_restart);

	/* Request interrupts */
	ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
			       netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     netdev->irq);
		goto err_ring;
	}
	pdata->irq_number = netdev->irq;

	ret = xgbe_start(pdata);
	if (ret)
		goto err_start;

	DBGPR("<--xgbe_open\n");

	return 0;

err_start:
	hw_if->exit(pdata);

	devm_free_irq(pdata->dev, pdata->irq_number, pdata);
	pdata->irq_number = 0;

err_ring:
	desc_if->free_ring_resources(pdata);

err_clk:
	clk_disable_unprepare(pdata->sysclock);

	return ret;
}
static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	DBGPR("-->xgbe_close\n");

	/* Stop the device */
	xgbe_stop(pdata);

	/* Issue software reset to device */
	hw_if->exit(pdata);

	/* Free all the ring data */
	desc_if->free_ring_resources(pdata);

	/* Release the interrupt */
	if (pdata->irq_number != 0) {
		devm_free_irq(pdata->dev, pdata->irq_number, pdata);
		pdata->irq_number = 0;
	}

	/* Disable the clock */
	clk_disable_unprepare(pdata->sysclock);

	DBGPR("<--xgbe_close\n");

	return 0;
}
static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	unsigned long flags;
	int ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel + skb->queue_mapping;
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	spin_lock_irqsave(&ring->lock, flags);

	if (skb->len == 0) {
		netdev_err(netdev, "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(ring, skb, packet);

	/* Check that there are enough descriptors available */
	if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
		DBGPR("  Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;
		ret = NETDEV_TX_BUSY;
		goto tx_netdev_return;
	}

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netdev_err(netdev, "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Configure required descriptor fields for transmission */
	hw_if->pre_xmit(channel);

#ifdef XGMAC_ENABLE_TX_PKT_DUMP
	xgbe_print_pkt(netdev, skb, true);
#endif

tx_netdev_return:
	spin_unlock_irqrestore(&ring->lock, flags);

	DBGPR("<--xgbe_xmit\n");

	return ret;
}
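/* Return value note for the transmit path above: NETDEV_TX_BUSY is returned
 * only when the ring is out of descriptors, in which case the skb has not
 * been consumed and the stack will requeue it; every other exit path either
 * hands the skb to the hardware or frees it and reports NETDEV_TX_OK.
 */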
static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int pr_mode, am_mode;

	DBGPR("-->xgbe_set_rx_mode\n");

	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

	if (netdev_uc_count(netdev) > pdata->hw_feat.addn_mac)
		pr_mode = 1;
	if (netdev_mc_count(netdev) > pdata->hw_feat.addn_mac)
		am_mode = 1;
	if ((netdev_uc_count(netdev) + netdev_mc_count(netdev)) >
	     pdata->hw_feat.addn_mac)
		pr_mode = 1;

	hw_if->set_promiscuous_mode(pdata, pr_mode);
	hw_if->set_all_multicast_mode(pdata, am_mode);
	hw_if->set_addn_mac_addrs(pdata, am_mode);

	DBGPR("<--xgbe_set_rx_mode\n");
}
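/* Filtering policy implemented above: if the unicast or multicast address
 * lists grow beyond the additional MAC address registers the hardware
 * advertises (hw_feat.addn_mac), the driver falls back to promiscuous or
 * all-multicast mode respectively rather than filtering in software.
 */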
static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}
static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata, 0);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}
*xgbe_get_stats64(struct net_device
*netdev
,
972 struct rtnl_link_stats64
*s
)
974 struct xgbe_prv_data
*pdata
= netdev_priv(netdev
);
975 struct xgbe_mmc_stats
*pstats
= &pdata
->mmc_stats
;
977 DBGPR("-->%s\n", __func__
);
979 pdata
->hw_if
.read_mmc_stats(pdata
);
981 s
->rx_packets
= pstats
->rxframecount_gb
;
982 s
->rx_bytes
= pstats
->rxoctetcount_gb
;
983 s
->rx_errors
= pstats
->rxframecount_gb
-
984 pstats
->rxbroadcastframes_g
-
985 pstats
->rxmulticastframes_g
-
986 pstats
->rxunicastframes_g
;
987 s
->multicast
= pstats
->rxmulticastframes_g
;
988 s
->rx_length_errors
= pstats
->rxlengtherror
;
989 s
->rx_crc_errors
= pstats
->rxcrcerror
;
990 s
->rx_fifo_errors
= pstats
->rxfifooverflow
;
992 s
->tx_packets
= pstats
->txframecount_gb
;
993 s
->tx_bytes
= pstats
->txoctetcount_gb
;
994 s
->tx_errors
= pstats
->txframecount_gb
- pstats
->txframecount_g
;
995 s
->tx_dropped
= netdev
->stats
.tx_dropped
;
997 DBGPR("<--%s\n", __func__
);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	DBGPR("-->xgbe_poll_controller\n");

	disable_irq(pdata->irq_number);

	xgbe_isr(pdata->irq_number, pdata);

	enable_irq(pdata->irq_number);

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */
static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int rxcsum_enabled, rxvlan_enabled;

	rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
	rxvlan_enabled = !!(pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX);

	if ((features & NETIF_F_RXCSUM) && !rxcsum_enabled) {
		hw_if->enable_rx_csum(pdata);
		netdev_alert(netdev, "state change - rxcsum enabled\n");
	} else if (!(features & NETIF_F_RXCSUM) && rxcsum_enabled) {
		hw_if->disable_rx_csum(pdata);
		netdev_alert(netdev, "state change - rxcsum disabled\n");
	}

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan_enabled) {
		hw_if->enable_rx_vlan_stripping(pdata);
		netdev_alert(netdev, "state change - rxvlan enabled\n");
	} else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan_enabled) {
		hw_if->disable_rx_vlan_stripping(pdata);
		netdev_alert(netdev, "state change - rxvlan disabled\n");
	}

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}
static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open		= xgbe_open,
	.ndo_stop		= xgbe_close,
	.ndo_start_xmit		= xgbe_xmit,
	.ndo_set_rx_mode	= xgbe_set_rx_mode,
	.ndo_set_mac_address	= xgbe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= xgbe_change_mtu,
	.ndo_get_stats64	= xgbe_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xgbe_poll_controller,
#endif
	.ndo_set_features	= xgbe_set_features,
};

struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return (struct net_device_ops *)&xgbe_netdev_ops;
}
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	unsigned long flags;
	int processed = 0;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	spin_lock_irqsave(&ring->lock, flags);

	while ((processed < TX_DESC_MAX_PROC) && (ring->dirty < ring->cur)) {
		rdata = GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

#ifdef XGMAC_ENABLE_TX_DESC_DUMP
		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
#endif

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_skb(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_wake_subqueue(netdev, channel->queue_index);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	spin_unlock_irqrestore(&ring->lock, flags);

	return processed;
}
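/* Queue flow control above uses simple hysteresis: xgbe_xmit() stops a
 * subqueue when a packet needs more descriptors than are free, and the
 * cleanup path only wakes it again once more than TX_DESC_MIN_FREE
 * descriptors have been reclaimed, avoiding rapid stop/wake cycles.
 */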
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct sk_buff *skb;
	unsigned int incomplete, error;
	unsigned int cur_len, put_len, max_len;
	int received = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	packet = &ring->packet_data;
	while (received < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* Clear the packet data information */
		memset(packet, 0, sizeof(*packet));
		skb = NULL;
		error = 0;
		cur_len = 0;

read_again:
		rdata = GET_DESC_DATA(ring, ring->cur);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;
		ring->dirty++;

		dma_unmap_single(pdata->dev, rdata->skb_dma,
				 rdata->skb_dma_len, DMA_FROM_DEVICE);
		rdata->skb_dma = 0;

		incomplete = XGMAC_GET_BITS(packet->attributes,
					    RX_PACKET_ATTRIBUTES,
					    INCOMPLETE);

		/* Earlier error, just drain the remaining data */
		if (incomplete && error)
			goto read_again;

		if (error || packet->errors) {
			DBGPR("Error in received packet\n");
			dev_kfree_skb(skb);
			continue;
		}

		put_len = rdata->len - cur_len;
		if (skb) {
			if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) {
				DBGPR("pskb_expand_head error\n");
				/* Drain the rest of this packet as an error */
				error = 1;
				goto read_again;
			}

			memcpy(skb_tail_pointer(skb), rdata->skb->data,
			       put_len);
		} else {
			skb = rdata->skb;
			rdata->skb = NULL;
		}
		skb_put(skb, put_len);
		cur_len += put_len;

		if (incomplete)
			goto read_again;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			DBGPR("packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			continue;
		}

#ifdef XGMAC_ENABLE_RX_PKT_DUMP
		xgbe_print_pkt(netdev, skb, false);
#endif

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);
		skb_mark_napi_id(skb, &pdata->napi);

		netdev->last_rx = jiffies;
		napi_gro_receive(&pdata->napi, skb);
	}

	if (received) {
		desc_if->realloc_skb(channel);

		/* Update the Rx Tail Pointer Register with address of
		 * the last cleaned entry */
		rdata = GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
		XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
				  lower_32_bits(rdata->rdesc_dma));
	}

	DBGPR("<--xgbe_rx_poll: received = %d\n", received);

	return received;
}
static int xgbe_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int processed;
	unsigned int i;

	DBGPR("-->xgbe_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = 0;
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		processed += xgbe_rx_poll(channel, budget - processed);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_poll: received = %d\n", processed);

	return processed;
}
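/* NAPI contract followed above: the poll routine returns how many receive
 * packets were processed; only when that number is below the budget does it
 * call napi_complete() and unmask the Tx/Rx interrupts, otherwise the kernel
 * keeps polling and the interrupts stay disabled.
 */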
void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
		       unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
		      (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
		      le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		      le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
		idx++;
	}
}
void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
		       unsigned int idx)
{
	DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
	      le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
	      le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
}
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char *buf = skb->data;
	unsigned char buffer[128];
	unsigned int i, j;

	netdev_alert(netdev, "\n************** SKB dump ****************\n");

	netdev_alert(netdev, "%s packet of %d bytes\n",
		     (tx_rx ? "TX" : "RX"), skb->len);

	netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));

	for (i = 0, j = 0; i < skb->len;) {
		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
			      buf[i++]);

		if ((i % 32) == 0) {
			netdev_alert(netdev, "  0x%04x: %s\n", i - 32, buffer);
			j = 0;
		} else if ((i % 16) == 0) {
			buffer[j++] = ' ';
			buffer[j++] = ' ';
		} else if ((i % 4) == 0) {
			buffer[j++] = ' ';
		}
	}

	if (i % 32)
		netdev_alert(netdev, "  0x%04x: %s\n", i - (i % 32), buffer);

	netdev_alert(netdev, "\n************** SKB dump ****************\n");
}