/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>

#include "xgbe.h"
#include "xgbe-common.h"
static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
				      unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_usec_to_riwt\n");

	rate = pdata->sysclk_rate;

	/* Convert the input usec value to the watchdog timer value.  Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( usec * ( system_clock_mhz / 10^6 ) ) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	DBGPR("<--xgbe_usec_to_riwt\n");

	return ret;
}

static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
				      unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_riwt_to_usec\n");

	rate = pdata->sysclk_rate;

	/* Convert the input watchdog timer value to the usec value.  Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
	 */
	ret = (riwt * 256) / (rate / 1000000);

	DBGPR("<--xgbe_riwt_to_usec\n");

	return ret;
}
static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
				       pdata->pblx8);

	return 0;
}

static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
{
	return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
}

static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
				       pdata->tx_pbl);
	}

	return 0;
}

static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
{
	return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
}

static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
				       pdata->rx_pbl);
	}

	return 0;
}

static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
				       pdata->tx_osp_mode);
	}

	return 0;
}
static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return 0;
}

static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
				       pdata->rx_riwt);
	}

	return 0;
}
static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
	return 0;
}

static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
				       pdata->rx_buf_size);
	}
}

static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
	}
}

static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
	}

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
			      unsigned int index, unsigned int val)
{
	unsigned int wait;
	int ret = 0;

	mutex_lock(&pdata->rss_mutex);

	if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
		ret = -EBUSY;
		goto unlock;
	}

	XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

	wait = 1000;
	while (wait--) {
		if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
			goto unlock;

		usleep_range(1000, 1500);
	}

	ret = -EBUSY;

unlock:
	mutex_unlock(&pdata->rss_mutex);

	return ret;
}
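/* The RSS key and lookup table are programmed through an indirect access
 * window: the value is placed in MAC_RSSDR, the target (key word or table
 * entry) is selected via the RSSIA/ADDRT fields of MAC_RSSAR, and setting
 * the OB ("operation busy") bit starts the write.  The loop above simply
 * polls OB until the hardware clears it, giving up after roughly a second.
 */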
static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
	unsigned int *key = (unsigned int *)&pdata->rss_key;
	int ret;

	while (key_regs--) {
		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
					 key_regs, *key++);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
		ret = xgbe_write_rss_reg(pdata,
					 XGBE_RSS_LOOKUP_TABLE_TYPE, i,
					 pdata->rss_table[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
{
	memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

	return xgbe_write_rss_hash_key(pdata);
}

static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
				     const u32 *table)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

	return xgbe_write_rss_lookup_table(pdata);
}
static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	/* Program the hash key */
	ret = xgbe_write_rss_hash_key(pdata);
	if (ret)
		return ret;

	/* Program the lookup table */
	ret = xgbe_write_rss_lookup_table(pdata);
	if (ret)
		return ret;

	/* Set the RSS options */
	XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	/* Enable RSS */
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

	return 0;
}

static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.rss)
		return -EOPNOTSUPP;

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

	return 0;
}

static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return;

	if (pdata->netdev->features & NETIF_F_RXHASH)
		ret = xgbe_enable_rss(pdata);
	else
		ret = xgbe_disable_rss(pdata);

	if (ret)
		netdev_err(pdata->netdev,
			   "error configuring RSS, RSS disabled\n");
}
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}
static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;
	struct ieee_ets *ets = pdata->ets;
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		unsigned int ehfc = 0;

		if (pfc && ets) {
			unsigned int prio;

			for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
				unsigned int tc;

				/* Does this queue handle the priority? */
				if (pdata->prio2q_map[prio] != i)
					continue;

				/* Get the Traffic Class for this priority */
				tc = ets->prio_tc[prio];

				/* Check if flow control should be enabled */
				if (pfc->pfc_en & (1 << tc)) {
					ehfc = 1;
					break;
				}
			}
		}

		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

		netif_dbg(pdata, drv, pdata->netdev,
			  "flow control %s for RXq%u\n",
			  ehfc ? "enabled" : "disabled", i);
	}

	/* Set MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);

		/* Set pause time */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}
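/* Example of the per-queue PFC decision above: with ets->prio_tc mapping
 * priority 3 to traffic class 3 and pfc->pfc_en = 0x08 (only TC 3
 * pause-enabled), the Rx queue that priority 3 is mapped to (prio2q_map[3])
 * gets EHFC set to 1, while every other queue keeps EHFC = 0.
 */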
static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}

static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}

static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->tx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_tx_flow_control(pdata);
	else
		xgbe_disable_tx_flow_control(pdata);

	return 0;
}

static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	if (pdata->rx_pause || (pfc && pfc->pfc_en))
		xgbe_enable_rx_flow_control(pdata);
	else
		xgbe_disable_rx_flow_control(pdata);

	return 0;
}

static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
	struct ieee_pfc *pfc = pdata->pfc;

	xgbe_config_tx_flow_control(pdata);
	xgbe_config_rx_flow_control(pdata);

	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
			   (pfc && pfc->pfc_en) ? 1 : 0);
}
static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int dma_ch_isr, dma_ch_ier;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Clear all the interrupts which are set */
		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);

		/* Clear all interrupt enable bits */
		dma_ch_ier = 0;

		/* Enable following interrupts
		 *   NIE  - Normal Interrupt Summary Enable
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 */
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

		if (channel->tx_ring) {
			/* Enable the following Tx interrupts
			 *   TIE - Transmit Interrupt Enable (unless using
			 *         per channel interrupts)
			 */
			if (!pdata->per_channel_irq)
				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
		}
		if (channel->rx_ring) {
			/* Enable following Rx interrupts
			 *   RBUE - Receive Buffer Unavailable Enable
			 *   RIE  - Receive Interrupt Enable (unless using
			 *          per channel interrupts)
			 */
			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
			if (!pdata->per_channel_irq)
				XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
		}

		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
	}
}
static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mac_ier = 0;

	/* Enable Timestamp interrupt */
	XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

	/* Enable all counter interrupts */
	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
}
static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
{
	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
		return 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);

	return 0;
}

static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
{
	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
		return 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);

	return 0;
}

static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
{
	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
		return 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);

	return 0;
}
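/* Summary of the MAC_TCR.SS encodings used above: 0x0 selects 10 Gbps
 * (XGMII), 0x2 selects 2.5 Gbps and 0x3 selects 1 Gbps (GMII).  Each helper
 * reads the field back first so the register is only rewritten on an actual
 * speed change.
 */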
static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

	return 0;
}

static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

	return 0;
}

static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Enable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

	return 0;
}

static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Disable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

	return 0;
}
static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
	u32 poly = 0xedb88320;	/* CRCPOLY_LE */
	u32 crc = ~0;
	u32 temp = 0;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= poly;
	}

	return crc;
}

static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
	u32 crc;
	u16 vid;
	__le16 vid_le;
	u16 vlan_hash_table = 0;

	/* Generate the VLAN Hash Table value */
	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

		vlan_hash_table |= (1 << crc);
	}

	/* Set the VLAN Hash Table filtering register */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

	return 0;
}
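/* How the VLAN hash is derived above: a little-endian CRC-32 (polynomial
 * 0xedb88320) is computed over the 12 valid VID bits, the result is inverted
 * and bit-reversed, and the top 4 bits select one of the 16 bits in
 * MAC_VLANHTR.  Any received C-TAG whose VID hashes to a set bit passes the
 * hash filter.
 */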
static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
				     unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
		  enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

	/* Hardware will still perform VLAN filtering in promiscuous mode */
	if (enable) {
		xgbe_disable_rx_vlan_filtering(pdata);
	} else {
		if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
			xgbe_enable_rx_vlan_filtering(pdata);
	}

	return 0;
}

static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
				       unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
		return 0;

	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
		  enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

	return 0;
}
static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
			     struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	u8 *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (ha) {
		mac_addr = (u8 *)&mac_addr_lo;
		mac_addr[0] = ha->addr[0];
		mac_addr[1] = ha->addr[1];
		mac_addr[2] = ha->addr[2];
		mac_addr[3] = ha->addr[3];
		mac_addr = (u8 *)&mac_addr_hi;
		mac_addr[0] = ha->addr[4];
		mac_addr[1] = ha->addr[5];

		netif_dbg(pdata, drv, pdata->netdev,
			  "adding mac address %pM at %#x\n",
			  ha->addr, *mac_reg);

		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}

	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
	*mac_reg += MAC_MACA_INC;
	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
	*mac_reg += MAC_MACA_INC;
}
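/* Each additional MAC address occupies a HI/LO register pair.  On a
 * little-endian CPU the byte stores above produce, for an illustrative
 * address of 02:11:22:33:44:55, mac_addr_lo = 0x33221102 and
 * mac_addr_hi = 0x5544 with the AE (address enable) bit set; passing
 * ha == NULL writes zeros, which clears (disables) the entry.
 */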
static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mac_reg;
	unsigned int addn_macs;

	mac_reg = MAC_MACA1HR;
	addn_macs = pdata->hw_feat.addn_mac;

	if (netdev_uc_count(netdev) > addn_macs) {
		xgbe_set_promiscuous_mode(pdata, 1);
	} else {
		netdev_for_each_uc_addr(ha, netdev) {
			xgbe_set_mac_reg(pdata, ha, &mac_reg);
			addn_macs--;
		}

		if (netdev_mc_count(netdev) > addn_macs) {
			xgbe_set_all_multicast_mode(pdata, 1);
		} else {
			netdev_for_each_mc_addr(ha, netdev) {
				xgbe_set_mac_reg(pdata, ha, &mac_reg);
				addn_macs--;
			}
		}
	}

	/* Clear remaining additional MAC address entries */
	while (addn_macs--)
		xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}
static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int hash_reg;
	unsigned int hash_table_shift, hash_table_count;
	u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
	u32 crc;
	unsigned int i;

	hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
	hash_table_count = pdata->hw_feat.hash_table_size / 32;
	memset(hash_table, 0, sizeof(hash_table));

	/* Build the MAC Hash Table register values */
	netdev_for_each_uc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	netdev_for_each_mc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	/* Set the MAC Hash Table registers */
	hash_reg = MAC_HTR0;
	for (i = 0; i < hash_table_count; i++) {
		XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
		hash_reg += MAC_HTR_INC;
	}
}
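/* Example of the sizing math above for a 256-entry hash table
 * (hw_feat.hash_table_size = 256): hash_table_shift = 26 - (256 >> 7) = 24,
 * so the top 8 bits of the bit-reversed CRC-32 of the address select one of
 * 256 bits, and hash_table_count = 256 / 32 = 8 hash table registers are
 * written.
 */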
static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
	if (pdata->hw_feat.hash_table_size)
		xgbe_set_mac_hash_table(pdata);
	else
		xgbe_set_mac_addn_addrs(pdata);

	return 0;
}

static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

	mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
		      (addr[1] <<  8) | (addr[0] <<  0);

	XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return 0;
}

static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	unsigned int pr_mode, am_mode;

	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

	xgbe_set_promiscuous_mode(pdata, pr_mode);
	xgbe_set_all_multicast_mode(pdata, am_mode);

	xgbe_add_mac_addresses(pdata);

	return 0;
}
static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
			      int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and reading 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
	mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return mmd_data;
}

static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
				int mmd_reg, int mmd_data)
{
	unsigned int mmd_address;
	unsigned long flags;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and reading 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
	XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
	return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
}

static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

	return 0;
}

static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

	return 0;
}

static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;

	/* Reset the Tx descriptor
	 *   Set buffer 1 (lo) address to zero
	 *   Set buffer 1 (hi) address to zero
	 *   Reset all other control bits (IC, TTSE, B2L & B1L)
	 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
	 */
	rdesc->desc0 = 0;
	rdesc->desc1 = 0;
	rdesc->desc2 = 0;
	rdesc->desc3 = 0;

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}
static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	int i;
	int start_index = ring->cur;

	DBGPR("-->tx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Tx descriptor */
		xgbe_tx_desc_reset(rdata);
	}

	/* Update the total number of Tx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--tx_desc_init\n");
}
static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
			       struct xgbe_ring_data *rdata, unsigned int index)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;
	unsigned int rx_usecs = pdata->rx_usecs;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int inte;
	dma_addr_t hdr_dma, buf_dma;

	if (!rx_usecs && !rx_frames) {
		/* No coalescing, interrupt for every descriptor */
		inte = 1;
	} else {
		/* Set interrupt based on Rx frame coalescing setting */
		if (rx_frames && !((index + 1) % rx_frames))
			inte = 1;
		else
			inte = 0;
	}

	/* Reset the Rx descriptor
	 *   Set buffer 1 (lo) address to header dma address (lo)
	 *   Set buffer 1 (hi) address to header dma address (hi)
	 *   Set buffer 2 (lo) address to buffer dma address (lo)
	 *   Set buffer 2 (hi) address to buffer dma address (hi) and
	 *     set control bits OWN and INTE
	 */
	hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
	buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
	rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
	rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
	rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

	/* Since the Rx DMA engine is likely running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the descriptor
	 */
	dma_wmb();

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}
static void xgbe_rx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	unsigned int start_index = ring->cur;
	unsigned int i;

	DBGPR("-->rx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Rx descriptor */
		xgbe_rx_desc_reset(pdata, rdata, i);
	}

	/* Update the total number of Rx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Update the Rx Descriptor Tail Pointer */
	rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--rx_desc_init\n");
}
static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
				      unsigned int addend)
{
	/* Set the addend register value and tell the device */
	XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);

	/* Wait for addend update to complete */
	while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
		udelay(5);
}
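/* In fine-update mode (TSCFUPDT, set in xgbe_config_tstamp() below) the
 * addend acts as a 32-bit fractional multiplier on the sub-nanosecond
 * increment, so the PTP support code can speed the clock up or slow it down
 * by nudging this value; as a rough rule of thumb,
 * addend = 2^32 * (target tick rate / PTP reference clock rate).  The value
 * itself is computed elsewhere in the driver, not here.
 */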
static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
				 unsigned int nsec)
{
	/* Set the time values and tell the device */
	XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
	XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
	XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);

	/* Wait for time update to complete */
	while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
		udelay(5);
}

static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
{
	u64 nsec;

	nsec = XGMAC_IOREAD(pdata, MAC_STSR);
	nsec *= NSEC_PER_SEC;
	nsec += XGMAC_IOREAD(pdata, MAC_STNR);

	return nsec;
}

static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
{
	unsigned int tx_snr;
	u64 nsec;

	tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
	if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
		return 0;

	nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
	nsec *= NSEC_PER_SEC;
	nsec += tx_snr;

	return nsec;
}
static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
			       struct xgbe_ring_desc *rdesc)
{
	u64 nsec;

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
	    !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
		nsec = le32_to_cpu(rdesc->desc1);
		nsec <<= 32;
		nsec |= le32_to_cpu(rdesc->desc0);
		if (nsec != 0xffffffffffffffffULL) {
			packet->rx_tstamp = nsec;
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       RX_TSTAMP, 1);
		}
	}
}

static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
			      unsigned int mac_tscr)
{
	/* Set one nano-second accuracy */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

	/* Set fine timestamp update */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

	/* Overwrite earlier timestamps */
	XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

	XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

	/* Exit if timestamping is not enabled */
	if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
		return 0;

	/* Initialize time registers */
	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
	XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
	xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
	xgbe_set_tstamp_time(pdata, 0, 0);

	/* Initialize the timecounter */
	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
			 ktime_to_ns(ktime_get_real()));

	return 0;
}
static void xgbe_config_tc(struct xgbe_prv_data *pdata)
{
	unsigned int offset, queue, prio;
	u8 i;

	netdev_reset_tc(pdata->netdev);
	if (!pdata->num_tcs)
		return;

	netdev_set_num_tc(pdata->netdev, pdata->num_tcs);

	for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
		while ((queue < pdata->tx_q_count) &&
		       (pdata->q2tc_map[queue] == i))
			queue++;

		netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
			  i, offset, queue - 1);
		netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
		offset = queue;
	}

	if (!pdata->ets)
		return;

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		netdev_set_prio_tc_map(pdata->netdev, prio,
				       pdata->ets->prio_tc[prio]);
}
static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
{
	struct ieee_ets *ets = pdata->ets;
	unsigned int total_weight, min_weight, weight;
	unsigned int mask, reg, reg_val;
	unsigned int i, prio;

	if (!ets)
		return;

	/* Set Tx to deficit weighted round robin scheduling algorithm (when
	 * traffic class is using ETS algorithm)
	 */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);

	/* Set Traffic Class algorithms */
	total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
	min_weight = total_weight / 100;
	if (!min_weight)
		min_weight = 1;

	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		/* Map the priorities to the traffic class */
		mask = 0;
		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
			if (ets->prio_tc[prio] == i)
				mask |= (1 << prio);
		}

		netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
			  i, mask);
		reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
		reg_val = XGMAC_IOREAD(pdata, reg);

		reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
		reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));

		XGMAC_IOWRITE(pdata, reg, reg_val);

		/* Set the traffic class algorithm */
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			netif_dbg(pdata, drv, pdata->netdev,
				  "TC%u using SP\n", i);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					       MTL_TSA_SP);
			break;
		case IEEE_8021QAZ_TSA_ETS:
			weight = total_weight * ets->tc_tx_bw[i] / 100;
			weight = clamp(weight, min_weight, total_weight);

			netif_dbg(pdata, drv, pdata->netdev,
				  "TC%u using DWRR (weight %u)\n", i, weight);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
					       MTL_TSA_ETS);
			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
					       weight);
			break;
		}
	}

	xgbe_config_tc(pdata);
}

static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
{
	xgbe_config_flow_control(pdata);
}
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
			       struct xgbe_ring *ring)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring_data *rdata;

	/* Make sure everything is written before the register write */
	wmb();

	/* Issue a poll command to Tx DMA by writing address
	 * of next immediate free descriptor
	 */
	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Start the Tx timer */
	if (pdata->tx_usecs && !channel->tx_timer_active) {
		channel->tx_timer_active = 1;
		mod_timer(&channel->tx_timer,
			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
	}

	ring->tx.xmit_more = 0;
}
static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	unsigned int csum, tso, vlan;
	unsigned int tso_context, vlan_context;
	unsigned int tx_set_ic;
	int start_index = ring->cur;
	int cur_index = ring->cur;
	int i;

	DBGPR("-->xgbe_dev_xmit\n");

	csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      CSUM_ENABLE);
	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	if (tso && (packet->mss != ring->tx.cur_mss))
		tso_context = 1;
	else
		tso_context = 0;

	if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
		vlan_context = 1;
	else
		vlan_context = 0;

	/* Determine if an interrupt should be generated for this Tx:
	 *   Interrupt:
	 *     - Tx frame count exceeds the frame count setting
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set exceeds the frame count setting
	 *   No interrupt:
	 *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
	 *     - Addition of Tx frame count to the frame count since the
	 *       last interrupt was set does not exceed the frame count setting
	 */
	ring->coalesce_count += packet->tx_packets;
	if (!pdata->tx_frames)
		tx_set_ic = 0;
	else if (packet->tx_packets > pdata->tx_frames)
		tx_set_ic = 1;
	else if ((ring->coalesce_count % pdata->tx_frames) <
		 packet->tx_packets)
		tx_set_ic = 1;
	else
		tx_set_ic = 0;

	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	rdesc = rdata->rdesc;

	/* Create a context descriptor if this is a TSO packet */
	if (tso_context || vlan_context) {
		if (tso_context) {
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "TSO context descriptor, mss=%u\n",
				  packet->mss);

			/* Set the MSS size */
			XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
					  MSS, packet->mss);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Indicate this descriptor contains the MSS */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  TCMSSV, 1);

			ring->tx.cur_mss = packet->mss;
		}

		if (vlan_context) {
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "VLAN context descriptor, ctag=%u\n",
				  packet->vlan_ctag);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Set the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VT, packet->vlan_ctag);

			/* Indicate this descriptor contains the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VLTV, 1);

			ring->tx.cur_vlan_ctag = packet->vlan_ctag;
		}

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		rdesc = rdata->rdesc;
	}

	/* Update buffer address (for TSO this is the header) */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

	/* Update the buffer length */
	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
			  rdata->skb_dma_len);

	/* VLAN tag insertion check */
	if (vlan)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
				  TX_NORMAL_DESC2_VLAN_INSERT);

	/* Timestamp enablement check */
	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);

	/* Mark it as First Descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

	/* Mark it as a NORMAL descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

	/* Set OWN bit if not the first descriptor */
	if (cur_index != start_index)
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (tso) {
		/* Enable TSO */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
				  packet->tcp_payload_len);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
				  packet->tcp_header_len / 4);

		pdata->ext_stats.tx_tso_packets++;
	} else {
		/* Enable CRC and Pad Insertion */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);

		/* Set the total length to be transmitted */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
				  packet->length);
	}

	for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		rdesc = rdata->rdesc;

		/* Update buffer address */
		rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
		rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

		/* Update the buffer length */
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
				  rdata->skb_dma_len);

		/* Set OWN bit */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

		/* Mark it as NORMAL descriptor */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);
	}

	/* Set LAST bit for the last descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

	/* Set IC bit based on Tx coalescing settings */
	if (tx_set_ic)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

	/* Save the Tx info to report back during cleanup */
	rdata->tx.packets = packet->tx_packets;
	rdata->tx.bytes = packet->tx_bytes;

	/* In case the Tx DMA engine is running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the first descriptor
	 */
	dma_wmb();

	/* Set OWN bit for the first descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	rdesc = rdata->rdesc;
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (netif_msg_tx_queued(pdata))
		xgbe_dump_tx_desc(pdata, ring, start_index,
				  packet->rdesc_count, 1);

	/* Make sure ownership is written to the descriptor */
	smp_wmb();

	ring->cur = cur_index + 1;
	if (!packet->skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
						   channel->queue_index)))
		xgbe_tx_start_xmit(channel, ring);
	else
		ring->tx.xmit_more = 1;

	DBGPR("  %s: descriptors %u to %u written\n",
	      channel->name, start_index & (ring->rdesc_count - 1),
	      (ring->cur - 1) & (ring->rdesc_count - 1));

	DBGPR("<--xgbe_dev_xmit\n");
}
static int xgbe_dev_read(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	struct net_device *netdev = pdata->netdev;
	unsigned int err, etlt, l34t;

	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	/* Check for data availability */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
		return 1;

	/* Make sure descriptor fields are read after reading the OWN bit */
	dma_rmb();

	if (netif_msg_rx_status(pdata))
		xgbe_dump_rx_desc(pdata, ring, ring->cur);

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
		/* Timestamp Context Descriptor */
		xgbe_get_rx_tstamp(packet, rdesc);

		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT, 1);
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT_NEXT, 0);
		return 0;
	}

	/* Normal Descriptor, be sure Context Descriptor bit is off */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);

	/* Indicate if a Context Descriptor is next */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CONTEXT_NEXT, 1);

	/* Get the header length */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
						      RX_NORMAL_DESC2, HL);
		if (rdata->rx.hdr_len)
			pdata->ext_stats.rx_split_header_packets++;
	}

	/* Get the RSS hash */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       RSS_HASH, 1);

		packet->rss_hash = le32_to_cpu(rdesc->desc1);

		l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
		switch (l34t) {
		case RX_DESC3_L34T_IPV4_TCP:
		case RX_DESC3_L34T_IPV4_UDP:
		case RX_DESC3_L34T_IPV6_TCP:
		case RX_DESC3_L34T_IPV6_UDP:
			packet->rss_hash_type = PKT_HASH_TYPE_L4;
			break;
		default:
			packet->rss_hash_type = PKT_HASH_TYPE_L3;
		}
	}

	/* Get the packet length */
	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);

	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
		/* Not all the data has been transferred for this packet */
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       INCOMPLETE, 1);
		return 0;
	}

	/* This is the last of the data for this packet */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		       INCOMPLETE, 0);

	/* Set checksum done indicator as appropriate */
	if (netdev->features & NETIF_F_RXCSUM)
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CSUM_DONE, 1);

	/* Check for errors (only valid in last descriptor) */
	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);

	if (!err || !etlt) {
		/* No error if err is 0 or etlt is 0 */
		if ((etlt == 0x09) &&
		    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       VLAN_CTAG, 1);
			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
							      RX_NORMAL_DESC0,
							      OVT);
			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
				  packet->vlan_ctag);
		}
	} else {
		if ((etlt == 0x05) || (etlt == 0x06))
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       CSUM_DONE, 0);
		else
			XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
				       FRAME, 1);
	}

	DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
	      ring->cur & (ring->rdesc_count - 1), ring->cur);

	return 0;
}
static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
}

static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share LD bit, so check TDES3.LD bit */
	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
}
static int xgbe_enable_int(struct xgbe_channel *channel,
			   enum xgbe_int int_id)
{
	unsigned int dma_ch_ier;

	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);

	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
		break;
	case XGMAC_INT_DMA_ALL:
		dma_ch_ier |= channel->saved_ier;
		break;
	default:
		return -1;
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);

	return 0;
}

static int xgbe_disable_int(struct xgbe_channel *channel,
			    enum xgbe_int int_id)
{
	unsigned int dma_ch_ier;

	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);

	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
		break;
	case XGMAC_INT_DMA_ALL:
		channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
		dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
		break;
	default:
		return -1;
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);

	return 0;
}
static int xgbe_exit(struct xgbe_prv_data *pdata)
{
	unsigned int count = 2000;

	DBGPR("-->xgbe_exit\n");

	/* Issue a software reset */
	XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
	usleep_range(10, 15);

	/* Poll Until Poll Condition */
	while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		usleep_range(500, 600);

	if (!count)
		return -EBUSY;

	DBGPR("<--xgbe_exit\n");

	return 0;
}

static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
	unsigned int i, count;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return 0;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll Until Poll Condition */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
		while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
							MTL_Q_TQOMR, FTQ))
			usleep_range(500, 600);

		if (!count)
			return -EBUSY;
	}

	return 0;
}
static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
	/* Set enhanced addressing mode */
	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

	/* Set the System Bus mode */
	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
}

static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
	unsigned int arcache, awcache;

	arcache = 0;
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
	XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

	awcache = 0;
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
	XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
}
static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
				       MTL_TSA_ETS);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
	}

	/* Set Rx to strict priority algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
						  unsigned int queue_count)
{
	unsigned int q_fifo_size;
	unsigned int p_fifo;

	/* Calculate the configured fifo size */
	q_fifo_size = 1 << (fifo_size + 7);

	/* The configured value may not be the actual amount of fifo RAM */
	q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);

	q_fifo_size = q_fifo_size / queue_count;

	/* Each increment in the queue fifo size represents 256 bytes of
	 * fifo, with 0 representing 256 bytes.  Distribute the fifo equally
	 * between the queues.
	 */
	p_fifo = q_fifo_size / 256;
	if (p_fifo)
		p_fifo--;

	return p_fifo;
}
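/* Example of the fifo math above: a hardware-reported fifo_size of 9 encodes
 * 1 << (9 + 7) = 64 KB.  Capped at XGBE_FIFO_MAX and split across 4 queues
 * that is 16 KB per queue, so p_fifo = 16384 / 256 - 1 = 63, which is the
 * TQS/RQS value programmed by the callers below (reported as a 16384 byte
 * fifo per queue).
 */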
static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int i;

	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
						  pdata->tx_q_count);

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);

	netif_info(pdata, drv, pdata->netdev,
		   "%d Tx hardware queues, %d byte fifo per queue\n",
		   pdata->tx_q_count, ((fifo_size + 1) * 256));
}

static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int i;

	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
						  pdata->rx_q_count);

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);

	netif_info(pdata, drv, pdata->netdev,
		   "%d Rx hardware queues, %d byte fifo per queue\n",
		   pdata->rx_q_count, ((fifo_size + 1) * 256));
}
static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
{
	unsigned int qptc, qptc_extra, queue;
	unsigned int prio_queues;
	unsigned int ppq, ppq_extra, prio;
	unsigned int mask;
	unsigned int i, j, reg, reg_val;

	/* Map the MTL Tx Queues to Traffic Classes
	 *   Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
					       Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}

		if (i < qptc_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
					       Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}
	}

	/* Map the 8 VLAN priority values to available MTL Rx queues */
	prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
			    pdata->rx_q_count);
	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

	reg = MAC_RQC2R;
	reg_val = 0;
	for (i = 0, prio = 0; i < prio_queues;) {
		mask = 0;
		for (j = 0; j < ppq; j++) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		if (i < ppq_extra) {
			netif_dbg(pdata, drv, pdata->netdev,
				  "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);
		reg += MAC_RQC2_INC;
		reg_val = 0;
	}

	/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
	reg = MTL_RQDCM0R;
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count;) {
		reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

		if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MTL_RQDCM_INC;
		reg_val = 0;
	}
}
static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		/* Activate flow control when less than 4k left in fifo */
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);

		/* De-activate flow control when more than 6k left in fifo */
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
	}
}
static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
	xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);

	/* Filtering is done using perfect filtering and hash filtering */
	if (pdata->hw_feat.hash_table_size) {
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
	}
}
static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
	unsigned int val;

	val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}
static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
{
	switch (pdata->phy_speed) {
	case SPEED_10000:
		xgbe_set_xgmii_speed(pdata);
		break;

	case SPEED_2500:
		xgbe_set_gmii_2500_speed(pdata);
		break;

	case SPEED_1000:
		xgbe_set_gmii_speed(pdata);
		break;
	}
}
static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
	if (pdata->netdev->features & NETIF_F_RXCSUM)
		xgbe_enable_rx_csum(pdata);
	else
		xgbe_disable_rx_csum(pdata);
}
static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	/* Set the current VLAN Hash Table register value */
	xgbe_update_vlan_hash_table(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		xgbe_enable_rx_vlan_filtering(pdata);
	else
		xgbe_disable_rx_vlan_filtering(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		xgbe_enable_rx_vlan_stripping(pdata);
	else
		xgbe_disable_rx_vlan_stripping(pdata);
}
static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
	bool read_hi;
	u64 val;

	switch (reg_lo) {
	/* These registers are always 64 bit */
	case MMC_TXOCTETCOUNT_GB_LO:
	case MMC_TXOCTETCOUNT_G_LO:
	case MMC_RXOCTETCOUNT_GB_LO:
	case MMC_RXOCTETCOUNT_G_LO:
		read_hi = true;
		break;

	default:
		read_hi = false;
	}

	val = XGMAC_IOREAD(pdata, reg_lo);

	if (read_hi)
		val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);

	return val;
}
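/* For the four always-64-bit counters the register 4 bytes above the _LO
 * address holds the upper 32 bits, which is why the second XGMAC_IOREAD()
 * above is shifted into bits 63:32 of the returned value.
 */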
static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
		stats->txoctetcount_gb += xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
		stats->txframecount_gb += xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
		stats->txbroadcastframes_g += xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
		stats->txmulticastframes_g += xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
		stats->tx64octets_gb += xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
		stats->tx65to127octets_gb += xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
		stats->tx128to255octets_gb += xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
		stats->tx256to511octets_gb += xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
		stats->tx512to1023octets_gb += xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
		stats->tx1024tomaxoctets_gb += xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
		stats->txunicastframes_gb += xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
		stats->txmulticastframes_gb += xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
		stats->txbroadcastframes_g += xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
		stats->txunderflowerror += xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
		stats->txoctetcount_g += xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
		stats->txframecount_g += xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
		stats->txpauseframes += xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
		stats->txvlanframes_g += xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}
static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
		stats->rxframecount_gb += xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
		stats->rxoctetcount_gb += xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
		stats->rxoctetcount_g += xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
		stats->rxbroadcastframes_g += xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
		stats->rxmulticastframes_g += xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
		stats->rxcrcerror += xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
		stats->rxrunterror += xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
		stats->rxjabbererror += xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
		stats->rxundersize_g += xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
		stats->rxoversize_g += xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
		stats->rx64octets_gb += xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
		stats->rx65to127octets_gb += xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
		stats->rx128to255octets_gb += xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
		stats->rx256to511octets_gb += xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
		stats->rx512to1023octets_gb += xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
		stats->rx1024tomaxoctets_gb += xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
		stats->rxunicastframes_g += xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
		stats->rxlengtherror += xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
		stats->rxoutofrangetype += xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
		stats->rxpauseframes += xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
		stats->rxfifooverflow += xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
		stats->rxvlanframes_gb += xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
		stats->rxwatchdogerror += xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}
static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	stats->txoctetcount_gb += xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txframecount_gb += xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txbroadcastframes_g += xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txmulticastframes_g += xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->tx64octets_gb += xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx65to127octets_gb += xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx128to255octets_gb += xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx256to511octets_gb += xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx512to1023octets_gb += xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb += xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->txunicastframes_gb += xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb += xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txbroadcastframes_g += xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txunderflowerror += xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txoctetcount_g += xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txframecount_g += xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txpauseframes += xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txvlanframes_g += xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

	stats->rxframecount_gb += xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxoctetcount_gb += xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_g += xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxbroadcastframes_g += xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxmulticastframes_g += xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxcrcerror += xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
	stats->rxrunterror += xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
	stats->rxjabbererror += xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
	stats->rxundersize_g += xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
	stats->rxoversize_g += xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
	stats->rx64octets_gb += xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx65to127octets_gb += xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx128to255octets_gb += xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx256to511octets_gb += xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx512to1023octets_gb += xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb += xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rxunicastframes_g += xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxlengtherror += xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxoutofrangetype += xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxpauseframes += xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxfifooverflow += xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxvlanframes_gb += xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxwatchdogerror += xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}
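/* Because the counters are configured to reset on read (ROR is set in
 * xgbe_config_mmc() below), each read returns only the delta accumulated
 * since the previous read; freezing the counters with MCF keeps the
 * hardware from updating them mid-read, and the deltas are added into the
 * software totals with "+=".
 */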
static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
	/* Set counters to reset on read */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

	/* Reset the counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}
static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
				 struct xgbe_channel *channel)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* Calculate the status register to read and the position within */
	if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) +
			 DMA_DSR0_TPS_START;
	} else {
		tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
			 DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state.  Don't wait forever though...
	 */
	tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, tx_timeout)) {
		tx_status = XGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, tx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Tx DMA channel %u to stop\n",
			    channel->queue_index);
}
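/* Illustrative register math, assuming (as in the register definitions)
 * DMA_DSRX_FIRST_QUEUE is 3 and DMA_DSRX_QPR is 4: queue 1 is reported in
 * DMA_DSR0, while queue 5 gives tx_qidx = 2 and is reported in DMA_DSR1 at
 * bit position (2 % 4) * DMA_DSR_Q_WIDTH + DMA_DSRX_TPS_START.
 */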
static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
	}

	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
				       MTL_Q_ENABLED);

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		xgbe_prepare_tx_stop(pdata, channel);
	}

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);

	/* Disable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
	}
}
static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
				 unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
	 * wait forever though...
	 */
	rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
	while (time_before(jiffies, rx_timeout)) {
		rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		usleep_range(500, 1000);
	}

	if (!time_before(jiffies, rx_timeout))
		netdev_info(pdata->netdev,
			    "timed out waiting for Rx queue %u to empty\n",
			    queue);
}
static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int reg_val, i;

	/* Enable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
	}

	/* Enable each Rx queue */
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}
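/* Each Rx queue owns a two-bit field in MAC_RQC0R (hence the "i << 1"
 * shift); 0x02 is the enable pattern written for every queue, and the
 * register is written once after the loop rather than per queue.
 */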
static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Disable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < pdata->rx_q_count; i++)
		xgbe_prepare_rx_stop(pdata, i);

	/* Disable each Rx queue */
	XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);

	/* Disable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
	}
}
static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
	}

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		xgbe_prepare_tx_stop(pdata, channel);
	}

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
	}
}
static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
	}
}
static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Disable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
	}
}
static int xgbe_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_init\n");

	/* Flush Tx queues */
	ret = xgbe_flush_tx_queues(pdata);
	if (ret)
		return ret;

	/*
	 * Initialize DMA related features
	 */
	xgbe_config_dma_bus(pdata);
	xgbe_config_dma_cache(pdata);
	xgbe_config_osp_mode(pdata);
	xgbe_config_pblx8(pdata);
	xgbe_config_tx_pbl_val(pdata);
	xgbe_config_rx_pbl_val(pdata);
	xgbe_config_rx_coalesce(pdata);
	xgbe_config_tx_coalesce(pdata);
	xgbe_config_rx_buffer_size(pdata);
	xgbe_config_tso_mode(pdata);
	xgbe_config_sph_mode(pdata);
	xgbe_config_rss(pdata);
	desc_if->wrapper_tx_desc_init(pdata);
	desc_if->wrapper_rx_desc_init(pdata);
	xgbe_enable_dma_interrupts(pdata);

	/*
	 * Initialize MTL related features
	 */
	xgbe_config_mtl_mode(pdata);
	xgbe_config_queue_mapping(pdata);
	xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	xgbe_config_tx_fifo_size(pdata);
	xgbe_config_rx_fifo_size(pdata);
	xgbe_config_flow_control_threshold(pdata);
	/*TODO: Error Packet and undersized good Packet forwarding enable
	 */
	xgbe_config_dcb_tc(pdata);
	xgbe_config_dcb_pfc(pdata);
	xgbe_enable_mtl_interrupts(pdata);

	/*
	 * Initialize MAC related features
	 */
	xgbe_config_mac_address(pdata);
	xgbe_config_rx_mode(pdata);
	xgbe_config_jumbo_enable(pdata);
	xgbe_config_flow_control(pdata);
	xgbe_config_mac_speed(pdata);
	xgbe_config_checksum_offload(pdata);
	xgbe_config_vlan_support(pdata);
	xgbe_config_mmc(pdata);
	xgbe_enable_mac_interrupts(pdata);

	DBGPR("<--xgbe_init\n");

	return 0;
}
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{
	DBGPR("-->xgbe_init_function_ptrs\n");

	hw_if->tx_complete = xgbe_tx_complete;

	hw_if->set_mac_address = xgbe_set_mac_address;
	hw_if->config_rx_mode = xgbe_config_rx_mode;

	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
	hw_if->disable_rx_csum = xgbe_disable_rx_csum;

	hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
	hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
	hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
	hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
	hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;

	hw_if->read_mmd_regs = xgbe_read_mmd_regs;
	hw_if->write_mmd_regs = xgbe_write_mmd_regs;

	hw_if->set_gmii_speed = xgbe_set_gmii_speed;
	hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
	hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;

	hw_if->enable_tx = xgbe_enable_tx;
	hw_if->disable_tx = xgbe_disable_tx;
	hw_if->enable_rx = xgbe_enable_rx;
	hw_if->disable_rx = xgbe_disable_rx;

	hw_if->powerup_tx = xgbe_powerup_tx;
	hw_if->powerdown_tx = xgbe_powerdown_tx;
	hw_if->powerup_rx = xgbe_powerup_rx;
	hw_if->powerdown_rx = xgbe_powerdown_rx;

	hw_if->dev_xmit = xgbe_dev_xmit;
	hw_if->dev_read = xgbe_dev_read;
	hw_if->enable_int = xgbe_enable_int;
	hw_if->disable_int = xgbe_disable_int;
	hw_if->init = xgbe_init;
	hw_if->exit = xgbe_exit;

	/* Descriptor related Sequences have to be initialized here */
	hw_if->tx_desc_init = xgbe_tx_desc_init;
	hw_if->rx_desc_init = xgbe_rx_desc_init;
	hw_if->tx_desc_reset = xgbe_tx_desc_reset;
	hw_if->rx_desc_reset = xgbe_rx_desc_reset;
	hw_if->is_last_desc = xgbe_is_last_desc;
	hw_if->is_context_desc = xgbe_is_context_desc;
	hw_if->tx_start_xmit = xgbe_tx_start_xmit;

	hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

	/* For RX coalescing */
	hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
	hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
	hw_if->usec_to_riwt = xgbe_usec_to_riwt;
	hw_if->riwt_to_usec = xgbe_riwt_to_usec;

	/* For RX and TX threshold config */
	hw_if->config_rx_threshold = xgbe_config_rx_threshold;
	hw_if->config_tx_threshold = xgbe_config_tx_threshold;

	/* For RX and TX Store and Forward Mode config */
	hw_if->config_rsf_mode = xgbe_config_rsf_mode;
	hw_if->config_tsf_mode = xgbe_config_tsf_mode;

	/* For TX DMA Operating on Second Frame config */
	hw_if->config_osp_mode = xgbe_config_osp_mode;

	/* For RX and TX PBL config */
	hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
	hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
	hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
	hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
	hw_if->config_pblx8 = xgbe_config_pblx8;

	/* For MMC statistics support */
	hw_if->tx_mmc_int = xgbe_tx_mmc_int;
	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
	hw_if->read_mmc_stats = xgbe_read_mmc_stats;

	/* For PTP config */
	hw_if->config_tstamp = xgbe_config_tstamp;
	hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
	hw_if->set_tstamp_time = xgbe_set_tstamp_time;
	hw_if->get_tstamp_time = xgbe_get_tstamp_time;
	hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;

	/* For Data Center Bridging config */
	hw_if->config_tc = xgbe_config_tc;
	hw_if->config_dcb_tc = xgbe_config_dcb_tc;
	hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;

	/* For Receive Side Scaling */
	hw_if->enable_rss = xgbe_enable_rss;
	hw_if->disable_rss = xgbe_disable_rss;
	hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
	hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;

	DBGPR("<--xgbe_init_function_ptrs\n");
}