drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1 /*
2 * AMD 10Gb Ethernet driver
4 * This file is available to you under your choice of the following two
5 * licenses:
7 * License 1: GPLv2
9 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
57 * License 2: Modified BSD
59 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
60 * All rights reserved.
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
117 #include <linux/phy.h>
118 #include <linux/mdio.h>
119 #include <linux/clk.h>
120 #include <linux/bitrev.h>
121 #include <linux/crc32.h>
123 #include "xgbe.h"
124 #include "xgbe-common.h"
126 static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
128 return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
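/* For example, with the default MTU of 1500 this evaluates to
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes.
 */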
131 static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
132 unsigned int usec)
134 unsigned long rate;
135 unsigned int ret;
137 DBGPR("-->xgbe_usec_to_riwt\n");
139 rate = pdata->sysclk_rate;
142 * Convert the input usec value to the watchdog timer value. Each
143 * watchdog timer value is equivalent to 256 clock cycles.
144 * Calculate the required value as:
145  *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
147 ret = (usec * (rate / 1000000)) / 256;
149 DBGPR("<--xgbe_usec_to_riwt\n");
151 return ret;
154 static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
155 unsigned int riwt)
157 unsigned long rate;
158 unsigned int ret;
160 DBGPR("-->xgbe_riwt_to_usec\n");
162 rate = pdata->sysclk_rate;
165 * Convert the input watchdog timer value to the usec value. Each
166 * watchdog timer value is equivalent to 256 clock cycles.
167 * Calculate the required value as:
168  *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
170 ret = (riwt * 256) / (rate / 1000000);
172 DBGPR("<--xgbe_riwt_to_usec\n");
174 return ret;
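/* Worked example for the two conversions above, assuming a 125 MHz
 * system clock (sysclk_rate of 125000000): 30 usec converts to a
 * watchdog value of (30 * 125) / 256 = 14, and 14 converts back to
 * (14 * 256) / 125 = 28 usec. The round trip is lossy because both
 * directions use integer division.
 */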
177 static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
179 struct xgbe_channel *channel;
180 unsigned int i;
182 channel = pdata->channel;
183 for (i = 0; i < pdata->channel_count; i++, channel++)
184 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
185 pdata->pblx8);
187 return 0;
190 static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
192 return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
195 static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
197 struct xgbe_channel *channel;
198 unsigned int i;
200 channel = pdata->channel;
201 for (i = 0; i < pdata->channel_count; i++, channel++) {
202 if (!channel->tx_ring)
203 break;
205 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
206 pdata->tx_pbl);
209 return 0;
212 static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
214 return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
217 static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
219 struct xgbe_channel *channel;
220 unsigned int i;
222 channel = pdata->channel;
223 for (i = 0; i < pdata->channel_count; i++, channel++) {
224 if (!channel->rx_ring)
225 break;
227 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
228 pdata->rx_pbl);
231 return 0;
234 static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
236 struct xgbe_channel *channel;
237 unsigned int i;
239 channel = pdata->channel;
240 for (i = 0; i < pdata->channel_count; i++, channel++) {
241 if (!channel->tx_ring)
242 break;
244 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
245 pdata->tx_osp_mode);
248 return 0;
251 static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
253 unsigned int i;
255 for (i = 0; i < pdata->rx_q_count; i++)
256 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
258 return 0;
261 static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
263 unsigned int i;
265 for (i = 0; i < pdata->tx_q_count; i++)
266 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
268 return 0;
271 static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
272 unsigned int val)
274 unsigned int i;
276 for (i = 0; i < pdata->rx_q_count; i++)
277 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
279 return 0;
282 static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
283 unsigned int val)
285 unsigned int i;
287 for (i = 0; i < pdata->tx_q_count; i++)
288 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
290 return 0;
293 static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
295 struct xgbe_channel *channel;
296 unsigned int i;
298 channel = pdata->channel;
299 for (i = 0; i < pdata->channel_count; i++, channel++) {
300 if (!channel->rx_ring)
301 break;
303 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
304 pdata->rx_riwt);
307 return 0;
310 static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
312 return 0;
315 static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
317 struct xgbe_channel *channel;
318 unsigned int i;
320 channel = pdata->channel;
321 for (i = 0; i < pdata->channel_count; i++, channel++) {
322 if (!channel->rx_ring)
323 break;
325 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
326 pdata->rx_buf_size);
330 static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
332 struct xgbe_channel *channel;
333 unsigned int i;
335 channel = pdata->channel;
336 for (i = 0; i < pdata->channel_count; i++, channel++) {
337 if (!channel->tx_ring)
338 break;
340 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
344 static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
346 struct xgbe_channel *channel;
347 unsigned int i;
349 channel = pdata->channel;
350 for (i = 0; i < pdata->channel_count; i++, channel++) {
351 if (!channel->rx_ring)
352 break;
354 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
357 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
360 static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
361 unsigned int index, unsigned int val)
363 unsigned int wait;
364 int ret = 0;
366 mutex_lock(&pdata->rss_mutex);
368 if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
369 ret = -EBUSY;
370 goto unlock;
373 XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
375 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
376 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
377 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
378 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
380 wait = 1000;
381 while (wait--) {
382 if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
383 goto unlock;
385 usleep_range(1000, 1500);
388 ret = -EBUSY;
390 unlock:
391 mutex_unlock(&pdata->rss_mutex);
393 return ret;
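/* Note on the handshake above: the OB (operation busy) bit is polled up
 * to 1000 times with a 1000-1500 usec sleep per attempt, so a stuck
 * operation is reported as -EBUSY after roughly 1 to 1.5 seconds.
 */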
396 static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
398 unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
399 unsigned int *key = (unsigned int *)&pdata->rss_key;
400 int ret;
402 while (key_regs--) {
403 ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
404 key_regs, *key++);
405 if (ret)
406 return ret;
409 return 0;
412 static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
414 unsigned int i;
415 int ret;
417 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
418 ret = xgbe_write_rss_reg(pdata,
419 XGBE_RSS_LOOKUP_TABLE_TYPE, i,
420 pdata->rss_table[i]);
421 if (ret)
422 return ret;
425 return 0;
428 static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
430 memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
432 return xgbe_write_rss_hash_key(pdata);
435 static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
436 const u32 *table)
438 unsigned int i;
440 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
441 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
443 return xgbe_write_rss_lookup_table(pdata);
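/* Illustrative only: one way a caller might spread flows evenly is a
 * round-robin assignment of lookup table slots to DMA channels (the
 * actual default table is established elsewhere by the driver):
 *
 *	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
 *		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
 *			       i % pdata->channel_count);
 *	xgbe_write_rss_lookup_table(pdata);
 */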
446 static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
448 int ret;
450 if (!pdata->hw_feat.rss)
451 return -EOPNOTSUPP;
453 /* Program the hash key */
454 ret = xgbe_write_rss_hash_key(pdata);
455 if (ret)
456 return ret;
458 /* Program the lookup table */
459 ret = xgbe_write_rss_lookup_table(pdata);
460 if (ret)
461 return ret;
463 /* Set the RSS options */
464 XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
466 /* Enable RSS */
467 XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
469 return 0;
472 static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
474 if (!pdata->hw_feat.rss)
475 return -EOPNOTSUPP;
477 XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
479 return 0;
482 static void xgbe_config_rss(struct xgbe_prv_data *pdata)
484 int ret;
486 if (!pdata->hw_feat.rss)
487 return;
489 if (pdata->netdev->features & NETIF_F_RXHASH)
490 ret = xgbe_enable_rss(pdata);
491 else
492 ret = xgbe_disable_rss(pdata);
494 if (ret)
495 netdev_err(pdata->netdev,
496 "error configuring RSS, RSS disabled\n");
499 static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
500 unsigned int queue)
502 unsigned int prio, tc;
504 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
505 /* Does this queue handle the priority? */
506 if (pdata->prio2q_map[prio] != queue)
507 continue;
509 /* Get the Traffic Class for this priority */
510 tc = pdata->ets->prio_tc[prio];
512 /* Check if PFC is enabled for this traffic class */
513 if (pdata->pfc->pfc_en & (1 << tc))
514 return true;
517 return false;
520 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
522 unsigned int max_q_count, q_count;
523 unsigned int reg, reg_val;
524 unsigned int i;
526 /* Clear MTL flow control */
527 for (i = 0; i < pdata->rx_q_count; i++)
528 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
530 /* Clear MAC flow control */
531 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
532 q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
533 reg = MAC_Q0TFCR;
534 for (i = 0; i < q_count; i++) {
535 reg_val = XGMAC_IOREAD(pdata, reg);
536 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
537 XGMAC_IOWRITE(pdata, reg, reg_val);
539 reg += MAC_QTFCR_INC;
542 return 0;
545 static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
547 struct ieee_pfc *pfc = pdata->pfc;
548 struct ieee_ets *ets = pdata->ets;
549 unsigned int max_q_count, q_count;
550 unsigned int reg, reg_val;
551 unsigned int i;
553 /* Set MTL flow control */
554 for (i = 0; i < pdata->rx_q_count; i++) {
555 unsigned int ehfc = 0;
557 if (pdata->rx_rfd[i]) {
558 /* Flow control thresholds are established */
559 if (pfc && ets) {
560 if (xgbe_is_pfc_queue(pdata, i))
561 ehfc = 1;
562 } else {
563 ehfc = 1;
567 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
569 netif_dbg(pdata, drv, pdata->netdev,
570 "flow control %s for RXq%u\n",
571 ehfc ? "enabled" : "disabled", i);
574 /* Set MAC flow control */
575 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
576 q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
577 reg = MAC_Q0TFCR;
578 for (i = 0; i < q_count; i++) {
579 reg_val = XGMAC_IOREAD(pdata, reg);
581 /* Enable transmit flow control */
582 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
583 /* Set pause time */
584 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
586 XGMAC_IOWRITE(pdata, reg, reg_val);
588 reg += MAC_QTFCR_INC;
591 return 0;
594 static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
596 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
598 return 0;
601 static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
603 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
605 return 0;
608 static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
610 struct ieee_pfc *pfc = pdata->pfc;
612 if (pdata->tx_pause || (pfc && pfc->pfc_en))
613 xgbe_enable_tx_flow_control(pdata);
614 else
615 xgbe_disable_tx_flow_control(pdata);
617 return 0;
620 static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
622 struct ieee_pfc *pfc = pdata->pfc;
624 if (pdata->rx_pause || (pfc && pfc->pfc_en))
625 xgbe_enable_rx_flow_control(pdata);
626 else
627 xgbe_disable_rx_flow_control(pdata);
629 return 0;
632 static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
634 struct ieee_pfc *pfc = pdata->pfc;
636 xgbe_config_tx_flow_control(pdata);
637 xgbe_config_rx_flow_control(pdata);
639 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
640 (pfc && pfc->pfc_en) ? 1 : 0);
643 static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
645 struct xgbe_channel *channel;
646 unsigned int dma_ch_isr, dma_ch_ier;
647 unsigned int i;
649 /* Set the interrupt mode if supported */
650 if (pdata->channel_irq_mode)
651 XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
652 pdata->channel_irq_mode);
654 channel = pdata->channel;
655 for (i = 0; i < pdata->channel_count; i++, channel++) {
656 /* Clear all the interrupts which are set */
657 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
658 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
660 /* Clear all interrupt enable bits */
661 dma_ch_ier = 0;
663                 /* Enable the following interrupts
664 * NIE - Normal Interrupt Summary Enable
665 * AIE - Abnormal Interrupt Summary Enable
666 * FBEE - Fatal Bus Error Enable
668 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
669 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
670 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
672 if (channel->tx_ring) {
673 /* Enable the following Tx interrupts
674 * TIE - Transmit Interrupt Enable (unless using
675 * per channel interrupts in edge triggered
676 * mode)
678 if (!pdata->per_channel_irq || pdata->channel_irq_mode)
679 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
681 if (channel->rx_ring) {
682                         /* Enable the following Rx interrupts
683 * RBUE - Receive Buffer Unavailable Enable
684 * RIE - Receive Interrupt Enable (unless using
685 * per channel interrupts in edge triggered
686 * mode)
688 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
689 if (!pdata->per_channel_irq || pdata->channel_irq_mode)
690 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
693 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
697 static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
699 unsigned int mtl_q_isr;
700 unsigned int q_count, i;
702 q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
703 for (i = 0; i < q_count; i++) {
704 /* Clear all the interrupts which are set */
705 mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
706 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
708 /* No MTL interrupts to be enabled */
709 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
713 static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
715 unsigned int mac_ier = 0;
717 /* Enable Timestamp interrupt */
718 XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);
720 XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
722 /* Enable all counter interrupts */
723 XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
724 XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
726 /* Enable MDIO single command completion interrupt */
727 XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
730 static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata)
732 unsigned int ecc_isr, ecc_ier = 0;
734 if (!pdata->vdata->ecc_support)
735 return;
737 /* Clear all the interrupts which are set */
738 ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
739 XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
741 /* Enable ECC interrupts */
742 XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 1);
743 XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 1);
744 XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 1);
745 XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 1);
746 XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 1);
747 XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 1);
749 XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
752 static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata)
754 unsigned int ecc_ier;
756 ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);
758 /* Disable ECC DED interrupts */
759 XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 0);
760 XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 0);
761 XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 0);
763 XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
766 static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata,
767 enum xgbe_ecc_sec sec)
769 unsigned int ecc_ier;
771 ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);
773 /* Disable ECC SEC interrupt */
774 switch (sec) {
775 case XGBE_ECC_SEC_TX:
776 XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 0);
777 break;
778 case XGBE_ECC_SEC_RX:
779 XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 0);
780 break;
781 case XGBE_ECC_SEC_DESC:
782 XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 0);
783 break;
786 XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
789 static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
791 unsigned int ss;
793 switch (speed) {
794 case SPEED_1000:
795 ss = 0x03;
796 break;
797 case SPEED_2500:
798 ss = 0x02;
799 break;
800 case SPEED_10000:
801 ss = 0x00;
802 break;
803 default:
804 return -EINVAL;
807 if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
808 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
810 return 0;
813 static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
815 /* Put the VLAN tag in the Rx descriptor */
816 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
818 /* Don't check the VLAN type */
819 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
821 /* Check only C-TAG (0x8100) packets */
822 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
824 /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
825 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
827 /* Enable VLAN tag stripping */
828 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
830 return 0;
833 static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
835 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
837 return 0;
840 static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
842 /* Enable VLAN filtering */
843 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
845 /* Enable VLAN Hash Table filtering */
846 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
848 /* Disable VLAN tag inverse matching */
849 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
851 /* Only filter on the lower 12-bits of the VLAN tag */
852 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
854 /* In order for the VLAN Hash Table filtering to be effective,
855 * the VLAN tag identifier in the VLAN Tag Register must not
856 * be zero. Set the VLAN tag identifier to "1" to enable the
857 * VLAN Hash Table filtering. This implies that a VLAN tag of
858 * 1 will always pass filtering.
860 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
862 return 0;
865 static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
867 /* Disable VLAN filtering */
868 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
870 return 0;
873 static u32 xgbe_vid_crc32_le(__le16 vid_le)
875 u32 poly = 0xedb88320; /* CRCPOLY_LE */
876 u32 crc = ~0;
877 u32 temp = 0;
878 unsigned char *data = (unsigned char *)&vid_le;
879 unsigned char data_byte = 0;
880 int i, bits;
882 bits = get_bitmask_order(VLAN_VID_MASK);
883 for (i = 0; i < bits; i++) {
884 if ((i % 8) == 0)
885 data_byte = data[i / 8];
887 temp = ((crc & 1) ^ data_byte) & 1;
888 crc >>= 1;
889 data_byte >>= 1;
891 if (temp)
892 crc ^= poly;
895 return crc;
898 static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
900 u32 crc;
901 u16 vid;
902 __le16 vid_le;
903 u16 vlan_hash_table = 0;
905 /* Generate the VLAN Hash Table value */
906 for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
907 /* Get the CRC32 value of the VLAN ID */
908 vid_le = cpu_to_le16(vid);
909 crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
911 vlan_hash_table |= (1 << crc);
914 /* Set the VLAN Hash Table filtering register */
915 XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
917 return 0;
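/* The hash index above is the bit-reversed CRC32 of the little-endian
 * VLAN ID reduced to its top four bits (the ">> 28"), giving one of 16
 * buckets in the VLHT field; distinct VLAN IDs can therefore share a
 * bucket, making this a coarse hardware pre-filter.
 */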
920 static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
921 unsigned int enable)
923 unsigned int val = enable ? 1 : 0;
925 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
926 return 0;
928 netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
929 enable ? "entering" : "leaving");
930 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
932 /* Hardware will still perform VLAN filtering in promiscuous mode */
933 if (enable) {
934 xgbe_disable_rx_vlan_filtering(pdata);
935 } else {
936 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
937 xgbe_enable_rx_vlan_filtering(pdata);
940 return 0;
943 static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
944 unsigned int enable)
946 unsigned int val = enable ? 1 : 0;
948 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
949 return 0;
951 netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
952 enable ? "entering" : "leaving");
953 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
955 return 0;
958 static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
959 struct netdev_hw_addr *ha, unsigned int *mac_reg)
961 unsigned int mac_addr_hi, mac_addr_lo;
962 u8 *mac_addr;
964 mac_addr_lo = 0;
965 mac_addr_hi = 0;
967 if (ha) {
968 mac_addr = (u8 *)&mac_addr_lo;
969 mac_addr[0] = ha->addr[0];
970 mac_addr[1] = ha->addr[1];
971 mac_addr[2] = ha->addr[2];
972 mac_addr[3] = ha->addr[3];
973 mac_addr = (u8 *)&mac_addr_hi;
974 mac_addr[0] = ha->addr[4];
975 mac_addr[1] = ha->addr[5];
977 netif_dbg(pdata, drv, pdata->netdev,
978 "adding mac address %pM at %#x\n",
979 ha->addr, *mac_reg);
981 XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
984 XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
985 *mac_reg += MAC_MACA_INC;
986 XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
987 *mac_reg += MAC_MACA_INC;
990 static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
992 struct net_device *netdev = pdata->netdev;
993 struct netdev_hw_addr *ha;
994 unsigned int mac_reg;
995 unsigned int addn_macs;
997 mac_reg = MAC_MACA1HR;
998 addn_macs = pdata->hw_feat.addn_mac;
1000 if (netdev_uc_count(netdev) > addn_macs) {
1001 xgbe_set_promiscuous_mode(pdata, 1);
1002 } else {
1003 netdev_for_each_uc_addr(ha, netdev) {
1004 xgbe_set_mac_reg(pdata, ha, &mac_reg);
1005 addn_macs--;
1008 if (netdev_mc_count(netdev) > addn_macs) {
1009 xgbe_set_all_multicast_mode(pdata, 1);
1010 } else {
1011 netdev_for_each_mc_addr(ha, netdev) {
1012 xgbe_set_mac_reg(pdata, ha, &mac_reg);
1013 addn_macs--;
1018 /* Clear remaining additional MAC address entries */
1019 while (addn_macs--)
1020 xgbe_set_mac_reg(pdata, NULL, &mac_reg);
1023 static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
1025 struct net_device *netdev = pdata->netdev;
1026 struct netdev_hw_addr *ha;
1027 unsigned int hash_reg;
1028 unsigned int hash_table_shift, hash_table_count;
1029 u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
1030 u32 crc;
1031 unsigned int i;
1033 hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
1034 hash_table_count = pdata->hw_feat.hash_table_size / 32;
1035 memset(hash_table, 0, sizeof(hash_table));
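/* For example, a 256-entry hash table (hash_table_size of 256) gives a
 * shift of 26 - (256 >> 7) = 24, so the top 8 bits of each CRC select
 * one of 256 bits spread across 256 / 32 = 8 MAC_HTRn registers.
 */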
1037 /* Build the MAC Hash Table register values */
1038 netdev_for_each_uc_addr(ha, netdev) {
1039 crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
1040 crc >>= hash_table_shift;
1041 hash_table[crc >> 5] |= (1 << (crc & 0x1f));
1044 netdev_for_each_mc_addr(ha, netdev) {
1045 crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
1046 crc >>= hash_table_shift;
1047 hash_table[crc >> 5] |= (1 << (crc & 0x1f));
1050 /* Set the MAC Hash Table registers */
1051 hash_reg = MAC_HTR0;
1052 for (i = 0; i < hash_table_count; i++) {
1053 XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
1054 hash_reg += MAC_HTR_INC;
1058 static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
1060 if (pdata->hw_feat.hash_table_size)
1061 xgbe_set_mac_hash_table(pdata);
1062 else
1063 xgbe_set_mac_addn_addrs(pdata);
1065 return 0;
1068 static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
1070 unsigned int mac_addr_hi, mac_addr_lo;
1072 mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
1073 mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
1074 (addr[1] << 8) | (addr[0] << 0);
1076 XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
1077 XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
1079 return 0;
1082 static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
1084 struct net_device *netdev = pdata->netdev;
1085 unsigned int pr_mode, am_mode;
1087 pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
1088 am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
1090 xgbe_set_promiscuous_mode(pdata, pr_mode);
1091 xgbe_set_all_multicast_mode(pdata, am_mode);
1093 xgbe_add_mac_addresses(pdata);
1095 return 0;
1098 static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
1100 unsigned int reg;
1102 if (gpio > 15)
1103 return -EINVAL;
1105 reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
1107 reg &= ~(1 << (gpio + 16));
1108 XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);
1110 return 0;
1113 static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
1115 unsigned int reg;
1117 if (gpio > 15)
1118 return -EINVAL;
1120 reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
1122 reg |= (1 << (gpio + 16));
1123 XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);
1125 return 0;
1128 static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
1129 int mmd_reg)
1131 unsigned long flags;
1132 unsigned int mmd_address, index, offset;
1133 int mmd_data;
1135 if (mmd_reg & MII_ADDR_C45)
1136 mmd_address = mmd_reg & ~MII_ADDR_C45;
1137 else
1138 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
1140 /* The PCS registers are accessed using mmio. The underlying
1141 * management interface uses indirect addressing to access the MMD
1142 * register sets. This requires accessing of the PCS register in two
1143 * phases, an address phase and a data phase.
1145 * The mmio interface is based on 16-bit offsets and values. All
1146 * register offsets must therefore be adjusted by left shifting the
1147 * offset 1 bit and reading 16 bits of data.
1149 mmd_address <<= 1;
1150 index = mmd_address & ~pdata->xpcs_window_mask;
1151 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
1153 spin_lock_irqsave(&pdata->xpcs_lock, flags);
1154 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
1155 mmd_data = XPCS16_IOREAD(pdata, offset);
1156 spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1158 return mmd_data;
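/* For example, reading register 0x0010 of MMD 3 (the PCS MMD) yields an
 * mmd_address of (3 << 16) | 0x0010 = 0x30010, which the left shift
 * turns into byte offset 0x60020; the bits above xpcs_window_mask pick
 * the indirection window and the bits below it locate the register
 * within the mapped window.
 */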
1161 static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
1162 int mmd_reg, int mmd_data)
1164 unsigned long flags;
1165 unsigned int mmd_address, index, offset;
1167 if (mmd_reg & MII_ADDR_C45)
1168 mmd_address = mmd_reg & ~MII_ADDR_C45;
1169 else
1170 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
1172 /* The PCS registers are accessed using mmio. The underlying
1173 * management interface uses indirect addressing to access the MMD
1174 * register sets. This requires accessing of the PCS register in two
1175 * phases, an address phase and a data phase.
1177 * The mmio interface is based on 16-bit offsets and values. All
1178 * register offsets must therefore be adjusted by left shifting the
1179 * offset 1 bit and writing 16 bits of data.
1181 mmd_address <<= 1;
1182 index = mmd_address & ~pdata->xpcs_window_mask;
1183 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
1185 spin_lock_irqsave(&pdata->xpcs_lock, flags);
1186 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
1187 XPCS16_IOWRITE(pdata, offset, mmd_data);
1188 spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1191 static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
1192 int mmd_reg)
1194 unsigned long flags;
1195 unsigned int mmd_address;
1196 int mmd_data;
1198 if (mmd_reg & MII_ADDR_C45)
1199 mmd_address = mmd_reg & ~MII_ADDR_C45;
1200 else
1201 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
1203 /* The PCS registers are accessed using mmio. The underlying APB3
1204 * management interface uses indirect addressing to access the MMD
1205 * register sets. This requires accessing of the PCS register in two
1206 * phases, an address phase and a data phase.
1208 * The mmio interface is based on 32-bit offsets and values. All
1209 * register offsets must therefore be adjusted by left shifting the
1210 * offset 2 bits and reading 32 bits of data.
1212 spin_lock_irqsave(&pdata->xpcs_lock, flags);
1213 XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
1214 mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
1215 spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1217 return mmd_data;
1220 static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
1221 int mmd_reg, int mmd_data)
1223 unsigned int mmd_address;
1224 unsigned long flags;
1226 if (mmd_reg & MII_ADDR_C45)
1227 mmd_address = mmd_reg & ~MII_ADDR_C45;
1228 else
1229 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
1231 /* The PCS registers are accessed using mmio. The underlying APB3
1232 * management interface uses indirect addressing to access the MMD
1233 * register sets. This requires accessing of the PCS register in two
1234 * phases, an address phase and a data phase.
1236 * The mmio interface is based on 32-bit offsets and values. All
1237 * register offsets must therefore be adjusted by left shifting the
1238 * offset 2 bits and writing 32 bits of data.
1240 spin_lock_irqsave(&pdata->xpcs_lock, flags);
1241 XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
1242 XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
1243 spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1246 static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
1247 int mmd_reg)
1249 switch (pdata->vdata->xpcs_access) {
1250 case XGBE_XPCS_ACCESS_V1:
1251 return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg);
1253 case XGBE_XPCS_ACCESS_V2:
1254 default:
1255 return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
1259 static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
1260 int mmd_reg, int mmd_data)
1262 switch (pdata->vdata->xpcs_access) {
1263 case XGBE_XPCS_ACCESS_V1:
1264 return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
1266 case XGBE_XPCS_ACCESS_V2:
1267 default:
1268 return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
1272 static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1273 int reg, u16 val)
1275 unsigned int mdio_sca, mdio_sccd;
1277 reinit_completion(&pdata->mdio_complete);
1279 mdio_sca = 0;
1280 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1281 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1282 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1284 mdio_sccd = 0;
1285 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
1286 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
1287 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
1288 XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
1290 if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
1291 netdev_err(pdata->netdev, "mdio write operation timed out\n");
1292 return -ETIMEDOUT;
1295 return 0;
1298 static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1299 int reg)
1301 unsigned int mdio_sca, mdio_sccd;
1303 reinit_completion(&pdata->mdio_complete);
1305 mdio_sca = 0;
1306 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1307 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1308 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1310 mdio_sccd = 0;
1311 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
1312 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
1313 XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
1315 if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
1316 netdev_err(pdata->netdev, "mdio read operation timed out\n");
1317 return -ETIMEDOUT;
1320 return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
1323 static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
1324 enum xgbe_mdio_mode mode)
1326 unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);
1328 switch (mode) {
1329 case XGBE_MDIO_MODE_CL22:
1330 if (port > XGMAC_MAX_C22_PORT)
1331 return -EINVAL;
1332 reg_val |= (1 << port);
1333 break;
1334 case XGBE_MDIO_MODE_CL45:
1335 break;
1336 default:
1337 return -EINVAL;
1340 XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
1342 return 0;
1345 static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
1347 return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
1350 static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
1352 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
1354 return 0;
1357 static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
1359 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
1361 return 0;
1364 static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
1366 struct xgbe_ring_desc *rdesc = rdata->rdesc;
1368 /* Reset the Tx descriptor
1369 * Set buffer 1 (lo) address to zero
1370 * Set buffer 1 (hi) address to zero
1371 * Reset all other control bits (IC, TTSE, B2L & B1L)
1372 * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
1374 rdesc->desc0 = 0;
1375 rdesc->desc1 = 0;
1376 rdesc->desc2 = 0;
1377 rdesc->desc3 = 0;
1379 /* Make sure ownership is written to the descriptor */
1380 dma_wmb();
1383 static void xgbe_tx_desc_init(struct xgbe_channel *channel)
1385 struct xgbe_ring *ring = channel->tx_ring;
1386 struct xgbe_ring_data *rdata;
1387 int i;
1388 int start_index = ring->cur;
1390 DBGPR("-->tx_desc_init\n");
1392         /* Initialize all descriptors */
1393 for (i = 0; i < ring->rdesc_count; i++) {
1394 rdata = XGBE_GET_DESC_DATA(ring, i);
1396 /* Initialize Tx descriptor */
1397 xgbe_tx_desc_reset(rdata);
1400 /* Update the total number of Tx descriptors */
1401 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
1403 /* Update the starting address of descriptor ring */
1404 rdata = XGBE_GET_DESC_DATA(ring, start_index);
1405 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
1406 upper_32_bits(rdata->rdesc_dma));
1407 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
1408 lower_32_bits(rdata->rdesc_dma));
1410 DBGPR("<--tx_desc_init\n");
1413 static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
1414 struct xgbe_ring_data *rdata, unsigned int index)
1416 struct xgbe_ring_desc *rdesc = rdata->rdesc;
1417 unsigned int rx_usecs = pdata->rx_usecs;
1418 unsigned int rx_frames = pdata->rx_frames;
1419 unsigned int inte;
1420 dma_addr_t hdr_dma, buf_dma;
1422 if (!rx_usecs && !rx_frames) {
1423 /* No coalescing, interrupt for every descriptor */
1424 inte = 1;
1425 } else {
1426 /* Set interrupt based on Rx frame coalescing setting */
1427 if (rx_frames && !((index + 1) % rx_frames))
1428 inte = 1;
1429 else
1430 inte = 0;
1433 /* Reset the Rx descriptor
1434 * Set buffer 1 (lo) address to header dma address (lo)
1435 * Set buffer 1 (hi) address to header dma address (hi)
1436 * Set buffer 2 (lo) address to buffer dma address (lo)
1437 * Set buffer 2 (hi) address to buffer dma address (hi) and
1438 * set control bits OWN and INTE
1440 hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
1441 buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
1442 rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
1443 rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
1444 rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
1445 rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
1447 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
1449 /* Since the Rx DMA engine is likely running, make sure everything
1450 * is written to the descriptor(s) before setting the OWN bit
1451 * for the descriptor
1453 dma_wmb();
1455 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
1457 /* Make sure ownership is written to the descriptor */
1458 dma_wmb();
1461 static void xgbe_rx_desc_init(struct xgbe_channel *channel)
1463 struct xgbe_prv_data *pdata = channel->pdata;
1464 struct xgbe_ring *ring = channel->rx_ring;
1465 struct xgbe_ring_data *rdata;
1466 unsigned int start_index = ring->cur;
1467 unsigned int i;
1469 DBGPR("-->rx_desc_init\n");
1471 /* Initialize all descriptors */
1472 for (i = 0; i < ring->rdesc_count; i++) {
1473 rdata = XGBE_GET_DESC_DATA(ring, i);
1475 /* Initialize Rx descriptor */
1476 xgbe_rx_desc_reset(pdata, rdata, i);
1479 /* Update the total number of Rx descriptors */
1480 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
1482 /* Update the starting address of descriptor ring */
1483 rdata = XGBE_GET_DESC_DATA(ring, start_index);
1484 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
1485 upper_32_bits(rdata->rdesc_dma));
1486 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
1487 lower_32_bits(rdata->rdesc_dma));
1489 /* Update the Rx Descriptor Tail Pointer */
1490 rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
1491 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
1492 lower_32_bits(rdata->rdesc_dma));
1494 DBGPR("<--rx_desc_init\n");
1497 static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
1498 unsigned int addend)
1500 /* Set the addend register value and tell the device */
1501 XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
1502 XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
1504 /* Wait for addend update to complete */
1505 while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
1506 udelay(5);
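/* The addend drives the fine update method: the device adds this value
 * to a 32-bit accumulator every PTP clock cycle and advances the
 * sub-second register by SSINC nanoseconds on each accumulator
 * overflow, so the caller typically computes it as
 * 2^32 * (target increment frequency / actual PTP clock frequency).
 */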
1509 static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
1510 unsigned int nsec)
1512 /* Set the time values and tell the device */
1513 XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
1514 XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
1515 XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
1517 /* Wait for time update to complete */
1518 while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
1519 udelay(5);
1522 static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
1524 u64 nsec;
1526 nsec = XGMAC_IOREAD(pdata, MAC_STSR);
1527 nsec *= NSEC_PER_SEC;
1528 nsec += XGMAC_IOREAD(pdata, MAC_STNR);
1530 return nsec;
1533 static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
1535 unsigned int tx_snr, tx_ssr;
1536 u64 nsec;
1538 if (pdata->vdata->tx_tstamp_workaround) {
1539 tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
1540 tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
1541 } else {
1542 tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
1543 tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
1546 if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
1547 return 0;
1549 nsec = tx_ssr;
1550 nsec *= NSEC_PER_SEC;
1551 nsec += tx_snr;
1553 return nsec;
1556 static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
1557 struct xgbe_ring_desc *rdesc)
1559 u64 nsec;
1561 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
1562 !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
1563 nsec = le32_to_cpu(rdesc->desc1);
1564 nsec <<= 32;
1565 nsec |= le32_to_cpu(rdesc->desc0);
1566 if (nsec != 0xffffffffffffffffULL) {
1567 packet->rx_tstamp = nsec;
1568 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1569 RX_TSTAMP, 1);
1574 static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
1575 unsigned int mac_tscr)
1577 /* Set one nano-second accuracy */
1578 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
1580 /* Set fine timestamp update */
1581 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
1583 /* Overwrite earlier timestamps */
1584 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
1586 XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
1588 /* Exit if timestamping is not enabled */
1589 if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
1590 return 0;
1592 /* Initialize time registers */
1593 XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
1594 XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
1595 xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
1596 xgbe_set_tstamp_time(pdata, 0, 0);
1598 /* Initialize the timecounter */
1599 timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
1600 ktime_to_ns(ktime_get_real()));
1602 return 0;
1605 static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
1606 struct xgbe_ring *ring)
1608 struct xgbe_prv_data *pdata = channel->pdata;
1609 struct xgbe_ring_data *rdata;
1611 /* Make sure everything is written before the register write */
1612 wmb();
1614         /* Issue a poll command to Tx DMA by writing the address
1615          * of the next free descriptor */
1616 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1617 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
1618 lower_32_bits(rdata->rdesc_dma));
1620 /* Start the Tx timer */
1621 if (pdata->tx_usecs && !channel->tx_timer_active) {
1622 channel->tx_timer_active = 1;
1623 mod_timer(&channel->tx_timer,
1624 jiffies + usecs_to_jiffies(pdata->tx_usecs));
1627 ring->tx.xmit_more = 0;
1630 static void xgbe_dev_xmit(struct xgbe_channel *channel)
1632 struct xgbe_prv_data *pdata = channel->pdata;
1633 struct xgbe_ring *ring = channel->tx_ring;
1634 struct xgbe_ring_data *rdata;
1635 struct xgbe_ring_desc *rdesc;
1636 struct xgbe_packet_data *packet = &ring->packet_data;
1637 unsigned int csum, tso, vlan;
1638 unsigned int tso_context, vlan_context;
1639 unsigned int tx_set_ic;
1640 int start_index = ring->cur;
1641 int cur_index = ring->cur;
1642 int i;
1644 DBGPR("-->xgbe_dev_xmit\n");
1646 csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1647 CSUM_ENABLE);
1648 tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1649 TSO_ENABLE);
1650 vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1651 VLAN_CTAG);
1653 if (tso && (packet->mss != ring->tx.cur_mss))
1654 tso_context = 1;
1655 else
1656 tso_context = 0;
1658 if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
1659 vlan_context = 1;
1660 else
1661 vlan_context = 0;
1663 /* Determine if an interrupt should be generated for this Tx:
1664 * Interrupt:
1665 * - Tx frame count exceeds the frame count setting
1666 * - Addition of Tx frame count to the frame count since the
1667 * last interrupt was set exceeds the frame count setting
1668 * No interrupt:
1669 * - No frame count setting specified (ethtool -C ethX tx-frames 0)
1670 * - Addition of Tx frame count to the frame count since the
1671 * last interrupt was set does not exceed the frame count setting
1673 ring->coalesce_count += packet->tx_packets;
1674 if (!pdata->tx_frames)
1675 tx_set_ic = 0;
1676 else if (packet->tx_packets > pdata->tx_frames)
1677 tx_set_ic = 1;
1678 else if ((ring->coalesce_count % pdata->tx_frames) <
1679 packet->tx_packets)
1680 tx_set_ic = 1;
1681 else
1682 tx_set_ic = 0;
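/* Worked example, assuming tx-frames is set to 25: a packet with
 * tx_packets = 4 that raises coalesce_count from 48 to 52 sets the IC
 * bit, because 52 % 25 = 2 is less than 4 (a 25-frame boundary was
 * crossed since the last interrupt). One with tx_packets = 3 raising
 * the count from 51 to 54 does not, since 54 % 25 = 4 is not less
 * than 3.
 */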
1684 rdata = XGBE_GET_DESC_DATA(ring, cur_index);
1685 rdesc = rdata->rdesc;
1687 /* Create a context descriptor if this is a TSO packet */
1688 if (tso_context || vlan_context) {
1689 if (tso_context) {
1690 netif_dbg(pdata, tx_queued, pdata->netdev,
1691 "TSO context descriptor, mss=%u\n",
1692 packet->mss);
1694 /* Set the MSS size */
1695 XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
1696 MSS, packet->mss);
1698 /* Mark it as a CONTEXT descriptor */
1699 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1700 CTXT, 1);
1702 /* Indicate this descriptor contains the MSS */
1703 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1704 TCMSSV, 1);
1706 ring->tx.cur_mss = packet->mss;
1709 if (vlan_context) {
1710 netif_dbg(pdata, tx_queued, pdata->netdev,
1711 "VLAN context descriptor, ctag=%u\n",
1712 packet->vlan_ctag);
1714 /* Mark it as a CONTEXT descriptor */
1715 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1716 CTXT, 1);
1718 /* Set the VLAN tag */
1719 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1720 VT, packet->vlan_ctag);
1722 /* Indicate this descriptor contains the VLAN tag */
1723 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
1724 VLTV, 1);
1726 ring->tx.cur_vlan_ctag = packet->vlan_ctag;
1729 cur_index++;
1730 rdata = XGBE_GET_DESC_DATA(ring, cur_index);
1731 rdesc = rdata->rdesc;
1734 /* Update buffer address (for TSO this is the header) */
1735 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
1736 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
1738 /* Update the buffer length */
1739 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
1740 rdata->skb_dma_len);
1742 /* VLAN tag insertion check */
1743 if (vlan)
1744 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
1745 TX_NORMAL_DESC2_VLAN_INSERT);
1747 /* Timestamp enablement check */
1748 if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
1749 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);
1751 /* Mark it as First Descriptor */
1752 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
1754 /* Mark it as a NORMAL descriptor */
1755 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1757 /* Set OWN bit if not the first descriptor */
1758 if (cur_index != start_index)
1759 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1761 if (tso) {
1762 /* Enable TSO */
1763 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
1764 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
1765 packet->tcp_payload_len);
1766 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
1767 packet->tcp_header_len / 4);
1769 pdata->ext_stats.tx_tso_packets++;
1770 } else {
1771 /* Enable CRC and Pad Insertion */
1772 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
1774 /* Enable HW CSUM */
1775 if (csum)
1776 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1777 CIC, 0x3);
1779 /* Set the total length to be transmitted */
1780 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
1781 packet->length);
1784 for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
1785 cur_index++;
1786 rdata = XGBE_GET_DESC_DATA(ring, cur_index);
1787 rdesc = rdata->rdesc;
1789 /* Update buffer address */
1790 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
1791 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
1793 /* Update the buffer length */
1794 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
1795 rdata->skb_dma_len);
1797 /* Set OWN bit */
1798 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1800 /* Mark it as NORMAL descriptor */
1801 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1803 /* Enable HW CSUM */
1804 if (csum)
1805 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1806 CIC, 0x3);
1809 /* Set LAST bit for the last descriptor */
1810 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
1812 /* Set IC bit based on Tx coalescing settings */
1813 if (tx_set_ic)
1814 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
1816 /* Save the Tx info to report back during cleanup */
1817 rdata->tx.packets = packet->tx_packets;
1818 rdata->tx.bytes = packet->tx_bytes;
1820 /* In case the Tx DMA engine is running, make sure everything
1821 * is written to the descriptor(s) before setting the OWN bit
1822 * for the first descriptor
1824 dma_wmb();
1826 /* Set OWN bit for the first descriptor */
1827 rdata = XGBE_GET_DESC_DATA(ring, start_index);
1828 rdesc = rdata->rdesc;
1829 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1831 if (netif_msg_tx_queued(pdata))
1832 xgbe_dump_tx_desc(pdata, ring, start_index,
1833 packet->rdesc_count, 1);
1835 /* Make sure ownership is written to the descriptor */
1836 smp_wmb();
1838 ring->cur = cur_index + 1;
1839 if (!packet->skb->xmit_more ||
1840 netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
1841 channel->queue_index)))
1842 xgbe_tx_start_xmit(channel, ring);
1843 else
1844 ring->tx.xmit_more = 1;
1846 DBGPR(" %s: descriptors %u to %u written\n",
1847 channel->name, start_index & (ring->rdesc_count - 1),
1848 (ring->cur - 1) & (ring->rdesc_count - 1));
1850 DBGPR("<--xgbe_dev_xmit\n");
1853 static int xgbe_dev_read(struct xgbe_channel *channel)
1855 struct xgbe_prv_data *pdata = channel->pdata;
1856 struct xgbe_ring *ring = channel->rx_ring;
1857 struct xgbe_ring_data *rdata;
1858 struct xgbe_ring_desc *rdesc;
1859 struct xgbe_packet_data *packet = &ring->packet_data;
1860 struct net_device *netdev = pdata->netdev;
1861 unsigned int err, etlt, l34t;
1863 DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
1865 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1866 rdesc = rdata->rdesc;
1868 /* Check for data availability */
1869 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
1870 return 1;
1872 /* Make sure descriptor fields are read after reading the OWN bit */
1873 dma_rmb();
1875 if (netif_msg_rx_status(pdata))
1876 xgbe_dump_rx_desc(pdata, ring, ring->cur);
1878 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
1879 /* Timestamp Context Descriptor */
1880 xgbe_get_rx_tstamp(packet, rdesc);
1882 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1883 CONTEXT, 1);
1884 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1885 CONTEXT_NEXT, 0);
1886 return 0;
1889 /* Normal Descriptor, be sure Context Descriptor bit is off */
1890 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
1892 /* Indicate if a Context Descriptor is next */
1893 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
1894 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1895 CONTEXT_NEXT, 1);
1897 /* Get the header length */
1898 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
1899 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1900 FIRST, 1);
1901 rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
1902 RX_NORMAL_DESC2, HL);
1903 if (rdata->rx.hdr_len)
1904 pdata->ext_stats.rx_split_header_packets++;
1905 } else {
1906 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1907 FIRST, 0);
1910 /* Get the RSS hash */
1911 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
1912 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1913 RSS_HASH, 1);
1915 packet->rss_hash = le32_to_cpu(rdesc->desc1);
1917 l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
1918 switch (l34t) {
1919 case RX_DESC3_L34T_IPV4_TCP:
1920 case RX_DESC3_L34T_IPV4_UDP:
1921 case RX_DESC3_L34T_IPV6_TCP:
1922 case RX_DESC3_L34T_IPV6_UDP:
1923 packet->rss_hash_type = PKT_HASH_TYPE_L4;
1924 break;
1925 default:
1926 packet->rss_hash_type = PKT_HASH_TYPE_L3;
1930 /* Not all the data has been transferred for this packet */
1931 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
1932 return 0;
1934 /* This is the last of the data for this packet */
1935 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1936 LAST, 1);
1938 /* Get the packet length */
1939 rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
1941 /* Set checksum done indicator as appropriate */
1942 if (netdev->features & NETIF_F_RXCSUM)
1943 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1944 CSUM_DONE, 1);
1946 /* Check for errors (only valid in last descriptor) */
1947 err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
1948 etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
1949 netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
1951 if (!err || !etlt) {
1952 /* No error if err is 0 or etlt is 0 */
1953 if ((etlt == 0x09) &&
1954 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1955 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1956 VLAN_CTAG, 1);
1957 packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
1958 RX_NORMAL_DESC0,
1959 OVT);
1960 netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
1961 packet->vlan_ctag);
1963 } else {
1964 if ((etlt == 0x05) || (etlt == 0x06))
1965 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1966 CSUM_DONE, 0);
1967 else
1968 XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
1969 FRAME, 1);
1970 }
1972 DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
1973 ring->cur & (ring->rdesc_count - 1), ring->cur);
1975 return 0;
1976 }
1978 static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
1979 {
1980 /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
1981 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
1982 }
1984 static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
1985 {
1986 /* Rx and Tx share LD bit, so check TDES3.LD bit */
1987 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
1988 }
1990 static int xgbe_enable_int(struct xgbe_channel *channel,
1991 enum xgbe_int int_id)
1992 {
1993 unsigned int dma_ch_ier;
1995 dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1997 switch (int_id) {
1998 case XGMAC_INT_DMA_CH_SR_TI:
1999 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
2000 break;
2001 case XGMAC_INT_DMA_CH_SR_TPS:
2002 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
2003 break;
2004 case XGMAC_INT_DMA_CH_SR_TBU:
2005 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
2006 break;
2007 case XGMAC_INT_DMA_CH_SR_RI:
2008 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
2009 break;
2010 case XGMAC_INT_DMA_CH_SR_RBU:
2011 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
2012 break;
2013 case XGMAC_INT_DMA_CH_SR_RPS:
2014 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
2015 break;
2016 case XGMAC_INT_DMA_CH_SR_TI_RI:
2017 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
2018 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
2019 break;
2020 case XGMAC_INT_DMA_CH_SR_FBE:
2021 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
2022 break;
2023 case XGMAC_INT_DMA_ALL:
2024 dma_ch_ier |= channel->saved_ier;
2025 break;
2026 default:
2027 return -1;
2028 }
2030 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
2032 return 0;
2033 }
2035 static int xgbe_disable_int(struct xgbe_channel *channel,
2036 enum xgbe_int int_id)
2037 {
2038 unsigned int dma_ch_ier;
2040 dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
2042 switch (int_id) {
2043 case XGMAC_INT_DMA_CH_SR_TI:
2044 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
2045 break;
2046 case XGMAC_INT_DMA_CH_SR_TPS:
2047 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
2048 break;
2049 case XGMAC_INT_DMA_CH_SR_TBU:
2050 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
2051 break;
2052 case XGMAC_INT_DMA_CH_SR_RI:
2053 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
2054 break;
2055 case XGMAC_INT_DMA_CH_SR_RBU:
2056 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
2057 break;
2058 case XGMAC_INT_DMA_CH_SR_RPS:
2059 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
2060 break;
2061 case XGMAC_INT_DMA_CH_SR_TI_RI:
2062 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
2063 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
2064 break;
2065 case XGMAC_INT_DMA_CH_SR_FBE:
2066 XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
2067 break;
2068 case XGMAC_INT_DMA_ALL:
2069 channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
2070 dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
2071 break;
2072 default:
2073 return -1;
2074 }
2076 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
2078 return 0;
2079 }
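/* Note: XGMAC_INT_DMA_ALL pairs the two helpers above: xgbe_disable_int()
 * stashes the currently enabled bits (masked by XGBE_DMA_INTERRUPT_MASK)
 * in channel->saved_ier before clearing them, and a later
 * xgbe_enable_int(channel, XGMAC_INT_DMA_ALL) ORs that saved value back
 * into DMA_CH_IER, restoring exactly the set that was active before.
 */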
2081 static int __xgbe_exit(struct xgbe_prv_data *pdata)
2082 {
2083 unsigned int count = 2000;
2085 DBGPR("-->xgbe_exit\n");
2087 /* Issue a software reset */
2088 XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
2089 usleep_range(10, 15);
2091 /* Poll until the software reset (SWR) bit self-clears */
2092 while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
2093 usleep_range(500, 600);
2095 if (!count)
2096 return -EBUSY;
2098 DBGPR("<--xgbe_exit\n");
2100 return 0;
2101 }
2103 static int xgbe_exit(struct xgbe_prv_data *pdata)
2104 {
2105 int ret;
2107 /* To guard against possible incorrectly generated interrupts,
2108 * issue the software reset twice.
2109 */
2110 ret = __xgbe_exit(pdata);
2111 if (ret)
2112 return ret;
2114 return __xgbe_exit(pdata);
2115 }
2117 static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
2118 {
2119 unsigned int i, count;
2121 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
2122 return 0;
2124 for (i = 0; i < pdata->tx_q_count; i++)
2125 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
2127 /* Poll until the flush (FTQ) bit self-clears on each queue */
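/* Illustrative worst case: 2000 iterations at 500-600us per sleep bounds
 * the wait at roughly 1.0-1.2 seconds per queue before giving up with
 * -EBUSY below.
 */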
2128 for (i = 0; i < pdata->tx_q_count; i++) {
2129 count = 2000;
2130 while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
2131 MTL_Q_TQOMR, FTQ))
2132 usleep_range(500, 600);
2134 if (!count)
2135 return -EBUSY;
2136 }
2138 return 0;
2139 }
2141 static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
2142 {
2143 /* Set enhanced addressing mode */
2144 XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
2146 /* Set the System Bus mode */
2147 XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
2148 XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
2149 }
2151 static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
2152 {
2153 unsigned int arcache, awcache;
2155 arcache = 0;
2156 XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
2157 XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
2158 XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
2159 XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
2160 XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
2161 XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
2162 XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
2164 awcache = 0;
2165 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
2166 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
2167 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
2168 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
2169 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
2170 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
2171 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
2172 XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
2173 XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
2174 }
2176 static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
2177 {
2178 unsigned int i;
2180 /* Set Tx to weighted round robin scheduling algorithm */
2181 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
2183 /* Set Tx traffic classes to use WRR algorithm with equal weights */
2184 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
2185 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
2186 MTL_TSA_ETS);
2187 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
2188 }
2190 /* Set Rx to strict priority algorithm */
2191 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
2192 }
2194 static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
2195 unsigned int queue,
2196 unsigned int q_fifo_size)
2197 {
2198 unsigned int frame_fifo_size;
2199 unsigned int rfa, rfd;
2201 frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));
2203 if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) {
2204 /* PFC is active for this queue */
2205 rfa = pdata->pfc_rfa;
2206 rfd = rfa + frame_fifo_size;
2207 if (rfd > XGMAC_FLOW_CONTROL_MAX)
2208 rfd = XGMAC_FLOW_CONTROL_MAX;
2209 if (rfa >= XGMAC_FLOW_CONTROL_MAX)
2210 rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT;
2211 } else {
2212 /* This path deals with just maximum frame sizes which are
2213 * limited to a jumbo frame of 9,000 (plus headers, etc.)
2214 * so we can never exceed the maximum allowable RFA/RFD
2215 * values.
2216 */
2217 if (q_fifo_size <= 2048) {
2218 /* Set rx_rfa and rx_rfd to zero to signal no flow control */
2219 pdata->rx_rfa[queue] = 0;
2220 pdata->rx_rfd[queue] = 0;
2221 return;
2222 }
2224 if (q_fifo_size <= 4096) {
2225 /* Between 2048 and 4096 */
2226 pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
2227 pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
2228 return;
2229 }
2231 if (q_fifo_size <= frame_fifo_size) {
2232 /* Between 4096 and max-frame */
2233 pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
2234 pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
2235 return;
2236 }
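/* The fixed values above follow the register encoding implied by the
 * comments: field value n appears to mean "fifo full minus
 * (1024 + n * 512) bytes", so 0 -> full - 1024, 1 -> full - 1536,
 * 2 -> full - 2048 and 5 -> full - 3584.
 */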
2238 if (q_fifo_size <= (frame_fifo_size * 3)) {
2239 /* Between max-frame and 3 max-frames,
2240 * trigger if we get just over a frame of data and
2241 * resume when we have just under half a frame left.
2242 */
2243 rfa = q_fifo_size - frame_fifo_size;
2244 rfd = rfa + (frame_fifo_size / 2);
2245 } else {
2246 /* Above 3 max-frames - trigger when just over
2247 * 2 frames of space available
2248 */
2249 rfa = frame_fifo_size * 2;
2250 rfa += XGMAC_FLOW_CONTROL_UNIT;
2251 rfd = rfa + frame_fifo_size;
2252 }
2253 }
2255 pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
2256 pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
2257 }
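/* Illustrative example (numbers hypothetical): with q_fifo_size = 16384
 * bytes and frame_fifo_size = 9728 bytes, the "up to 3 max-frames"
 * branch gives rfa = 16384 - 9728 = 6656 and rfd = 6656 + 9728 / 2 =
 * 11520 bytes, which XGMAC_FLOW_CONTROL_VALUE() then converts into the
 * hardware's RFA/RFD field encoding.
 */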
2259 static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
2260 unsigned int *fifo)
2261 {
2262 unsigned int q_fifo_size;
2263 unsigned int i;
2265 for (i = 0; i < pdata->rx_q_count; i++) {
2266 q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;
2268 xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
2269 }
2270 }
2272 static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
2273 {
2274 unsigned int i;
2276 for (i = 0; i < pdata->rx_q_count; i++) {
2277 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
2278 pdata->rx_rfa[i]);
2279 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
2280 pdata->rx_rfd[i]);
2281 }
2282 }
2284 static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
2285 {
2286 /* The configured value may not be the actual amount of fifo RAM */
2287 return min_t(unsigned int, pdata->tx_max_fifo_size,
2288 pdata->hw_feat.tx_fifo_size);
2289 }
2291 static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
2292 {
2293 /* The configured value may not be the actual amount of fifo RAM */
2294 return min_t(unsigned int, pdata->rx_max_fifo_size,
2295 pdata->hw_feat.rx_fifo_size);
2296 }
2298 static void xgbe_calculate_equal_fifo(unsigned int fifo_size,
2299 unsigned int queue_count,
2300 unsigned int *fifo)
2301 {
2302 unsigned int q_fifo_size;
2303 unsigned int p_fifo;
2304 unsigned int i;
2306 q_fifo_size = fifo_size / queue_count;
2308 /* Calculate the fifo setting by dividing the queue's fifo size
2309 * by the fifo allocation increment (with 0 representing the
2310 * base allocation increment so decrement the result by 1).
2311 */
2312 p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
2313 if (p_fifo)
2314 p_fifo--;
2316 /* Distribute the fifo equally amongst the queues */
2317 for (i = 0; i < queue_count; i++)
2318 fifo[i] = p_fifo;
2319 }
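/* Worked example (assuming a 256-byte XGMAC_FIFO_UNIT): 16384 bytes of
 * fifo across 4 queues gives q_fifo_size = 4096 and p_fifo = 16 - 1 = 15;
 * the hardware interprets a programmed value of 15 as (15 + 1) * 256 =
 * 4096 bytes, matching the "+ 1" used in the netif_info() reporting in
 * xgbe_config_tx_fifo_size() below.
 */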
2321 static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size,
2322 unsigned int queue_count,
2323 unsigned int *fifo)
2324 {
2325 unsigned int i;
2327 BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC);
2329 if (queue_count <= IEEE_8021QAZ_MAX_TCS)
2330 return fifo_size;
2332 /* Rx queues 9 and up are for specialized packets,
2333 * such as PTP or DCB control packets, etc. and
2334 * don't require a large fifo
2335 */
2336 for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
2337 fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
2338 fifo_size -= XGMAC_FIFO_MIN_ALLOC;
2339 }
2341 return fifo_size;
2342 }
2344 static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata)
2345 {
2346 unsigned int delay;
2348 /* If a delay has been provided, use that */
2349 if (pdata->pfc->delay)
2350 return pdata->pfc->delay / 8;
2352 /* Allow for two maximum size frames */
2353 delay = xgbe_get_max_frame(pdata);
2354 delay += XGMAC_ETH_PREAMBLE;
2355 delay *= 2;
2357 /* Allow for PFC frame */
2358 delay += XGMAC_PFC_DATA_LEN;
2359 delay += ETH_HLEN + ETH_FCS_LEN;
2360 delay += XGMAC_ETH_PREAMBLE;
2362 /* Allow for miscellaneous delays (LPI exit, cable, etc.) */
2363 delay += XGMAC_PFC_DELAYS;
2365 return delay;
2366 }
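/* The computed delay mirrors the 802.1Qbb worst case: two maximum-size
 * frames that may already be in flight, plus one full PFC frame, plus a
 * fixed allowance (XGMAC_PFC_DELAYS) for LPI exit, cable and other
 * latencies. The "/ 8" above converts a caller-supplied delay from bit
 * times to bytes.
 */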
2368 static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata)
2369 {
2370 unsigned int count, prio_queues;
2371 unsigned int i;
2373 if (!pdata->pfc->pfc_en)
2374 return 0;
2376 count = 0;
2377 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2378 for (i = 0; i < prio_queues; i++) {
2379 if (!xgbe_is_pfc_queue(pdata, i))
2380 continue;
2382 pdata->pfcq[i] = 1;
2383 count++;
2384 }
2386 return count;
2387 }
2389 static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata,
2390 unsigned int fifo_size,
2391 unsigned int *fifo)
2392 {
2393 unsigned int q_fifo_size, rem_fifo, addn_fifo;
2394 unsigned int prio_queues;
2395 unsigned int pfc_count;
2396 unsigned int i;
2398 q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata));
2399 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2400 pfc_count = xgbe_get_pfc_queues(pdata);
2402 if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) {
2403 /* No traffic classes with PFC enabled or can't do lossless */
2404 xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
2405 return;
2406 }
2408 /* Calculate how much fifo we have to play with */
2409 rem_fifo = fifo_size - (q_fifo_size * prio_queues);
2411 /* Calculate how much more than base fifo PFC needs, which also
2412 * becomes the threshold activation point (RFA)
2413 */
2414 pdata->pfc_rfa = xgbe_get_pfc_delay(pdata);
2415 pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa);
2417 if (pdata->pfc_rfa > q_fifo_size) {
2418 addn_fifo = pdata->pfc_rfa - q_fifo_size;
2419 addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo);
2420 } else {
2421 addn_fifo = 0;
2422 }
2424 /* Calculate DCB fifo settings:
2425 * - distribute remaining fifo between the VLAN priority
2426 * queues based on traffic class PFC enablement and overall
2427 * priority (0 is lowest priority, so start at highest)
2428 */
2429 i = prio_queues;
2430 while (i > 0) {
2431 i--;
2433 fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1;
2435 if (!pdata->pfcq[i] || !addn_fifo)
2436 continue;
2438 if (addn_fifo > rem_fifo) {
2439 netdev_warn(pdata->netdev,
2440 "RXq%u cannot set needed fifo size\n", i);
2441 if (!rem_fifo)
2442 continue;
2444 addn_fifo = rem_fifo;
2445 }
2447 fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT);
2448 rem_fifo -= addn_fifo;
2449 }
2451 if (rem_fifo) {
2452 unsigned int inc_fifo = rem_fifo / prio_queues;
2454 /* Distribute remaining fifo across queues */
2455 for (i = 0; i < prio_queues; i++)
2456 fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT);
2457 }
2458 }
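/* Illustrative example (numbers hypothetical, assuming a 256-byte
 * XGMAC_FIFO_UNIT): 6144 bytes of leftover fifo across 3 priority
 * queues gives inc_fifo = 2048 bytes, i.e. each queue's setting grows
 * by 2048 / 256 = 8 allocation units.
 */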
2460 static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
2461 {
2462 unsigned int fifo_size;
2463 unsigned int fifo[XGBE_MAX_QUEUES];
2464 unsigned int i;
2466 fifo_size = xgbe_get_tx_fifo_size(pdata);
2468 xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);
2470 for (i = 0; i < pdata->tx_q_count; i++)
2471 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);
2473 netif_info(pdata, drv, pdata->netdev,
2474 "%d Tx hardware queues, %d byte fifo per queue\n",
2475 pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
2476 }
2478 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
2479 {
2480 unsigned int fifo_size;
2481 unsigned int fifo[XGBE_MAX_QUEUES];
2482 unsigned int prio_queues;
2483 unsigned int i;
2485 /* Clear any DCB related fifo/queue information */
2486 memset(pdata->pfcq, 0, sizeof(pdata->pfcq));
2487 pdata->pfc_rfa = 0;
2489 fifo_size = xgbe_get_rx_fifo_size(pdata);
2490 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2492 /* Assign a minimum fifo to the non-VLAN priority queues */
2493 fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);
2495 if (pdata->pfc && pdata->ets)
2496 xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo);
2497 else
2498 xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
2500 for (i = 0; i < pdata->rx_q_count; i++)
2501 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);
2503 xgbe_calculate_flow_control_threshold(pdata, fifo);
2504 xgbe_config_flow_control_threshold(pdata);
2506 if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) {
2507 netif_info(pdata, drv, pdata->netdev,
2508 "%u Rx hardware queues\n", pdata->rx_q_count);
2509 for (i = 0; i < pdata->rx_q_count; i++)
2510 netif_info(pdata, drv, pdata->netdev,
2511 "RxQ%u, %u byte fifo queue\n", i,
2512 ((fifo[i] + 1) * XGMAC_FIFO_UNIT));
2513 } else {
2514 netif_info(pdata, drv, pdata->netdev,
2515 "%u Rx hardware queues, %u byte fifo per queue\n",
2516 pdata->rx_q_count,
2517 ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
2518 }
2519 }
2521 static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
2522 {
2523 unsigned int qptc, qptc_extra, queue;
2524 unsigned int prio_queues;
2525 unsigned int ppq, ppq_extra, prio;
2526 unsigned int mask;
2527 unsigned int i, j, reg, reg_val;
2529 /* Map the MTL Tx Queues to Traffic Classes
2530 * Note: Tx Queues >= Traffic Classes
2531 */
2532 qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
2533 qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
2535 for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
2536 for (j = 0; j < qptc; j++) {
2537 netif_dbg(pdata, drv, pdata->netdev,
2538 "TXq%u mapped to TC%u\n", queue, i);
2539 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
2540 Q2TCMAP, i);
2541 pdata->q2tc_map[queue++] = i;
2542 }
2544 if (i < qptc_extra) {
2545 netif_dbg(pdata, drv, pdata->netdev,
2546 "TXq%u mapped to TC%u\n", queue, i);
2547 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
2548 Q2TCMAP, i);
2549 pdata->q2tc_map[queue++] = i;
2550 }
2551 }
2553 /* Map the 8 VLAN priority values to available MTL Rx queues */
2554 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2555 ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
2556 ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
2558 reg = MAC_RQC2R;
2559 reg_val = 0;
2560 for (i = 0, prio = 0; i < prio_queues;) {
2561 mask = 0;
2562 for (j = 0; j < ppq; j++) {
2563 netif_dbg(pdata, drv, pdata->netdev,
2564 "PRIO%u mapped to RXq%u\n", prio, i);
2565 mask |= (1 << prio);
2566 pdata->prio2q_map[prio++] = i;
2567 }
2569 if (i < ppq_extra) {
2570 netif_dbg(pdata, drv, pdata->netdev,
2571 "PRIO%u mapped to RXq%u\n", prio, i);
2572 mask |= (1 << prio);
2573 pdata->prio2q_map[prio++] = i;
2574 }
2576 reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
2578 if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
2579 continue;
2581 XGMAC_IOWRITE(pdata, reg, reg_val);
2582 reg += MAC_RQC2_INC;
2583 reg_val = 0;
2584 }
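/* Worked example: with prio_queues = 3, ppq = 8 / 3 = 2 and
 * ppq_extra = 8 % 3 = 2, RXq0 takes PRIO0-2 (mask 0x07), RXq1 takes
 * PRIO3-5 (mask 0x38) and RXq2 takes PRIO6-7 (mask 0xc0). Each queue's
 * 8-bit mask is packed MAC_RQC2_Q_PER_REG entries per 32-bit register,
 * which is what the "(i % MAC_RQC2_Q_PER_REG) << 3" shift implements.
 */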
2586 /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
2587 reg = MTL_RQDCM0R;
2588 reg_val = 0;
2589 for (i = 0; i < pdata->rx_q_count;) {
2590 reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
2592 if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
2593 continue;
2595 XGMAC_IOWRITE(pdata, reg, reg_val);
2597 reg += MTL_RQDCM_INC;
2598 reg_val = 0;
2599 }
2600 }
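/* The 0x80 written into each queue's byte of the MTL_RQDCM registers in
 * the loop above appears to set the per-queue "dynamic DMA channel"
 * flag, letting the hardware steer each received packet to a DMA
 * channel at runtime instead of using a fixed queue-to-channel binding.
 */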
2602 static void xgbe_config_tc(struct xgbe_prv_data *pdata)
2603 {
2604 unsigned int offset, queue, prio;
2605 u8 i;
2607 netdev_reset_tc(pdata->netdev);
2608 if (!pdata->num_tcs)
2609 return;
2611 netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
2613 for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
2614 while ((queue < pdata->tx_q_count) &&
2615 (pdata->q2tc_map[queue] == i))
2616 queue++;
2618 netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
2619 i, offset, queue - 1);
2620 netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
2621 offset = queue;
2622 }
2624 if (!pdata->ets)
2625 return;
2627 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
2628 netdev_set_prio_tc_map(pdata->netdev, prio,
2629 pdata->ets->prio_tc[prio]);
2630 }
2632 static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
2633 {
2634 struct ieee_ets *ets = pdata->ets;
2635 unsigned int total_weight, min_weight, weight;
2636 unsigned int mask, reg, reg_val;
2637 unsigned int i, prio;
2639 if (!ets)
2640 return;
2642 /* Set Tx to deficit weighted round robin scheduling algorithm (when
2643 * traffic class is using ETS algorithm)
2644 */
2645 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
2647 /* Set Traffic Class algorithms */
2648 total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
2649 min_weight = total_weight / 100;
2650 if (!min_weight)
2651 min_weight = 1;
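/* Illustrative arithmetic (values hypothetical): with an MTU of 1500
 * and 8 traffic classes, total_weight = 12000 and min_weight = 120, so
 * a class granted 25% bandwidth below receives
 * weight = 12000 * 25 / 100 = 3000 before clamping.
 */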
2653 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
2654 /* Map the priorities to the traffic class */
2655 mask = 0;
2656 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
2657 if (ets->prio_tc[prio] == i)
2658 mask |= (1 << prio);
2659 }
2660 mask &= 0xff;
2662 netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
2663 i, mask);
2664 reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
2665 reg_val = XGMAC_IOREAD(pdata, reg);
2667 reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
2668 reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));
2670 XGMAC_IOWRITE(pdata, reg, reg_val);
2672 /* Set the traffic class algorithm */
2673 switch (ets->tc_tsa[i]) {
2674 case IEEE_8021QAZ_TSA_STRICT:
2675 netif_dbg(pdata, drv, pdata->netdev,
2676 "TC%u using SP\n", i);
2677 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
2678 MTL_TSA_SP);
2679 break;
2680 case IEEE_8021QAZ_TSA_ETS:
2681 weight = total_weight * ets->tc_tx_bw[i] / 100;
2682 weight = clamp(weight, min_weight, total_weight);
2684 netif_dbg(pdata, drv, pdata->netdev,
2685 "TC%u using DWRR (weight %u)\n", i, weight);
2686 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
2687 MTL_TSA_ETS);
2688 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
2689 weight);
2690 break;
2691 }
2692 }
2694 xgbe_config_tc(pdata);
2695 }
2697 static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
2698 {
2699 if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
2700 /* Just stop the Tx queues while Rx fifo is changed */
2701 netif_tx_stop_all_queues(pdata->netdev);
2703 /* Suspend Rx so that fifo's can be adjusted */
2704 pdata->hw_if.disable_rx(pdata);
2705 }
2707 xgbe_config_rx_fifo_size(pdata);
2708 xgbe_config_flow_control(pdata);
2710 if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
2711 /* Resume Rx */
2712 pdata->hw_if.enable_rx(pdata);
2714 /* Resume Tx queues */
2715 netif_tx_start_all_queues(pdata->netdev);
2716 }
2717 }
2719 static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
2720 {
2721 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
2723 /* Filtering is done using perfect filtering and hash filtering */
2724 if (pdata->hw_feat.hash_table_size) {
2725 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
2726 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
2727 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
2728 }
2729 }
2731 static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
2732 {
2733 unsigned int val;
2735 val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
2737 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
2738 }
2740 static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
2741 {
2742 xgbe_set_speed(pdata, pdata->phy_speed);
2743 }
2745 static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
2746 {
2747 if (pdata->netdev->features & NETIF_F_RXCSUM)
2748 xgbe_enable_rx_csum(pdata);
2749 else
2750 xgbe_disable_rx_csum(pdata);
2751 }
2753 static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
2754 {
2755 /* Indicate that VLAN Tx CTAGs come from context descriptors */
2756 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
2757 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
2759 /* Set the current VLAN Hash Table register value */
2760 xgbe_update_vlan_hash_table(pdata);
2762 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
2763 xgbe_enable_rx_vlan_filtering(pdata);
2764 else
2765 xgbe_disable_rx_vlan_filtering(pdata);
2767 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2768 xgbe_enable_rx_vlan_stripping(pdata);
2769 else
2770 xgbe_disable_rx_vlan_stripping(pdata);
2771 }
2773 static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
2774 {
2775 bool read_hi;
2776 u64 val;
2778 if (pdata->vdata->mmc_64bit) {
2779 switch (reg_lo) {
2780 /* These registers are always 32 bit */
2781 case MMC_RXRUNTERROR:
2782 case MMC_RXJABBERERROR:
2783 case MMC_RXUNDERSIZE_G:
2784 case MMC_RXOVERSIZE_G:
2785 case MMC_RXWATCHDOGERROR:
2786 read_hi = false;
2787 break;
2789 default:
2790 read_hi = true;
2791 }
2792 } else {
2793 switch (reg_lo) {
2794 /* These registers are always 64 bit */
2795 case MMC_TXOCTETCOUNT_GB_LO:
2796 case MMC_TXOCTETCOUNT_G_LO:
2797 case MMC_RXOCTETCOUNT_GB_LO:
2798 case MMC_RXOCTETCOUNT_G_LO:
2799 read_hi = true;
2800 break;
2802 default:
2803 read_hi = false;
2804 }
2805 }
2807 val = XGMAC_IOREAD(pdata, reg_lo);
2809 if (read_hi)
2810 val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
2812 return val;
2813 }
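/* For 64-bit counters the high word lives at reg_lo + 4, so the full
 * value is lo | (hi << 32). The "+=" accumulation done by the callers
 * below relies on the counters being configured as read-on-reset (ROR)
 * in xgbe_config_mmc(), so each read returns only the delta since the
 * previous read.
 */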
2815 static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
2816 {
2817 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
2818 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
2820 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
2821 stats->txoctetcount_gb +=
2822 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
2824 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
2825 stats->txframecount_gb +=
2826 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
2828 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
2829 stats->txbroadcastframes_g +=
2830 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
2832 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
2833 stats->txmulticastframes_g +=
2834 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
2836 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
2837 stats->tx64octets_gb +=
2838 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
2840 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
2841 stats->tx65to127octets_gb +=
2842 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
2844 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
2845 stats->tx128to255octets_gb +=
2846 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
2848 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
2849 stats->tx256to511octets_gb +=
2850 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
2852 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
2853 stats->tx512to1023octets_gb +=
2854 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
2856 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
2857 stats->tx1024tomaxoctets_gb +=
2858 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
2860 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
2861 stats->txunicastframes_gb +=
2862 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
2864 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
2865 stats->txmulticastframes_gb +=
2866 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
2868 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
2869 stats->txbroadcastframes_g +=
2870 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
2872 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
2873 stats->txunderflowerror +=
2874 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
2876 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
2877 stats->txoctetcount_g +=
2878 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
2880 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
2881 stats->txframecount_g +=
2882 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
2884 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
2885 stats->txpauseframes +=
2886 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
2888 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
2889 stats->txvlanframes_g +=
2890 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
2891 }
2893 static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
2894 {
2895 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
2896 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
2898 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
2899 stats->rxframecount_gb +=
2900 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
2902 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
2903 stats->rxoctetcount_gb +=
2904 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
2906 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
2907 stats->rxoctetcount_g +=
2908 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
2910 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
2911 stats->rxbroadcastframes_g +=
2912 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
2914 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
2915 stats->rxmulticastframes_g +=
2916 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
2918 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
2919 stats->rxcrcerror +=
2920 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
2922 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
2923 stats->rxrunterror +=
2924 xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
2926 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
2927 stats->rxjabbererror +=
2928 xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
2930 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
2931 stats->rxundersize_g +=
2932 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
2934 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
2935 stats->rxoversize_g +=
2936 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
2938 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
2939 stats->rx64octets_gb +=
2940 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
2942 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
2943 stats->rx65to127octets_gb +=
2944 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
2946 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
2947 stats->rx128to255octets_gb +=
2948 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
2950 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
2951 stats->rx256to511octets_gb +=
2952 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
2954 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
2955 stats->rx512to1023octets_gb +=
2956 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
2958 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
2959 stats->rx1024tomaxoctets_gb +=
2960 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
2962 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
2963 stats->rxunicastframes_g +=
2964 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
2966 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
2967 stats->rxlengtherror +=
2968 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
2970 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
2971 stats->rxoutofrangetype +=
2972 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
2974 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
2975 stats->rxpauseframes +=
2976 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
2978 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
2979 stats->rxfifooverflow +=
2980 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
2982 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
2983 stats->rxvlanframes_gb +=
2984 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
2986 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
2987 stats->rxwatchdogerror +=
2988 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
2989 }
2991 static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
2992 {
2993 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
2995 /* Freeze counters */
2996 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
2998 stats->txoctetcount_gb +=
2999 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
3001 stats->txframecount_gb +=
3002 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
3004 stats->txbroadcastframes_g +=
3005 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
3007 stats->txmulticastframes_g +=
3008 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
3010 stats->tx64octets_gb +=
3011 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
3013 stats->tx65to127octets_gb +=
3014 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
3016 stats->tx128to255octets_gb +=
3017 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
3019 stats->tx256to511octets_gb +=
3020 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
3022 stats->tx512to1023octets_gb +=
3023 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
3025 stats->tx1024tomaxoctets_gb +=
3026 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
3028 stats->txunicastframes_gb +=
3029 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
3031 stats->txmulticastframes_gb +=
3032 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
3034 stats->txbroadcastframes_g +=
3035 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
3037 stats->txunderflowerror +=
3038 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
3040 stats->txoctetcount_g +=
3041 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
3043 stats->txframecount_g +=
3044 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
3046 stats->txpauseframes +=
3047 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
3049 stats->txvlanframes_g +=
3050 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
3052 stats->rxframecount_gb +=
3053 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
3055 stats->rxoctetcount_gb +=
3056 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
3058 stats->rxoctetcount_g +=
3059 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
3061 stats->rxbroadcastframes_g +=
3062 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
3064 stats->rxmulticastframes_g +=
3065 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
3067 stats->rxcrcerror +=
3068 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
3070 stats->rxrunterror +=
3071 xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
3073 stats->rxjabbererror +=
3074 xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
3076 stats->rxundersize_g +=
3077 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
3079 stats->rxoversize_g +=
3080 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
3082 stats->rx64octets_gb +=
3083 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
3085 stats->rx65to127octets_gb +=
3086 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
3088 stats->rx128to255octets_gb +=
3089 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
3091 stats->rx256to511octets_gb +=
3092 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
3094 stats->rx512to1023octets_gb +=
3095 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
3097 stats->rx1024tomaxoctets_gb +=
3098 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
3100 stats->rxunicastframes_g +=
3101 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
3103 stats->rxlengtherror +=
3104 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
3106 stats->rxoutofrangetype +=
3107 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
3109 stats->rxpauseframes +=
3110 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
3112 stats->rxfifooverflow +=
3113 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
3115 stats->rxvlanframes_gb +=
3116 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
3118 stats->rxwatchdogerror +=
3119 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
3121 /* Un-freeze counters */
3122 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
3123 }
3125 static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
3126 {
3127 /* Set counters to reset on read */
3128 XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
3130 /* Reset the counters */
3131 XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
3132 }
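/* With ROR set, every register read in xgbe_read_mmc_stats() and the
 * interrupt handlers above clears the hardware counter, which is why
 * those paths accumulate with "+=" into the software mmc_stats instead
 * of overwriting them.
 */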
3134 static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata,
3135 unsigned int queue)
3136 {
3137 unsigned int tx_status;
3138 unsigned long tx_timeout;
3140 /* The Tx engine cannot be stopped if it is actively processing
3141 * packets. Wait for the Tx queue to empty the Tx fifo. Don't
3142 * wait forever though...
3143 */
3144 tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
3145 while (time_before(jiffies, tx_timeout)) {
3146 tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
3147 if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
3148 (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
3149 break;
3151 usleep_range(500, 1000);
3152 }
3154 if (!time_before(jiffies, tx_timeout))
3155 netdev_info(pdata->netdev,
3156 "timed out waiting for Tx queue %u to empty\n",
3157 queue);
3158 }
3160 static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
3161 unsigned int queue)
3162 {
3163 unsigned int tx_dsr, tx_pos, tx_qidx;
3164 unsigned int tx_status;
3165 unsigned long tx_timeout;
3167 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
3168 return xgbe_txq_prepare_tx_stop(pdata, queue);
3170 /* Calculate the status register to read and the position within */
3171 if (queue < DMA_DSRX_FIRST_QUEUE) {
3172 tx_dsr = DMA_DSR0;
3173 tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
3174 } else {
3175 tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
3177 tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
3178 tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
3179 DMA_DSRX_TPS_START;
3180 }
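/* Illustrative mapping: queues below DMA_DSRX_FIRST_QUEUE have their Tx
 * process state packed into DMA_DSR0 at DMA_DSR_Q_WIDTH-bit intervals;
 * higher queues are grouped DMA_DSRX_QPR per register starting at
 * DMA_DSR1, with DMA_DSRX_INC bytes between successive registers.
 */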
3182 /* The Tx engine cannot be stopped if it is actively processing
3183 * descriptors. Wait for the Tx engine to enter the stopped or
3184 * suspended state. Don't wait forever though...
3185 */
3186 tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
3187 while (time_before(jiffies, tx_timeout)) {
3188 tx_status = XGMAC_IOREAD(pdata, tx_dsr);
3189 tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
3190 if ((tx_status == DMA_TPS_STOPPED) ||
3191 (tx_status == DMA_TPS_SUSPENDED))
3192 break;
3194 usleep_range(500, 1000);
3195 }
3197 if (!time_before(jiffies, tx_timeout))
3198 netdev_info(pdata->netdev,
3199 "timed out waiting for Tx DMA channel %u to stop\n",
3200 queue);
3201 }
3203 static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
3204 {
3205 struct xgbe_channel *channel;
3206 unsigned int i;
3208 /* Enable each Tx DMA channel */
3209 channel = pdata->channel;
3210 for (i = 0; i < pdata->channel_count; i++, channel++) {
3211 if (!channel->tx_ring)
3212 break;
3214 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
3215 }
3217 /* Enable each Tx queue */
3218 for (i = 0; i < pdata->tx_q_count; i++)
3219 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
3220 MTL_Q_ENABLED);
3222 /* Enable MAC Tx */
3223 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
3224 }
3226 static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
3227 {
3228 struct xgbe_channel *channel;
3229 unsigned int i;
3231 /* Prepare for Tx DMA channel stop */
3232 for (i = 0; i < pdata->tx_q_count; i++)
3233 xgbe_prepare_tx_stop(pdata, i);
3235 /* Disable MAC Tx */
3236 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
3238 /* Disable each Tx queue */
3239 for (i = 0; i < pdata->tx_q_count; i++)
3240 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
3242 /* Disable each Tx DMA channel */
3243 channel = pdata->channel;
3244 for (i = 0; i < pdata->channel_count; i++, channel++) {
3245 if (!channel->tx_ring)
3246 break;
3248 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
3249 }
3250 }
3252 static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
3253 unsigned int queue)
3254 {
3255 unsigned int rx_status;
3256 unsigned long rx_timeout;
3258 /* The Rx engine cannot be stopped if it is actively processing
3259 * packets. Wait for the Rx queue to empty the Rx fifo. Don't
3260 * wait forever though...
3261 */
3262 rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
3263 while (time_before(jiffies, rx_timeout)) {
3264 rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
3265 if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
3266 (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
3267 break;
3269 usleep_range(500, 1000);
3270 }
3272 if (!time_before(jiffies, rx_timeout))
3273 netdev_info(pdata->netdev,
3274 "timed out waiting for Rx queue %u to empty\n",
3275 queue);
3276 }
3278 static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
3279 {
3280 struct xgbe_channel *channel;
3281 unsigned int reg_val, i;
3283 /* Enable each Rx DMA channel */
3284 channel = pdata->channel;
3285 for (i = 0; i < pdata->channel_count; i++, channel++) {
3286 if (!channel->rx_ring)
3287 break;
3289 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
3290 }
3292 /* Enable each Rx queue */
3293 reg_val = 0;
3294 for (i = 0; i < pdata->rx_q_count; i++)
3295 reg_val |= (0x02 << (i << 1));
3296 XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
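/* MAC_RQC0R holds a 2-bit enable field per Rx queue; 0x02 shifted by
 * (i << 1) appears to mark queue i as enabled for generic (DCB)
 * traffic. For example, 4 queues would yield reg_val = 0xaa.
 */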
3298 /* Enable MAC Rx */
3299 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
3300 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
3301 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
3302 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
3303 }
3305 static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
3306 {
3307 struct xgbe_channel *channel;
3308 unsigned int i;
3310 /* Disable MAC Rx */
3311 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
3312 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
3313 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
3314 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
3316 /* Prepare for Rx DMA channel stop */
3317 for (i = 0; i < pdata->rx_q_count; i++)
3318 xgbe_prepare_rx_stop(pdata, i);
3320 /* Disable each Rx queue */
3321 XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
3323 /* Disable each Rx DMA channel */
3324 channel = pdata->channel;
3325 for (i = 0; i < pdata->channel_count; i++, channel++) {
3326 if (!channel->rx_ring)
3327 break;
3329 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
3330 }
3331 }
3333 static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
3334 {
3335 struct xgbe_channel *channel;
3336 unsigned int i;
3338 /* Enable each Tx DMA channel */
3339 channel = pdata->channel;
3340 for (i = 0; i < pdata->channel_count; i++, channel++) {
3341 if (!channel->tx_ring)
3342 break;
3344 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
3345 }
3347 /* Enable MAC Tx */
3348 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
3349 }
3351 static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
3352 {
3353 struct xgbe_channel *channel;
3354 unsigned int i;
3356 /* Prepare for Tx DMA channel stop */
3357 for (i = 0; i < pdata->tx_q_count; i++)
3358 xgbe_prepare_tx_stop(pdata, i);
3360 /* Disable MAC Tx */
3361 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
3363 /* Disable each Tx DMA channel */
3364 channel = pdata->channel;
3365 for (i = 0; i < pdata->channel_count; i++, channel++) {
3366 if (!channel->tx_ring)
3367 break;
3369 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
3370 }
3371 }
3373 static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
3374 {
3375 struct xgbe_channel *channel;
3376 unsigned int i;
3378 /* Enable each Rx DMA channel */
3379 channel = pdata->channel;
3380 for (i = 0; i < pdata->channel_count; i++, channel++) {
3381 if (!channel->rx_ring)
3382 break;
3384 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
3385 }
3386 }
3388 static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
3389 {
3390 struct xgbe_channel *channel;
3391 unsigned int i;
3393 /* Disable each Rx DMA channel */
3394 channel = pdata->channel;
3395 for (i = 0; i < pdata->channel_count; i++, channel++) {
3396 if (!channel->rx_ring)
3397 break;
3399 XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
3400 }
3401 }
3403 static int xgbe_init(struct xgbe_prv_data *pdata)
3404 {
3405 struct xgbe_desc_if *desc_if = &pdata->desc_if;
3406 int ret;
3408 DBGPR("-->xgbe_init\n");
3410 /* Flush Tx queues */
3411 ret = xgbe_flush_tx_queues(pdata);
3412 if (ret) {
3413 netdev_err(pdata->netdev, "error flushing TX queues\n");
3414 return ret;
3415 }
3417 /*
3418 * Initialize DMA related features
3419 */
3420 xgbe_config_dma_bus(pdata);
3421 xgbe_config_dma_cache(pdata);
3422 xgbe_config_osp_mode(pdata);
3423 xgbe_config_pblx8(pdata);
3424 xgbe_config_tx_pbl_val(pdata);
3425 xgbe_config_rx_pbl_val(pdata);
3426 xgbe_config_rx_coalesce(pdata);
3427 xgbe_config_tx_coalesce(pdata);
3428 xgbe_config_rx_buffer_size(pdata);
3429 xgbe_config_tso_mode(pdata);
3430 xgbe_config_sph_mode(pdata);
3431 xgbe_config_rss(pdata);
3432 desc_if->wrapper_tx_desc_init(pdata);
3433 desc_if->wrapper_rx_desc_init(pdata);
3434 xgbe_enable_dma_interrupts(pdata);
3436 /*
3437 * Initialize MTL related features
3438 */
3439 xgbe_config_mtl_mode(pdata);
3440 xgbe_config_queue_mapping(pdata);
3441 xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
3442 xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
3443 xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
3444 xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
3445 xgbe_config_tx_fifo_size(pdata);
3446 xgbe_config_rx_fifo_size(pdata);
3447 /*TODO: Error Packet and undersized good Packet forwarding enable
3448 (FEP and FUP)
3449 */
3450 xgbe_config_dcb_tc(pdata);
3451 xgbe_enable_mtl_interrupts(pdata);
3453 /*
3454 * Initialize MAC related features
3455 */
3456 xgbe_config_mac_address(pdata);
3457 xgbe_config_rx_mode(pdata);
3458 xgbe_config_jumbo_enable(pdata);
3459 xgbe_config_flow_control(pdata);
3460 xgbe_config_mac_speed(pdata);
3461 xgbe_config_checksum_offload(pdata);
3462 xgbe_config_vlan_support(pdata);
3463 xgbe_config_mmc(pdata);
3464 xgbe_enable_mac_interrupts(pdata);
3466 /*
3467 * Initialize ECC related features
3468 */
3469 xgbe_enable_ecc_interrupts(pdata);
3471 DBGPR("<--xgbe_init\n");
3473 return 0;
3474 }
3476 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
3477 {
3478 DBGPR("-->xgbe_init_function_ptrs\n");
3480 hw_if->tx_complete = xgbe_tx_complete;
3482 hw_if->set_mac_address = xgbe_set_mac_address;
3483 hw_if->config_rx_mode = xgbe_config_rx_mode;
3485 hw_if->enable_rx_csum = xgbe_enable_rx_csum;
3486 hw_if->disable_rx_csum = xgbe_disable_rx_csum;
3488 hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
3489 hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
3490 hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
3491 hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
3492 hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
3494 hw_if->read_mmd_regs = xgbe_read_mmd_regs;
3495 hw_if->write_mmd_regs = xgbe_write_mmd_regs;
3497 hw_if->set_speed = xgbe_set_speed;
3499 hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
3500 hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
3501 hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;
3503 hw_if->set_gpio = xgbe_set_gpio;
3504 hw_if->clr_gpio = xgbe_clr_gpio;
3506 hw_if->enable_tx = xgbe_enable_tx;
3507 hw_if->disable_tx = xgbe_disable_tx;
3508 hw_if->enable_rx = xgbe_enable_rx;
3509 hw_if->disable_rx = xgbe_disable_rx;
3511 hw_if->powerup_tx = xgbe_powerup_tx;
3512 hw_if->powerdown_tx = xgbe_powerdown_tx;
3513 hw_if->powerup_rx = xgbe_powerup_rx;
3514 hw_if->powerdown_rx = xgbe_powerdown_rx;
3516 hw_if->dev_xmit = xgbe_dev_xmit;
3517 hw_if->dev_read = xgbe_dev_read;
3518 hw_if->enable_int = xgbe_enable_int;
3519 hw_if->disable_int = xgbe_disable_int;
3520 hw_if->init = xgbe_init;
3521 hw_if->exit = xgbe_exit;
3523 /* Descriptor related Sequences have to be initialized here */
3524 hw_if->tx_desc_init = xgbe_tx_desc_init;
3525 hw_if->rx_desc_init = xgbe_rx_desc_init;
3526 hw_if->tx_desc_reset = xgbe_tx_desc_reset;
3527 hw_if->rx_desc_reset = xgbe_rx_desc_reset;
3528 hw_if->is_last_desc = xgbe_is_last_desc;
3529 hw_if->is_context_desc = xgbe_is_context_desc;
3530 hw_if->tx_start_xmit = xgbe_tx_start_xmit;
3532 /* For FLOW ctrl */
3533 hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
3534 hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
3536 /* For RX coalescing */
3537 hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
3538 hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
3539 hw_if->usec_to_riwt = xgbe_usec_to_riwt;
3540 hw_if->riwt_to_usec = xgbe_riwt_to_usec;
3542 /* For RX and TX threshold config */
3543 hw_if->config_rx_threshold = xgbe_config_rx_threshold;
3544 hw_if->config_tx_threshold = xgbe_config_tx_threshold;
3546 /* For RX and TX Store and Forward Mode config */
3547 hw_if->config_rsf_mode = xgbe_config_rsf_mode;
3548 hw_if->config_tsf_mode = xgbe_config_tsf_mode;
3550 /* For TX DMA Operating on Second Frame config */
3551 hw_if->config_osp_mode = xgbe_config_osp_mode;
3553 /* For RX and TX PBL config */
3554 hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
3555 hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
3556 hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
3557 hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
3558 hw_if->config_pblx8 = xgbe_config_pblx8;
3560 /* For MMC statistics support */
3561 hw_if->tx_mmc_int = xgbe_tx_mmc_int;
3562 hw_if->rx_mmc_int = xgbe_rx_mmc_int;
3563 hw_if->read_mmc_stats = xgbe_read_mmc_stats;
3565 /* For PTP config */
3566 hw_if->config_tstamp = xgbe_config_tstamp;
3567 hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
3568 hw_if->set_tstamp_time = xgbe_set_tstamp_time;
3569 hw_if->get_tstamp_time = xgbe_get_tstamp_time;
3570 hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
3572 /* For Data Center Bridging config */
3573 hw_if->config_tc = xgbe_config_tc;
3574 hw_if->config_dcb_tc = xgbe_config_dcb_tc;
3575 hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
3577 /* For Receive Side Scaling */
3578 hw_if->enable_rss = xgbe_enable_rss;
3579 hw_if->disable_rss = xgbe_disable_rss;
3580 hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
3581 hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
3583 /* For ECC */
3584 hw_if->disable_ecc_ded = xgbe_disable_ecc_ded;
3585 hw_if->disable_ecc_sec = xgbe_disable_ecc_sec;
3587 DBGPR("<--xgbe_init_function_ptrs\n");
3588 }