// SPDX-License-Identifier: GPL-2.0-only
/*
 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
 *
 * Copyright 2008 JMicron Technology Corporation
 * https://www.jmicron.com/
 * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
 *
 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <net/ip6_checksum.h>
static int force_pseudohp = -1;
static int no_pseudohp = -1;
static int no_extplug = -1;
module_param(force_pseudohp, int, 0);
MODULE_PARM_DESC(force_pseudohp,
        "Enable pseudo hot-plug feature manually by driver instead of BIOS.");
module_param(no_pseudohp, int, 0);
MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
module_param(no_extplug, int, 0);
MODULE_PARM_DESC(no_extplug,
        "Do not use external plug signal for pseudo hot-plug.");
jme_mdio_read(struct net_device *netdev, int phy, int reg)
        struct jme_adapter *jme = netdev_priv(netdev);
        int i, val, again = (reg == MII_BMSR) ? 1 : 0;

        jwrite32(jme, JME_SMI, SMI_OP_REQ |
                smi_phy_addr(phy) |
                smi_reg_addr(reg));

        for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
                val = jread32(jme, JME_SMI);
                if ((val & SMI_OP_REQ) == 0)

        pr_err("phy(%d) read timeout : %d\n", phy, reg);

        return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
jme_mdio_write(struct net_device *netdev,
               int phy, int reg, int val)
        struct jme_adapter *jme = netdev_priv(netdev);

        jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
                ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
                smi_phy_addr(phy) | smi_reg_addr(reg));

        for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
                if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)

        pr_err("phy(%d) write timeout : %d\n", phy, reg);
jme_reset_phy_processor(struct jme_adapter *jme)
        jme_mdio_write(jme->dev,
                MII_ADVERTISE, ADVERTISE_ALL |
                ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

        if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
                jme_mdio_write(jme->dev,
                        ADVERTISE_1000FULL | ADVERTISE_1000HALF);

        val = jme_mdio_read(jme->dev,

        jme_mdio_write(jme->dev,
                MII_BMCR, val | BMCR_RESET);
jme_setup_wakeup_frame(struct jme_adapter *jme,
                       const u32 *mask, u32 crc, int fnr)
        jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
        jwrite32(jme, JME_WFODP, crc);

        for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
                jwrite32(jme, JME_WFOI,
                        ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
                        (fnr & WFOI_FRAME_SEL));
                jwrite32(jme, JME_WFODP, mask[i]);
jme_mac_rxclk_off(struct jme_adapter *jme)
        jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
        jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);

jme_mac_rxclk_on(struct jme_adapter *jme)
        jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
        jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);

jme_mac_txclk_off(struct jme_adapter *jme)
        jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
        jwrite32f(jme, JME_GHC, jme->reg_ghc);

jme_mac_txclk_on(struct jme_adapter *jme)
        u32 speed = jme->reg_ghc & GHC_SPEED;

        if (speed == GHC_SPEED_1000M)
                jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
        else
                jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
        jwrite32f(jme, JME_GHC, jme->reg_ghc);
jme_reset_ghc_speed(struct jme_adapter *jme)
        jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
        jwrite32f(jme, JME_GHC, jme->reg_ghc);

jme_reset_250A2_workaround(struct jme_adapter *jme)
        jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
                             GPREG1_RSSPATCH);
        jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);

jme_assert_ghc_reset(struct jme_adapter *jme)
        jme->reg_ghc |= GHC_SWRST;
        jwrite32f(jme, JME_GHC, jme->reg_ghc);

jme_clear_ghc_reset(struct jme_adapter *jme)
        jme->reg_ghc &= ~GHC_SWRST;
        jwrite32f(jme, JME_GHC, jme->reg_ghc);
jme_reset_mac_processor(struct jme_adapter *jme)
        static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
        u32 crc = 0xCDCDCDCD;

        jme_reset_ghc_speed(jme);
        jme_reset_250A2_workaround(jme);
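        /*
         * The MAC RX/TX clocks are switched on so the GHC soft reset can
         * take effect, switched off again while the reset bit is cleared,
         * and toggled once more before the ring and filter registers below
         * are cleared.
         */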
        jme_mac_rxclk_on(jme);
        jme_mac_txclk_on(jme);
        jme_assert_ghc_reset(jme);
        jme_mac_rxclk_off(jme);
        jme_mac_txclk_off(jme);
        jme_clear_ghc_reset(jme);
        jme_mac_rxclk_on(jme);
        jme_mac_txclk_on(jme);
        jme_mac_rxclk_off(jme);
        jme_mac_txclk_off(jme);

        jwrite32(jme, JME_RXDBA_LO, 0x00000000);
        jwrite32(jme, JME_RXDBA_HI, 0x00000000);
        jwrite32(jme, JME_RXQDC, 0x00000000);
        jwrite32(jme, JME_RXNDA, 0x00000000);
        jwrite32(jme, JME_TXDBA_LO, 0x00000000);
        jwrite32(jme, JME_TXDBA_HI, 0x00000000);
        jwrite32(jme, JME_TXQDC, 0x00000000);
        jwrite32(jme, JME_TXNDA, 0x00000000);

        jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
        jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
        for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
                jme_setup_wakeup_frame(jme, mask, crc, i);

                gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
                gpreg0 = GPREG0_DEFAULT;
        jwrite32(jme, JME_GPREG0, gpreg0);
jme_clear_pm_enable_wol(struct jme_adapter *jme)
        jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);

jme_clear_pm_disable_wol(struct jme_adapter *jme)
        jwrite32(jme, JME_PMCS, PMCS_STMASK);
jme_reload_eeprom(struct jme_adapter *jme)
        val = jread32(jme, JME_SMBCSR);

        if (val & SMBCSR_EEPROMD) {
                jwrite32(jme, JME_SMBCSR, val);
                val |= SMBCSR_RELOAD;
                jwrite32(jme, JME_SMBCSR, val);

                for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
                        if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)

                pr_err("eeprom reload timeout\n");
jme_load_macaddr(struct net_device *netdev)
        struct jme_adapter *jme = netdev_priv(netdev);
        unsigned char macaddr[ETH_ALEN];

        spin_lock_bh(&jme->macaddr_lock);
        val = jread32(jme, JME_RXUMA_LO);
        macaddr[0] = (val >> 0) & 0xFF;
        macaddr[1] = (val >> 8) & 0xFF;
        macaddr[2] = (val >> 16) & 0xFF;
        macaddr[3] = (val >> 24) & 0xFF;
        val = jread32(jme, JME_RXUMA_HI);
        macaddr[4] = (val >> 0) & 0xFF;
        macaddr[5] = (val >> 8) & 0xFF;
        eth_hw_addr_set(netdev, macaddr);
        spin_unlock_bh(&jme->macaddr_lock);
jme_set_rx_pcc(struct jme_adapter *jme, int p)
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
                jwrite32(jme, JME_PCCRX0,
                        ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
                        ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));

        if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
                netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
jme_start_irq(struct jme_adapter *jme)
        register struct dynpcc_info *dpi = &(jme->dpi);

        jme_set_rx_pcc(jme, PCC_P1);
        dpi->attempt = PCC_P1;

        jwrite32(jme, JME_PCCTX,
                ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
                ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |

        jwrite32(jme, JME_IENS, INTR_ENABLE);
jme_stop_irq(struct jme_adapter *jme)
        jwrite32f(jme, JME_IENC, INTR_ENABLE);
jme_linkstat_from_phy(struct jme_adapter *jme)
        phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
        bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
        if (bmsr & BMSR_ANCOMP)
                phylink |= PHY_LINK_AUTONEG_COMPLETE;
jme_set_phyfifo_5level(struct jme_adapter *jme)
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);

jme_set_phyfifo_8level(struct jme_adapter *jme)
        jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
jme_check_link(struct net_device *netdev, int testonly)
        struct jme_adapter *jme = netdev_priv(netdev);
        u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;

                phylink = jme_linkstat_from_phy(jme);
                phylink = jread32(jme, JME_PHY_LINK);

        if (phylink & PHY_LINK_UP) {
                if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
                        /*
                         * If we did not enable AN
                         * Speed/Duplex Info should be obtained from SMI
                         */
                        phylink = PHY_LINK_UP;

                        bmcr = jme_mdio_read(jme->dev,

                        phylink |= ((bmcr & BMCR_SPEED1000) &&
                                (bmcr & BMCR_SPEED100) == 0) ?
                                        PHY_LINK_SPEED_1000M :
                                (bmcr & BMCR_SPEED100) ?
                                        PHY_LINK_SPEED_100M :

                        phylink |= (bmcr & BMCR_FULLDPLX) ?

                        strcat(linkmsg, "Forced: ");
                        /*
                         * Keep polling for speed/duplex resolve complete
                         */
                        while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
                                        phylink = jme_linkstat_from_phy(jme);
                                        phylink = jread32(jme, JME_PHY_LINK);
                                pr_err("Waiting speed resolve timeout\n");

                        strcat(linkmsg, "ANed: ");

                if (jme->phylink == phylink) {

                jme->phylink = phylink;

                /*
                 * The speed/duplex setting of jme->reg_ghc already cleared
                 * by jme_reset_mac_processor()
                 */
                switch (phylink & PHY_LINK_SPEED_MASK) {
                case PHY_LINK_SPEED_10M:
                        jme->reg_ghc |= GHC_SPEED_10M;
                        strcat(linkmsg, "10 Mbps, ");
                case PHY_LINK_SPEED_100M:
                        jme->reg_ghc |= GHC_SPEED_100M;
                        strcat(linkmsg, "100 Mbps, ");
                case PHY_LINK_SPEED_1000M:
                        jme->reg_ghc |= GHC_SPEED_1000M;
                        strcat(linkmsg, "1000 Mbps, ");

                if (phylink & PHY_LINK_DUPLEX) {
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
                        jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
                        jme->reg_ghc |= GHC_DPX;
                        jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
                        jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);

                jwrite32(jme, JME_GHC, jme->reg_ghc);

                if (is_buggy250(jme->pdev->device, jme->chiprev)) {
                        jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
                                             GPREG1_RSSPATCH);
                        if (!(phylink & PHY_LINK_DUPLEX))
                                jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
                        switch (phylink & PHY_LINK_SPEED_MASK) {
                        case PHY_LINK_SPEED_10M:
                                jme_set_phyfifo_8level(jme);
                                jme->reg_gpreg1 |= GPREG1_RSSPATCH;
                        case PHY_LINK_SPEED_100M:
                                jme_set_phyfifo_5level(jme);
                                jme->reg_gpreg1 |= GPREG1_RSSPATCH;
                        case PHY_LINK_SPEED_1000M:
                                jme_set_phyfifo_8level(jme);

                jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);

                strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
                strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?

                netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
                netif_carrier_on(netdev);

                netif_info(jme, link, jme->dev, "Link is down\n");
                netif_carrier_off(netdev);
jme_setup_tx_resources(struct jme_adapter *jme)
        struct jme_ring *txring = &(jme->txring[0]);

        txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                           TX_RING_ALLOC_SIZE(jme->tx_ring_size),

        txring->desc = (void *)ALIGN((unsigned long)(txring->alloc),
        txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
        txring->next_to_use = 0;
        atomic_set(&txring->next_to_clean, 0);
        atomic_set(&txring->nr_free, jme->tx_ring_size);

        txring->bufinf = kcalloc(jme->tx_ring_size,
                                 sizeof(struct jme_buffer_info),
        if (unlikely(!(txring->bufinf)))
                goto err_free_txring;

        dma_free_coherent(&(jme->pdev->dev),
                          TX_RING_ALLOC_SIZE(jme->tx_ring_size),
        txring->dmaalloc = 0;
        txring->bufinf = NULL;
jme_free_tx_resources(struct jme_adapter *jme)
        struct jme_ring *txring = &(jme->txring[0]);
        struct jme_buffer_info *txbi;

        if (txring->bufinf) {
                for (i = 0 ; i < jme->tx_ring_size ; ++i) {
                        txbi = txring->bufinf + i;
                        dev_kfree_skb(txbi->skb);
                        txbi->start_xmit = 0;
                kfree(txring->bufinf);

        dma_free_coherent(&(jme->pdev->dev),
                          TX_RING_ALLOC_SIZE(jme->tx_ring_size),
        txring->alloc = NULL;
        txring->dmaalloc = 0;
        txring->bufinf = NULL;
        txring->next_to_use = 0;
        atomic_set(&txring->next_to_clean, 0);
        atomic_set(&txring->nr_free, 0);
jme_enable_tx_engine(struct jme_adapter *jme)
        jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);

        /*
         * Setup TX Queue 0 DMA Base Address
         */
        jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
        jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
        jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

        /*
         * Setup TX Descriptor Count
         */
        jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

        jwrite32f(jme, JME_TXCS, jme->reg_txcs |

        /*
         * Start clock for TX MAC Processor
         */
        jme_mac_txclk_on(jme);
jme_disable_tx_engine(struct jme_adapter *jme)
        jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);

        val = jread32(jme, JME_TXCS);
        for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
                val = jread32(jme, JME_TXCS);

                pr_err("Disable TX engine timeout\n");

        /*
         * Stop clock for TX MAC Processor
         */
        jme_mac_txclk_off(jme);
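/*
 * jme_set_clean_rxdesc() below re-arms one receive descriptor: it writes the
 * buffer DMA address and length from the matching jme_buffer_info and then
 * hands the descriptor back to the hardware by setting RXFLAG_OWN | RXFLAG_INT.
 */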
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
        struct jme_ring *rxring = &(jme->rxring[0]);
        register struct rxdesc *rxdesc = rxring->desc;
        struct jme_buffer_info *rxbi = rxring->bufinf;

        rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
        rxdesc->desc1.bufaddrl = cpu_to_le32(
                (__u64)rxbi->mapping & 0xFFFFFFFFUL);
        rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
        if (jme->dev->features & NETIF_F_HIGHDMA)
                rxdesc->desc1.flags = RXFLAG_64BIT;
        rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf + i;

        skb = netdev_alloc_skb(jme->dev,
                               jme->dev->mtu + RX_EXTRA_LEN);

        mapping = dma_map_page(&jme->pdev->dev, virt_to_page(skb->data),
                               offset_in_page(skb->data), skb_tailroom(skb),
        if (unlikely(dma_mapping_error(&jme->pdev->dev, mapping))) {

        if (likely(rxbi->mapping))
                dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len,

        rxbi->len = skb_tailroom(skb);
        rxbi->mapping = mapping;
jme_free_rx_buf(struct jme_adapter *jme, int i)
        struct jme_ring *rxring = &(jme->rxring[0]);
        struct jme_buffer_info *rxbi = rxring->bufinf;

        dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len,
        dev_kfree_skb(rxbi->skb);
jme_free_rx_resources(struct jme_adapter *jme)
        struct jme_ring *rxring = &(jme->rxring[0]);

        if (rxring->bufinf) {
                for (i = 0 ; i < jme->rx_ring_size ; ++i)
                        jme_free_rx_buf(jme, i);
                kfree(rxring->bufinf);

        dma_free_coherent(&(jme->pdev->dev),
                          RX_RING_ALLOC_SIZE(jme->rx_ring_size),
        rxring->alloc = NULL;
        rxring->dmaalloc = 0;
        rxring->bufinf = NULL;
        rxring->next_to_use = 0;
        atomic_set(&rxring->next_to_clean, 0);
jme_setup_rx_resources(struct jme_adapter *jme)
        struct jme_ring *rxring = &(jme->rxring[0]);

        rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
                                           RX_RING_ALLOC_SIZE(jme->rx_ring_size),

        rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc),
        rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
        rxring->next_to_use = 0;
        atomic_set(&rxring->next_to_clean, 0);

        rxring->bufinf = kcalloc(jme->rx_ring_size,
                                 sizeof(struct jme_buffer_info),
        if (unlikely(!(rxring->bufinf)))
                goto err_free_rxring;

        /*
         * Initialize Receive Descriptors
         */
        for (i = 0 ; i < jme->rx_ring_size ; ++i) {
                if (unlikely(jme_make_new_rx_buf(jme, i))) {
                        jme_free_rx_resources(jme);

                jme_set_clean_rxdesc(jme, i);

        dma_free_coherent(&(jme->pdev->dev),
                          RX_RING_ALLOC_SIZE(jme->rx_ring_size),
        rxring->dmaalloc = 0;
        rxring->bufinf = NULL;
jme_enable_rx_engine(struct jme_adapter *jme)
        jwrite32(jme, JME_RXCS, jme->reg_rxcs |

        /*
         * Setup RX DMA Base Address
         */
        jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
        jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
        jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);

        /*
         * Setup RX Descriptor Count
         */
        jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

        /*
         * Setup Unicast Filter
         */
        jme_set_unicastaddr(jme->dev);
        jme_set_multi(jme->dev);

        jwrite32f(jme, JME_RXCS, jme->reg_rxcs |

        /*
         * Start clock for RX MAC Processor
         */
        jme_mac_rxclk_on(jme);
jme_restart_rx_engine(struct jme_adapter *jme)
        jwrite32(jme, JME_RXCS, jme->reg_rxcs |
jme_disable_rx_engine(struct jme_adapter *jme)
        jwrite32(jme, JME_RXCS, jme->reg_rxcs);

        val = jread32(jme, JME_RXCS);
        for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
                val = jread32(jme, JME_RXCS);

                pr_err("Disable RX engine timeout\n");

        /*
         * Stop clock for RX MAC Processor
         */
        jme_mac_rxclk_off(jme);
940 jme_udpsum(struct sk_buff
*skb
)
944 if (skb
->len
< (ETH_HLEN
+ sizeof(struct iphdr
)))
946 if (skb
->protocol
!= htons(ETH_P_IP
))
948 skb_set_network_header(skb
, ETH_HLEN
);
950 if (ip_hdr(skb
)->protocol
!= IPPROTO_UDP
||
951 skb
->len
< (ETH_HLEN
+ ip_hdrlen(skb
) + sizeof(struct udphdr
))) {
952 skb_reset_network_header(skb
);
955 skb_set_transport_header(skb
, ETH_HLEN
+ ip_hdrlen(skb
));
956 csum
= udp_hdr(skb
)->check
;
957 skb_reset_transport_header(skb
);
958 skb_reset_network_header(skb
);
964 jme_rxsum_ok(struct jme_adapter
*jme
, u16 flags
, struct sk_buff
*skb
)
966 if (!(flags
& (RXWBFLAG_TCPON
| RXWBFLAG_UDPON
| RXWBFLAG_IPV4
)))
969 if (unlikely((flags
& (RXWBFLAG_MF
| RXWBFLAG_TCPON
| RXWBFLAG_TCPCS
))
970 == RXWBFLAG_TCPON
)) {
971 if (flags
& RXWBFLAG_IPV4
)
972 netif_err(jme
, rx_err
, jme
->dev
, "TCP Checksum error\n");
976 if (unlikely((flags
& (RXWBFLAG_MF
| RXWBFLAG_UDPON
| RXWBFLAG_UDPCS
))
977 == RXWBFLAG_UDPON
) && jme_udpsum(skb
)) {
978 if (flags
& RXWBFLAG_IPV4
)
979 netif_err(jme
, rx_err
, jme
->dev
, "UDP Checksum error\n");
983 if (unlikely((flags
& (RXWBFLAG_IPV4
| RXWBFLAG_IPCS
))
985 netif_err(jme
, rx_err
, jme
->dev
, "IPv4 Checksum error\n");
993 jme_alloc_and_feed_skb(struct jme_adapter
*jme
, int idx
)
995 struct jme_ring
*rxring
= &(jme
->rxring
[0]);
996 struct rxdesc
*rxdesc
= rxring
->desc
;
997 struct jme_buffer_info
*rxbi
= rxring
->bufinf
;
1005 dma_sync_single_for_cpu(&jme
->pdev
->dev
, rxbi
->mapping
, rxbi
->len
,
1008 if (unlikely(jme_make_new_rx_buf(jme
, idx
))) {
1009 dma_sync_single_for_device(&jme
->pdev
->dev
, rxbi
->mapping
,
1010 rxbi
->len
, DMA_FROM_DEVICE
);
1012 ++(NET_STAT(jme
).rx_dropped
);
1014 framesize
= le16_to_cpu(rxdesc
->descwb
.framesize
)
1017 skb_reserve(skb
, RX_PREPAD_SIZE
);
1018 skb_put(skb
, framesize
);
1019 skb
->protocol
= eth_type_trans(skb
, jme
->dev
);
1021 if (jme_rxsum_ok(jme
, le16_to_cpu(rxdesc
->descwb
.flags
), skb
))
1022 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1024 skb_checksum_none_assert(skb
);
1026 if (rxdesc
->descwb
.flags
& cpu_to_le16(RXWBFLAG_TAGON
)) {
1027 u16 vid
= le16_to_cpu(rxdesc
->descwb
.vlan
);
1029 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
), vid
);
1030 NET_STAT(jme
).rx_bytes
+= 4;
1034 if ((rxdesc
->descwb
.flags
& cpu_to_le16(RXWBFLAG_DEST
)) ==
1035 cpu_to_le16(RXWBFLAG_DEST_MUL
))
1036 ++(NET_STAT(jme
).multicast
);
1038 NET_STAT(jme
).rx_bytes
+= framesize
;
1039 ++(NET_STAT(jme
).rx_packets
);
1042 jme_set_clean_rxdesc(jme
, idx
);
1047 jme_process_receive(struct jme_adapter
*jme
, int limit
)
1049 struct jme_ring
*rxring
= &(jme
->rxring
[0]);
1050 struct rxdesc
*rxdesc
;
1051 int i
, j
, ccnt
, desccnt
, mask
= jme
->rx_ring_mask
;
1053 if (unlikely(!atomic_dec_and_test(&jme
->rx_cleaning
)))
1056 if (unlikely(atomic_read(&jme
->link_changing
) != 1))
1059 if (unlikely(!netif_carrier_ok(jme
->dev
)))
1062 i
= atomic_read(&rxring
->next_to_clean
);
1064 rxdesc
= rxring
->desc
;
1067 if ((rxdesc
->descwb
.flags
& cpu_to_le16(RXWBFLAG_OWN
)) ||
1068 !(rxdesc
->descwb
.desccnt
& RXWBDCNT_WBCPL
))
1073 desccnt
= rxdesc
->descwb
.desccnt
& RXWBDCNT_DCNT
;
1075 if (unlikely(desccnt
> 1 ||
1076 rxdesc
->descwb
.errstat
& RXWBERR_ALLERR
)) {
1078 if (rxdesc
->descwb
.errstat
& RXWBERR_CRCERR
)
1079 ++(NET_STAT(jme
).rx_crc_errors
);
1080 else if (rxdesc
->descwb
.errstat
& RXWBERR_OVERUN
)
1081 ++(NET_STAT(jme
).rx_fifo_errors
);
1083 ++(NET_STAT(jme
).rx_errors
);
1086 limit
-= desccnt
- 1;
1088 for (j
= i
, ccnt
= desccnt
; ccnt
-- ; ) {
1089 jme_set_clean_rxdesc(jme
, j
);
1090 j
= (j
+ 1) & (mask
);
1094 jme_alloc_and_feed_skb(jme
, i
);
1097 i
= (i
+ desccnt
) & (mask
);
1101 atomic_set(&rxring
->next_to_clean
, i
);
1104 atomic_inc(&jme
->rx_cleaning
);
1106 return limit
> 0 ? limit
: 0;
jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
        if (likely(atmp == dpi->cur)) {

        if (dpi->attempt == atmp) {
                dpi->attempt = atmp;
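/*
 * jme_dynamic_pcc() below picks a packet-completion-coalescing level from the
 * RX byte/packet counts and interrupt count seen since the last PCC timer
 * tick (PCC_P3 for byte-heavy load, PCC_P2 for packet- or interrupt-heavy
 * load, PCC_P1 otherwise), and only reprograms the hardware once the same
 * level has been attempted repeatedly (dpi->cnt > 5).
 */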
jme_dynamic_pcc(struct jme_adapter *jme)
        register struct dynpcc_info *dpi = &(jme->dpi);

        if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
                jme_attempt_pcc(dpi, PCC_P3);
        else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD ||
                 dpi->intr_cnt > PCC_INTR_THRESHOLD)
                jme_attempt_pcc(dpi, PCC_P2);
        else
                jme_attempt_pcc(dpi, PCC_P1);

        if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
                if (dpi->attempt < dpi->cur)
                        tasklet_schedule(&jme->rxclean_task);
                jme_set_rx_pcc(jme, dpi->attempt);
                dpi->cur = dpi->attempt;
1150 jme_start_pcc_timer(struct jme_adapter
*jme
)
1152 struct dynpcc_info
*dpi
= &(jme
->dpi
);
1153 dpi
->last_bytes
= NET_STAT(jme
).rx_bytes
;
1154 dpi
->last_pkts
= NET_STAT(jme
).rx_packets
;
1156 jwrite32(jme
, JME_TMCSR
,
1157 TMCSR_EN
| ((0xFFFFFF - PCC_INTERVAL_US
) & TMCSR_CNT
));
1161 jme_stop_pcc_timer(struct jme_adapter
*jme
)
1163 jwrite32(jme
, JME_TMCSR
, 0);
1167 jme_shutdown_nic(struct jme_adapter
*jme
)
1171 phylink
= jme_linkstat_from_phy(jme
);
1173 if (!(phylink
& PHY_LINK_UP
)) {
* Disable all interrupts before issuing the timer
1178 jwrite32(jme
, JME_TIMER2
, TMCSR_EN
| 0xFFFFFE);
1183 jme_pcc_tasklet(struct tasklet_struct
*t
)
1185 struct jme_adapter
*jme
= from_tasklet(jme
, t
, pcc_task
);
1186 struct net_device
*netdev
= jme
->dev
;
1188 if (unlikely(test_bit(JME_FLAG_SHUTDOWN
, &jme
->flags
))) {
1189 jme_shutdown_nic(jme
);
1193 if (unlikely(!netif_carrier_ok(netdev
) ||
1194 (atomic_read(&jme
->link_changing
) != 1)
1196 jme_stop_pcc_timer(jme
);
1200 if (!(test_bit(JME_FLAG_POLL
, &jme
->flags
)))
1201 jme_dynamic_pcc(jme
);
1203 jme_start_pcc_timer(jme
);
1207 jme_polling_mode(struct jme_adapter
*jme
)
1209 jme_set_rx_pcc(jme
, PCC_OFF
);
1213 jme_interrupt_mode(struct jme_adapter
*jme
)
1215 jme_set_rx_pcc(jme
, PCC_P1
);
1219 jme_pseudo_hotplug_enabled(struct jme_adapter
*jme
)
1222 apmc
= jread32(jme
, JME_APMC
);
1223 return apmc
& JME_APMC_PSEUDO_HP_EN
;
1227 jme_start_shutdown_timer(struct jme_adapter
*jme
)
1231 apmc
= jread32(jme
, JME_APMC
) | JME_APMC_PCIE_SD_EN
;
1232 apmc
&= ~JME_APMC_EPIEN_CTRL
;
1234 jwrite32f(jme
, JME_APMC
, apmc
| JME_APMC_EPIEN_CTRL_EN
);
1237 jwrite32f(jme
, JME_APMC
, apmc
);
1239 jwrite32f(jme
, JME_TIMER2
, 0);
1240 set_bit(JME_FLAG_SHUTDOWN
, &jme
->flags
);
1241 jwrite32(jme
, JME_TMCSR
,
1242 TMCSR_EN
| ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY
) & TMCSR_CNT
));
1246 jme_stop_shutdown_timer(struct jme_adapter
*jme
)
1250 jwrite32f(jme
, JME_TMCSR
, 0);
1251 jwrite32f(jme
, JME_TIMER2
, 0);
1252 clear_bit(JME_FLAG_SHUTDOWN
, &jme
->flags
);
1254 apmc
= jread32(jme
, JME_APMC
);
1255 apmc
&= ~(JME_APMC_PCIE_SD_EN
| JME_APMC_EPIEN_CTRL
);
1256 jwrite32f(jme
, JME_APMC
, apmc
| JME_APMC_EPIEN_CTRL_DIS
);
1258 jwrite32f(jme
, JME_APMC
, apmc
);
1261 static void jme_link_change_work(struct work_struct
*work
)
1263 struct jme_adapter
*jme
= container_of(work
, struct jme_adapter
, linkch_task
);
1264 struct net_device
*netdev
= jme
->dev
;
1267 while (!atomic_dec_and_test(&jme
->link_changing
)) {
1268 atomic_inc(&jme
->link_changing
);
1269 netif_info(jme
, intr
, jme
->dev
, "Get link change lock failed\n");
1270 while (atomic_read(&jme
->link_changing
) != 1)
1271 netif_info(jme
, intr
, jme
->dev
, "Waiting link change lock\n");
1274 if (jme_check_link(netdev
, 1) && jme
->old_mtu
== netdev
->mtu
)
1277 jme
->old_mtu
= netdev
->mtu
;
1278 netif_stop_queue(netdev
);
1279 if (jme_pseudo_hotplug_enabled(jme
))
1280 jme_stop_shutdown_timer(jme
);
1282 jme_stop_pcc_timer(jme
);
1283 tasklet_disable(&jme
->txclean_task
);
1284 tasklet_disable(&jme
->rxclean_task
);
1285 tasklet_disable(&jme
->rxempty_task
);
1287 if (netif_carrier_ok(netdev
)) {
1288 jme_disable_rx_engine(jme
);
1289 jme_disable_tx_engine(jme
);
1290 jme_reset_mac_processor(jme
);
1291 jme_free_rx_resources(jme
);
1292 jme_free_tx_resources(jme
);
1294 if (test_bit(JME_FLAG_POLL
, &jme
->flags
))
1295 jme_polling_mode(jme
);
1297 netif_carrier_off(netdev
);
1300 jme_check_link(netdev
, 0);
1301 if (netif_carrier_ok(netdev
)) {
1302 rc
= jme_setup_rx_resources(jme
);
1304 pr_err("Allocating resources for RX error, Device STOPPED!\n");
1305 goto out_enable_tasklet
;
1308 rc
= jme_setup_tx_resources(jme
);
1310 pr_err("Allocating resources for TX error, Device STOPPED!\n");
1311 goto err_out_free_rx_resources
;
1314 jme_enable_rx_engine(jme
);
1315 jme_enable_tx_engine(jme
);
1317 netif_start_queue(netdev
);
1319 if (test_bit(JME_FLAG_POLL
, &jme
->flags
))
1320 jme_interrupt_mode(jme
);
1322 jme_start_pcc_timer(jme
);
1323 } else if (jme_pseudo_hotplug_enabled(jme
)) {
1324 jme_start_shutdown_timer(jme
);
1327 goto out_enable_tasklet
;
1329 err_out_free_rx_resources
:
1330 jme_free_rx_resources(jme
);
1332 tasklet_enable(&jme
->txclean_task
);
1333 tasklet_enable(&jme
->rxclean_task
);
1334 tasklet_enable(&jme
->rxempty_task
);
1336 atomic_inc(&jme
->link_changing
);
1340 jme_rx_clean_tasklet(struct tasklet_struct
*t
)
1342 struct jme_adapter
*jme
= from_tasklet(jme
, t
, rxclean_task
);
1343 struct dynpcc_info
*dpi
= &(jme
->dpi
);
1345 jme_process_receive(jme
, jme
->rx_ring_size
);
1351 jme_poll(JME_NAPI_HOLDER(holder
), JME_NAPI_WEIGHT(budget
))
1353 struct jme_adapter
*jme
= jme_napi_priv(holder
);
1356 rest
= jme_process_receive(jme
, JME_NAPI_WEIGHT_VAL(budget
));
1358 while (atomic_read(&jme
->rx_empty
) > 0) {
1359 atomic_dec(&jme
->rx_empty
);
1360 ++(NET_STAT(jme
).rx_dropped
);
1361 jme_restart_rx_engine(jme
);
1363 atomic_inc(&jme
->rx_empty
);
1366 JME_RX_COMPLETE(netdev
, holder
);
1367 jme_interrupt_mode(jme
);
1370 JME_NAPI_WEIGHT_SET(budget
, rest
);
1371 return JME_NAPI_WEIGHT_VAL(budget
) - rest
;
1375 jme_rx_empty_tasklet(struct tasklet_struct
*t
)
1377 struct jme_adapter
*jme
= from_tasklet(jme
, t
, rxempty_task
);
1379 if (unlikely(atomic_read(&jme
->link_changing
) != 1))
1382 if (unlikely(!netif_carrier_ok(jme
->dev
)))
1385 netif_info(jme
, rx_status
, jme
->dev
, "RX Queue Full!\n");
1387 jme_rx_clean_tasklet(&jme
->rxclean_task
);
1389 while (atomic_read(&jme
->rx_empty
) > 0) {
1390 atomic_dec(&jme
->rx_empty
);
1391 ++(NET_STAT(jme
).rx_dropped
);
1392 jme_restart_rx_engine(jme
);
1394 atomic_inc(&jme
->rx_empty
);
1398 jme_wake_queue_if_stopped(struct jme_adapter
*jme
)
1400 struct jme_ring
*txring
= &(jme
->txring
[0]);
1403 if (unlikely(netif_queue_stopped(jme
->dev
) &&
1404 atomic_read(&txring
->nr_free
) >= (jme
->tx_wake_threshold
))) {
1405 netif_info(jme
, tx_done
, jme
->dev
, "TX Queue Waked\n");
1406 netif_wake_queue(jme
->dev
);
1411 static void jme_tx_clean_tasklet(struct tasklet_struct
*t
)
1413 struct jme_adapter
*jme
= from_tasklet(jme
, t
, txclean_task
);
1414 struct jme_ring
*txring
= &(jme
->txring
[0]);
1415 struct txdesc
*txdesc
= txring
->desc
;
1416 struct jme_buffer_info
*txbi
= txring
->bufinf
, *ctxbi
, *ttxbi
;
1417 int i
, j
, cnt
= 0, max
, err
, mask
;
1419 tx_dbg(jme
, "Into txclean\n");
1421 if (unlikely(!atomic_dec_and_test(&jme
->tx_cleaning
)))
1424 if (unlikely(atomic_read(&jme
->link_changing
) != 1))
1427 if (unlikely(!netif_carrier_ok(jme
->dev
)))
1430 max
= jme
->tx_ring_size
- atomic_read(&txring
->nr_free
);
1431 mask
= jme
->tx_ring_mask
;
1433 for (i
= atomic_read(&txring
->next_to_clean
) ; cnt
< max
; ) {
1437 if (likely(ctxbi
->skb
&&
1438 !(txdesc
[i
].descwb
.flags
& TXWBFLAG_OWN
))) {
1440 tx_dbg(jme
, "txclean: %d+%d@%lu\n",
1441 i
, ctxbi
->nr_desc
, jiffies
);
1443 err
= txdesc
[i
].descwb
.flags
& TXWBFLAG_ALLERR
;
1445 for (j
= 1 ; j
< ctxbi
->nr_desc
; ++j
) {
1446 ttxbi
= txbi
+ ((i
+ j
) & (mask
));
1447 txdesc
[(i
+ j
) & (mask
)].dw
[0] = 0;
1449 dma_unmap_page(&jme
->pdev
->dev
,
1450 ttxbi
->mapping
, ttxbi
->len
,
1457 dev_kfree_skb(ctxbi
->skb
);
1459 cnt
+= ctxbi
->nr_desc
;
1461 if (unlikely(err
)) {
1462 ++(NET_STAT(jme
).tx_carrier_errors
);
1464 ++(NET_STAT(jme
).tx_packets
);
1465 NET_STAT(jme
).tx_bytes
+= ctxbi
->len
;
1470 ctxbi
->start_xmit
= 0;
1476 i
= (i
+ ctxbi
->nr_desc
) & mask
;
1481 tx_dbg(jme
, "txclean: done %d@%lu\n", i
, jiffies
);
1482 atomic_set(&txring
->next_to_clean
, i
);
1483 atomic_add(cnt
, &txring
->nr_free
);
1485 jme_wake_queue_if_stopped(jme
);
1488 atomic_inc(&jme
->tx_cleaning
);
1492 jme_intr_msi(struct jme_adapter
*jme
, u32 intrstat
)
1497 jwrite32f(jme
, JME_IENC
, INTR_ENABLE
);
1499 if (intrstat
& (INTR_LINKCH
| INTR_SWINTR
)) {
1501 * Link change event is critical
1502 * all other events are ignored
1504 jwrite32(jme
, JME_IEVE
, intrstat
);
1505 schedule_work(&jme
->linkch_task
);
1509 if (intrstat
& INTR_TMINTR
) {
1510 jwrite32(jme
, JME_IEVE
, INTR_TMINTR
);
1511 tasklet_schedule(&jme
->pcc_task
);
1514 if (intrstat
& (INTR_PCCTXTO
| INTR_PCCTX
)) {
1515 jwrite32(jme
, JME_IEVE
, INTR_PCCTXTO
| INTR_PCCTX
| INTR_TX0
);
1516 tasklet_schedule(&jme
->txclean_task
);
1519 if ((intrstat
& (INTR_PCCRX0TO
| INTR_PCCRX0
| INTR_RX0EMP
))) {
1520 jwrite32(jme
, JME_IEVE
, (intrstat
& (INTR_PCCRX0TO
|
1526 if (test_bit(JME_FLAG_POLL
, &jme
->flags
)) {
1527 if (intrstat
& INTR_RX0EMP
)
1528 atomic_inc(&jme
->rx_empty
);
1530 if ((intrstat
& (INTR_PCCRX0TO
| INTR_PCCRX0
| INTR_RX0EMP
))) {
1531 if (likely(JME_RX_SCHEDULE_PREP(jme
))) {
1532 jme_polling_mode(jme
);
1533 JME_RX_SCHEDULE(jme
);
1537 if (intrstat
& INTR_RX0EMP
) {
1538 atomic_inc(&jme
->rx_empty
);
1539 tasklet_hi_schedule(&jme
->rxempty_task
);
1540 } else if (intrstat
& (INTR_PCCRX0TO
| INTR_PCCRX0
)) {
1541 tasklet_hi_schedule(&jme
->rxclean_task
);
1547 * Re-enable interrupt
1549 jwrite32f(jme
, JME_IENS
, INTR_ENABLE
);
1553 jme_intr(int irq
, void *dev_id
)
1555 struct net_device
*netdev
= dev_id
;
1556 struct jme_adapter
*jme
= netdev_priv(netdev
);
1559 intrstat
= jread32(jme
, JME_IEVE
);
1562 * Check if it's really an interrupt for us
1564 if (unlikely((intrstat
& INTR_ENABLE
) == 0))
* Check if the device still exists
1570 if (unlikely(intrstat
== ~((typeof(intrstat
))0)))
1573 jme_intr_msi(jme
, intrstat
);
1579 jme_msi(int irq
, void *dev_id
)
1581 struct net_device
*netdev
= dev_id
;
1582 struct jme_adapter
*jme
= netdev_priv(netdev
);
1585 intrstat
= jread32(jme
, JME_IEVE
);
1587 jme_intr_msi(jme
, intrstat
);
1593 jme_reset_link(struct jme_adapter
*jme
)
1595 jwrite32(jme
, JME_TMCSR
, TMCSR_SWIT
);
1599 jme_restart_an(struct jme_adapter
*jme
)
1603 spin_lock_bh(&jme
->phy_lock
);
1604 bmcr
= jme_mdio_read(jme
->dev
, jme
->mii_if
.phy_id
, MII_BMCR
);
1605 bmcr
|= (BMCR_ANENABLE
| BMCR_ANRESTART
);
1606 jme_mdio_write(jme
->dev
, jme
->mii_if
.phy_id
, MII_BMCR
, bmcr
);
1607 spin_unlock_bh(&jme
->phy_lock
);
1611 jme_request_irq(struct jme_adapter
*jme
)
1614 struct net_device
*netdev
= jme
->dev
;
1615 irq_handler_t handler
= jme_intr
;
1616 int irq_flags
= IRQF_SHARED
;
1618 if (!pci_enable_msi(jme
->pdev
)) {
1619 set_bit(JME_FLAG_MSI
, &jme
->flags
);
1624 rc
= request_irq(jme
->pdev
->irq
, handler
, irq_flags
, netdev
->name
,
1628 "Unable to request %s interrupt (return: %d)\n",
1629 test_bit(JME_FLAG_MSI
, &jme
->flags
) ? "MSI" : "INTx",
1632 if (test_bit(JME_FLAG_MSI
, &jme
->flags
)) {
1633 pci_disable_msi(jme
->pdev
);
1634 clear_bit(JME_FLAG_MSI
, &jme
->flags
);
1637 netdev
->irq
= jme
->pdev
->irq
;
1644 jme_free_irq(struct jme_adapter
*jme
)
1646 free_irq(jme
->pdev
->irq
, jme
->dev
);
1647 if (test_bit(JME_FLAG_MSI
, &jme
->flags
)) {
1648 pci_disable_msi(jme
->pdev
);
1649 clear_bit(JME_FLAG_MSI
, &jme
->flags
);
1650 jme
->dev
->irq
= jme
->pdev
->irq
;
1655 jme_new_phy_on(struct jme_adapter
*jme
)
1659 reg
= jread32(jme
, JME_PHY_PWR
);
1660 reg
&= ~(PHY_PWR_DWN1SEL
| PHY_PWR_DWN1SW
|
1661 PHY_PWR_DWN2
| PHY_PWR_CLKSEL
);
1662 jwrite32(jme
, JME_PHY_PWR
, reg
);
1664 pci_read_config_dword(jme
->pdev
, PCI_PRIV_PE1
, ®
);
1665 reg
&= ~PE1_GPREG0_PBG
;
1666 reg
|= PE1_GPREG0_ENBG
;
1667 pci_write_config_dword(jme
->pdev
, PCI_PRIV_PE1
, reg
);
1671 jme_new_phy_off(struct jme_adapter
*jme
)
1675 reg
= jread32(jme
, JME_PHY_PWR
);
1676 reg
|= PHY_PWR_DWN1SEL
| PHY_PWR_DWN1SW
|
1677 PHY_PWR_DWN2
| PHY_PWR_CLKSEL
;
1678 jwrite32(jme
, JME_PHY_PWR
, reg
);
1680 pci_read_config_dword(jme
->pdev
, PCI_PRIV_PE1
, ®
);
1681 reg
&= ~PE1_GPREG0_PBG
;
1682 reg
|= PE1_GPREG0_PDD3COLD
;
1683 pci_write_config_dword(jme
->pdev
, PCI_PRIV_PE1
, reg
);
1687 jme_phy_on(struct jme_adapter
*jme
)
1691 bmcr
= jme_mdio_read(jme
->dev
, jme
->mii_if
.phy_id
, MII_BMCR
);
1692 bmcr
&= ~BMCR_PDOWN
;
1693 jme_mdio_write(jme
->dev
, jme
->mii_if
.phy_id
, MII_BMCR
, bmcr
);
1695 if (new_phy_power_ctrl(jme
->chip_main_rev
))
1696 jme_new_phy_on(jme
);
1700 jme_phy_off(struct jme_adapter
*jme
)
1704 bmcr
= jme_mdio_read(jme
->dev
, jme
->mii_if
.phy_id
, MII_BMCR
);
1706 jme_mdio_write(jme
->dev
, jme
->mii_if
.phy_id
, MII_BMCR
, bmcr
);
1708 if (new_phy_power_ctrl(jme
->chip_main_rev
))
1709 jme_new_phy_off(jme
);
1713 jme_phy_specreg_read(struct jme_adapter
*jme
, u32 specreg
)
1717 phy_addr
= JM_PHY_SPEC_REG_READ
| specreg
;
1718 jme_mdio_write(jme
->dev
, jme
->mii_if
.phy_id
, JM_PHY_SPEC_ADDR_REG
,
1720 return jme_mdio_read(jme
->dev
, jme
->mii_if
.phy_id
,
1721 JM_PHY_SPEC_DATA_REG
);
1725 jme_phy_specreg_write(struct jme_adapter
*jme
, u32 ext_reg
, u32 phy_data
)
1729 phy_addr
= JM_PHY_SPEC_REG_WRITE
| ext_reg
;
1730 jme_mdio_write(jme
->dev
, jme
->mii_if
.phy_id
, JM_PHY_SPEC_DATA_REG
,
1732 jme_mdio_write(jme
->dev
, jme
->mii_if
.phy_id
, JM_PHY_SPEC_ADDR_REG
,
1737 jme_phy_calibration(struct jme_adapter
*jme
)
1739 u32 ctrl1000
, phy_data
;
/* Enable PHY test mode 1 */
1744 ctrl1000
= jme_mdio_read(jme
->dev
, jme
->mii_if
.phy_id
, MII_CTRL1000
);
1745 ctrl1000
&= ~PHY_GAD_TEST_MODE_MSK
;
1746 ctrl1000
|= PHY_GAD_TEST_MODE_1
;
1747 jme_mdio_write(jme
->dev
, jme
->mii_if
.phy_id
, MII_CTRL1000
, ctrl1000
);
1749 phy_data
= jme_phy_specreg_read(jme
, JM_PHY_EXT_COMM_2_REG
);
1750 phy_data
&= ~JM_PHY_EXT_COMM_2_CALI_MODE_0
;
1751 phy_data
|= JM_PHY_EXT_COMM_2_CALI_LATCH
|
1752 JM_PHY_EXT_COMM_2_CALI_ENABLE
;
1753 jme_phy_specreg_write(jme
, JM_PHY_EXT_COMM_2_REG
, phy_data
);
1755 phy_data
= jme_phy_specreg_read(jme
, JM_PHY_EXT_COMM_2_REG
);
1756 phy_data
&= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE
|
1757 JM_PHY_EXT_COMM_2_CALI_MODE_0
|
1758 JM_PHY_EXT_COMM_2_CALI_LATCH
);
1759 jme_phy_specreg_write(jme
, JM_PHY_EXT_COMM_2_REG
, phy_data
);
1761 /* Disable PHY test mode */
1762 ctrl1000
= jme_mdio_read(jme
->dev
, jme
->mii_if
.phy_id
, MII_CTRL1000
);
1763 ctrl1000
&= ~PHY_GAD_TEST_MODE_MSK
;
1764 jme_mdio_write(jme
->dev
, jme
->mii_if
.phy_id
, MII_CTRL1000
, ctrl1000
);
1769 jme_phy_setEA(struct jme_adapter
*jme
)
1771 u32 phy_comm0
= 0, phy_comm1
= 0;
1774 pci_read_config_byte(jme
->pdev
, PCI_PRIV_SHARE_NICCTRL
, &nic_ctrl
);
1775 if ((nic_ctrl
& 0x3) == JME_FLAG_PHYEA_ENABLE
)
1778 switch (jme
->pdev
->device
) {
1779 case PCI_DEVICE_ID_JMICRON_JMC250
:
1780 if (((jme
->chip_main_rev
== 5) &&
1781 ((jme
->chip_sub_rev
== 0) || (jme
->chip_sub_rev
== 1) ||
1782 (jme
->chip_sub_rev
== 3))) ||
1783 (jme
->chip_main_rev
>= 6)) {
1787 if ((jme
->chip_main_rev
== 3) &&
1788 ((jme
->chip_sub_rev
== 1) || (jme
->chip_sub_rev
== 2)))
1791 case PCI_DEVICE_ID_JMICRON_JMC260
:
1792 if (((jme
->chip_main_rev
== 5) &&
1793 ((jme
->chip_sub_rev
== 0) || (jme
->chip_sub_rev
== 1) ||
1794 (jme
->chip_sub_rev
== 3))) ||
1795 (jme
->chip_main_rev
>= 6)) {
1799 if ((jme
->chip_main_rev
== 3) &&
1800 ((jme
->chip_sub_rev
== 1) || (jme
->chip_sub_rev
== 2)))
1802 if ((jme
->chip_main_rev
== 2) && (jme
->chip_sub_rev
== 0))
1804 if ((jme
->chip_main_rev
== 2) && (jme
->chip_sub_rev
== 2))
1811 jme_phy_specreg_write(jme
, JM_PHY_EXT_COMM_0_REG
, phy_comm0
);
1813 jme_phy_specreg_write(jme
, JM_PHY_EXT_COMM_1_REG
, phy_comm1
);
1819 jme_open(struct net_device
*netdev
)
1821 struct jme_adapter
*jme
= netdev_priv(netdev
);
1824 jme_clear_pm_disable_wol(jme
);
1825 JME_NAPI_ENABLE(jme
);
1827 tasklet_setup(&jme
->txclean_task
, jme_tx_clean_tasklet
);
1828 tasklet_setup(&jme
->rxclean_task
, jme_rx_clean_tasklet
);
1829 tasklet_setup(&jme
->rxempty_task
, jme_rx_empty_tasklet
);
1831 rc
= jme_request_irq(jme
);
1838 if (test_bit(JME_FLAG_SSET
, &jme
->flags
))
1839 jme_set_link_ksettings(netdev
, &jme
->old_cmd
);
1841 jme_reset_phy_processor(jme
);
1842 jme_phy_calibration(jme
);
1844 jme_reset_link(jme
);
1849 netif_stop_queue(netdev
);
1850 netif_carrier_off(netdev
);
1855 jme_set_100m_half(struct jme_adapter
*jme
)
1860 bmcr
= jme_mdio_read(jme
->dev
, jme
->mii_if
.phy_id
, MII_BMCR
);
1861 tmp
= bmcr
& ~(BMCR_ANENABLE
| BMCR_SPEED100
|
1862 BMCR_SPEED1000
| BMCR_FULLDPLX
);
1863 tmp
|= BMCR_SPEED100
;
1866 jme_mdio_write(jme
->dev
, jme
->mii_if
.phy_id
, MII_BMCR
, tmp
);
1869 jwrite32(jme
, JME_GHC
, GHC_SPEED_100M
| GHC_LINK_POLL
);
1871 jwrite32(jme
, JME_GHC
, GHC_SPEED_100M
);
#define JME_WAIT_LINK_TIME 2000 /* 2000ms */
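/*
 * jme_wait_link() below polls the PHY link status roughly every 10 ms until
 * PHY_LINK_UP is reported or JME_WAIT_LINK_TIME (2000 ms) has elapsed.
 */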
jme_wait_link(struct jme_adapter *jme)
        u32 phylink, to = JME_WAIT_LINK_TIME;

        phylink = jme_linkstat_from_phy(jme);
        while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
                usleep_range(10000, 11000);
                phylink = jme_linkstat_from_phy(jme);
1889 jme_powersave_phy(struct jme_adapter
*jme
)
1891 if (jme
->reg_pmcs
&& device_may_wakeup(&jme
->pdev
->dev
)) {
1892 jme_set_100m_half(jme
);
1893 if (jme
->reg_pmcs
& (PMCS_LFEN
| PMCS_LREN
))
1895 jme_clear_pm_enable_wol(jme
);
1902 jme_close(struct net_device
*netdev
)
1904 struct jme_adapter
*jme
= netdev_priv(netdev
);
1906 netif_stop_queue(netdev
);
1907 netif_carrier_off(netdev
);
1912 JME_NAPI_DISABLE(jme
);
1914 cancel_work_sync(&jme
->linkch_task
);
1915 tasklet_kill(&jme
->txclean_task
);
1916 tasklet_kill(&jme
->rxclean_task
);
1917 tasklet_kill(&jme
->rxempty_task
);
1919 jme_disable_rx_engine(jme
);
1920 jme_disable_tx_engine(jme
);
1921 jme_reset_mac_processor(jme
);
1922 jme_free_rx_resources(jme
);
1923 jme_free_tx_resources(jme
);
1931 jme_alloc_txdesc(struct jme_adapter
*jme
,
1932 struct sk_buff
*skb
)
1934 struct jme_ring
*txring
= &(jme
->txring
[0]);
1935 int idx
, nr_alloc
, mask
= jme
->tx_ring_mask
;
1937 idx
= txring
->next_to_use
;
1938 nr_alloc
= skb_shinfo(skb
)->nr_frags
+ 2;
1940 if (unlikely(atomic_read(&txring
->nr_free
) < nr_alloc
))
1943 atomic_sub(nr_alloc
, &txring
->nr_free
);
1945 txring
->next_to_use
= (txring
->next_to_use
+ nr_alloc
) & mask
;
1951 jme_fill_tx_map(struct pci_dev
*pdev
,
1952 struct txdesc
*txdesc
,
1953 struct jme_buffer_info
*txbi
,
1961 dmaaddr
= dma_map_page(&pdev
->dev
, page
, page_offset
, len
,
1964 if (unlikely(dma_mapping_error(&pdev
->dev
, dmaaddr
)))
1967 dma_sync_single_for_device(&pdev
->dev
, dmaaddr
, len
, DMA_TO_DEVICE
);
1971 txdesc
->desc2
.flags
= TXFLAG_OWN
;
1972 txdesc
->desc2
.flags
|= (hidma
) ? TXFLAG_64BIT
: 0;
1973 txdesc
->desc2
.datalen
= cpu_to_le16(len
);
1974 txdesc
->desc2
.bufaddrh
= cpu_to_le32((__u64
)dmaaddr
>> 32);
1975 txdesc
->desc2
.bufaddrl
= cpu_to_le32(
1976 (__u64
)dmaaddr
& 0xFFFFFFFFUL
);
1978 txbi
->mapping
= dmaaddr
;
1983 static void jme_drop_tx_map(struct jme_adapter
*jme
, int startidx
, int count
)
1985 struct jme_ring
*txring
= &(jme
->txring
[0]);
1986 struct jme_buffer_info
*txbi
= txring
->bufinf
, *ctxbi
;
1987 int mask
= jme
->tx_ring_mask
;
1990 for (j
= 0 ; j
< count
; j
++) {
1991 ctxbi
= txbi
+ ((startidx
+ j
+ 2) & (mask
));
1992 dma_unmap_page(&jme
->pdev
->dev
, ctxbi
->mapping
, ctxbi
->len
,
2001 jme_map_tx_skb(struct jme_adapter
*jme
, struct sk_buff
*skb
, int idx
)
2003 struct jme_ring
*txring
= &(jme
->txring
[0]);
2004 struct txdesc
*txdesc
= txring
->desc
, *ctxdesc
;
2005 struct jme_buffer_info
*txbi
= txring
->bufinf
, *ctxbi
;
2006 bool hidma
= jme
->dev
->features
& NETIF_F_HIGHDMA
;
2007 int i
, nr_frags
= skb_shinfo(skb
)->nr_frags
;
2008 int mask
= jme
->tx_ring_mask
;
2012 for (i
= 0 ; i
< nr_frags
; ++i
) {
2013 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
2015 ctxdesc
= txdesc
+ ((idx
+ i
+ 2) & (mask
));
2016 ctxbi
= txbi
+ ((idx
+ i
+ 2) & (mask
));
2018 ret
= jme_fill_tx_map(jme
->pdev
, ctxdesc
, ctxbi
,
2019 skb_frag_page(frag
), skb_frag_off(frag
),
2020 skb_frag_size(frag
), hidma
);
2022 jme_drop_tx_map(jme
, idx
, i
);
2027 len
= skb_is_nonlinear(skb
) ? skb_headlen(skb
) : skb
->len
;
2028 ctxdesc
= txdesc
+ ((idx
+ 1) & (mask
));
2029 ctxbi
= txbi
+ ((idx
+ 1) & (mask
));
2030 ret
= jme_fill_tx_map(jme
->pdev
, ctxdesc
, ctxbi
, virt_to_page(skb
->data
),
2031 offset_in_page(skb
->data
), len
, hidma
);
2033 jme_drop_tx_map(jme
, idx
, i
);
2042 jme_tx_tso(struct sk_buff
*skb
, __le16
*mss
, u8
*flags
)
2044 *mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
<< TXDESC_MSS_SHIFT
);
2046 *flags
|= TXFLAG_LSEN
;
2048 if (skb
->protocol
== htons(ETH_P_IP
)) {
2049 struct iphdr
*iph
= ip_hdr(skb
);
2052 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
2057 tcp_v6_gso_csum_prep(skb
);
2067 jme_tx_csum(struct jme_adapter
*jme
, struct sk_buff
*skb
, u8
*flags
)
2069 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
2072 switch (skb
->protocol
) {
2073 case htons(ETH_P_IP
):
2074 ip_proto
= ip_hdr(skb
)->protocol
;
2076 case htons(ETH_P_IPV6
):
2077 ip_proto
= ipv6_hdr(skb
)->nexthdr
;
2086 *flags
|= TXFLAG_TCPCS
;
2089 *flags
|= TXFLAG_UDPCS
;
2092 netif_err(jme
, tx_err
, jme
->dev
, "Error upper layer protocol\n");
2099 jme_tx_vlan(struct sk_buff
*skb
, __le16
*vlan
, u8
*flags
)
2101 if (skb_vlan_tag_present(skb
)) {
2102 *flags
|= TXFLAG_TAGON
;
2103 *vlan
= cpu_to_le16(skb_vlan_tag_get(skb
));
2108 jme_fill_tx_desc(struct jme_adapter
*jme
, struct sk_buff
*skb
, int idx
)
2110 struct jme_ring
*txring
= &(jme
->txring
[0]);
2111 struct txdesc
*txdesc
;
2112 struct jme_buffer_info
*txbi
;
2116 txdesc
= (struct txdesc
*)txring
->desc
+ idx
;
2117 txbi
= txring
->bufinf
+ idx
;
2123 txdesc
->desc1
.pktsize
= cpu_to_le16(skb
->len
);
* Set the OWN bit last, in case the kernel transmits faster than the NIC
* and the NIC tries to send this descriptor before we tell it to start
* sending this TX queue.
* Other fields are already filled correctly.
2132 flags
= TXFLAG_OWN
| TXFLAG_INT
;
2134 * Set checksum flags while not tso
2136 if (jme_tx_tso(skb
, &txdesc
->desc1
.mss
, &flags
))
2137 jme_tx_csum(jme
, skb
, &flags
);
2138 jme_tx_vlan(skb
, &txdesc
->desc1
.vlan
, &flags
);
2139 ret
= jme_map_tx_skb(jme
, skb
, idx
);
2143 txdesc
->desc1
.flags
= flags
;
2145 * Set tx buffer info after telling NIC to send
2146 * For better tx_clean timing
2149 txbi
->nr_desc
= skb_shinfo(skb
)->nr_frags
+ 2;
2151 txbi
->len
= skb
->len
;
2152 txbi
->start_xmit
= jiffies
;
2153 if (!txbi
->start_xmit
)
2154 txbi
->start_xmit
= (0UL-1);
2160 jme_stop_queue_if_full(struct jme_adapter
*jme
)
2162 struct jme_ring
*txring
= &(jme
->txring
[0]);
2163 struct jme_buffer_info
*txbi
= txring
->bufinf
;
2164 int idx
= atomic_read(&txring
->next_to_clean
);
2169 if (unlikely(atomic_read(&txring
->nr_free
) < (MAX_SKB_FRAGS
+2))) {
2170 netif_stop_queue(jme
->dev
);
2171 netif_info(jme
, tx_queued
, jme
->dev
, "TX Queue Paused\n");
2173 if (atomic_read(&txring
->nr_free
)
2174 >= (jme
->tx_wake_threshold
)) {
2175 netif_wake_queue(jme
->dev
);
2176 netif_info(jme
, tx_queued
, jme
->dev
, "TX Queue Fast Waked\n");
2180 if (unlikely(txbi
->start_xmit
&&
2181 time_is_before_eq_jiffies(txbi
->start_xmit
+ TX_TIMEOUT
) &&
2183 netif_stop_queue(jme
->dev
);
2184 netif_info(jme
, tx_queued
, jme
->dev
,
2185 "TX Queue Stopped %d@%lu\n", idx
, jiffies
);
2190 * This function is already protected by netif_tx_lock()
2194 jme_start_xmit(struct sk_buff
*skb
, struct net_device
*netdev
)
2196 struct jme_adapter
*jme
= netdev_priv(netdev
);
2199 if (unlikely(skb_is_gso(skb
) && skb_cow_head(skb
, 0))) {
2200 dev_kfree_skb_any(skb
);
2201 ++(NET_STAT(jme
).tx_dropped
);
2202 return NETDEV_TX_OK
;
2205 idx
= jme_alloc_txdesc(jme
, skb
);
2207 if (unlikely(idx
< 0)) {
2208 netif_stop_queue(netdev
);
2209 netif_err(jme
, tx_err
, jme
->dev
,
2210 "BUG! Tx ring full when queue awake!\n");
2212 return NETDEV_TX_BUSY
;
2215 if (jme_fill_tx_desc(jme
, skb
, idx
))
2216 return NETDEV_TX_OK
;
2218 jwrite32(jme
, JME_TXCS
, jme
->reg_txcs
|
2219 TXCS_SELECT_QUEUE0
|
2223 tx_dbg(jme
, "xmit: %d+%d@%lu\n",
2224 idx
, skb_shinfo(skb
)->nr_frags
+ 2, jiffies
);
2225 jme_stop_queue_if_full(jme
);
2227 return NETDEV_TX_OK
;
2231 jme_set_unicastaddr(struct net_device
*netdev
)
2233 struct jme_adapter
*jme
= netdev_priv(netdev
);
2236 val
= (netdev
->dev_addr
[3] & 0xff) << 24 |
2237 (netdev
->dev_addr
[2] & 0xff) << 16 |
2238 (netdev
->dev_addr
[1] & 0xff) << 8 |
2239 (netdev
->dev_addr
[0] & 0xff);
2240 jwrite32(jme
, JME_RXUMA_LO
, val
);
2241 val
= (netdev
->dev_addr
[5] & 0xff) << 8 |
2242 (netdev
->dev_addr
[4] & 0xff);
2243 jwrite32(jme
, JME_RXUMA_HI
, val
);
2247 jme_set_macaddr(struct net_device
*netdev
, void *p
)
2249 struct jme_adapter
*jme
= netdev_priv(netdev
);
2250 struct sockaddr
*addr
= p
;
2252 if (netif_running(netdev
))
2255 spin_lock_bh(&jme
->macaddr_lock
);
2256 eth_hw_addr_set(netdev
, addr
->sa_data
);
2257 jme_set_unicastaddr(netdev
);
2258 spin_unlock_bh(&jme
->macaddr_lock
);
2264 jme_set_multi(struct net_device
*netdev
)
2266 struct jme_adapter
*jme
= netdev_priv(netdev
);
2267 u32 mc_hash
[2] = {};
2269 spin_lock_bh(&jme
->rxmcs_lock
);
2271 jme
->reg_rxmcs
|= RXMCS_BRDFRAME
| RXMCS_UNIFRAME
;
2273 if (netdev
->flags
& IFF_PROMISC
) {
2274 jme
->reg_rxmcs
|= RXMCS_ALLFRAME
;
2275 } else if (netdev
->flags
& IFF_ALLMULTI
) {
2276 jme
->reg_rxmcs
|= RXMCS_ALLMULFRAME
;
2277 } else if (netdev
->flags
& IFF_MULTICAST
) {
2278 struct netdev_hw_addr
*ha
;
2281 jme
->reg_rxmcs
|= RXMCS_MULFRAME
| RXMCS_MULFILTERED
;
2282 netdev_for_each_mc_addr(ha
, netdev
) {
2283 bit_nr
= ether_crc(ETH_ALEN
, ha
->addr
) & 0x3F;
2284 mc_hash
[bit_nr
>> 5] |= 1 << (bit_nr
& 0x1F);
2287 jwrite32(jme
, JME_RXMCHT_LO
, mc_hash
[0]);
2288 jwrite32(jme
, JME_RXMCHT_HI
, mc_hash
[1]);
2292 jwrite32(jme
, JME_RXMCS
, jme
->reg_rxmcs
);
2294 spin_unlock_bh(&jme
->rxmcs_lock
);
2298 jme_change_mtu(struct net_device
*netdev
, int new_mtu
)
2300 struct jme_adapter
*jme
= netdev_priv(netdev
);
2302 WRITE_ONCE(netdev
->mtu
, new_mtu
);
2303 netdev_update_features(netdev
);
2305 jme_restart_rx_engine(jme
);
2306 jme_reset_link(jme
);
2312 jme_tx_timeout(struct net_device
*netdev
, unsigned int txqueue
)
2314 struct jme_adapter
*jme
= netdev_priv(netdev
);
2317 jme_reset_phy_processor(jme
);
2318 if (test_bit(JME_FLAG_SSET
, &jme
->flags
))
2319 jme_set_link_ksettings(netdev
, &jme
->old_cmd
);
2322 * Force to Reset the link again
2324 jme_reset_link(jme
);
2328 jme_get_drvinfo(struct net_device
*netdev
,
2329 struct ethtool_drvinfo
*info
)
2331 struct jme_adapter
*jme
= netdev_priv(netdev
);
2333 strscpy(info
->driver
, DRV_NAME
, sizeof(info
->driver
));
2334 strscpy(info
->version
, DRV_VERSION
, sizeof(info
->version
));
2335 strscpy(info
->bus_info
, pci_name(jme
->pdev
), sizeof(info
->bus_info
));
2339 jme_get_regs_len(struct net_device
*netdev
)
2345 mmapio_memcpy(struct jme_adapter
*jme
, u32
*p
, u32 reg
, int len
)
2349 for (i
= 0 ; i
< len
; i
+= 4)
2350 p
[i
>> 2] = jread32(jme
, reg
+ i
);
2354 mdio_memcpy(struct jme_adapter
*jme
, u32
*p
, int reg_nr
)
2357 u16
*p16
= (u16
*)p
;
2359 for (i
= 0 ; i
< reg_nr
; ++i
)
2360 p16
[i
] = jme_mdio_read(jme
->dev
, jme
->mii_if
.phy_id
, i
);
2364 jme_get_regs(struct net_device
*netdev
, struct ethtool_regs
*regs
, void *p
)
2366 struct jme_adapter
*jme
= netdev_priv(netdev
);
2367 u32
*p32
= (u32
*)p
;
2369 memset(p
, 0xFF, JME_REG_LEN
);
2372 mmapio_memcpy(jme
, p32
, JME_MAC
, JME_MAC_LEN
);
2375 mmapio_memcpy(jme
, p32
, JME_PHY
, JME_PHY_LEN
);
2378 mmapio_memcpy(jme
, p32
, JME_MISC
, JME_MISC_LEN
);
2381 mmapio_memcpy(jme
, p32
, JME_RSS
, JME_RSS_LEN
);
2384 mdio_memcpy(jme
, p32
, JME_PHY_REG_NR
);
2387 static int jme_get_coalesce(struct net_device
*netdev
,
2388 struct ethtool_coalesce
*ecmd
,
2389 struct kernel_ethtool_coalesce
*kernel_coal
,
2390 struct netlink_ext_ack
*extack
)
2392 struct jme_adapter
*jme
= netdev_priv(netdev
);
2394 ecmd
->tx_coalesce_usecs
= PCC_TX_TO
;
2395 ecmd
->tx_max_coalesced_frames
= PCC_TX_CNT
;
2397 if (test_bit(JME_FLAG_POLL
, &jme
->flags
)) {
2398 ecmd
->use_adaptive_rx_coalesce
= false;
2399 ecmd
->rx_coalesce_usecs
= 0;
2400 ecmd
->rx_max_coalesced_frames
= 0;
2404 ecmd
->use_adaptive_rx_coalesce
= true;
2406 switch (jme
->dpi
.cur
) {
2408 ecmd
->rx_coalesce_usecs
= PCC_P1_TO
;
2409 ecmd
->rx_max_coalesced_frames
= PCC_P1_CNT
;
2412 ecmd
->rx_coalesce_usecs
= PCC_P2_TO
;
2413 ecmd
->rx_max_coalesced_frames
= PCC_P2_CNT
;
2416 ecmd
->rx_coalesce_usecs
= PCC_P3_TO
;
2417 ecmd
->rx_max_coalesced_frames
= PCC_P3_CNT
;
2426 static int jme_set_coalesce(struct net_device
*netdev
,
2427 struct ethtool_coalesce
*ecmd
,
2428 struct kernel_ethtool_coalesce
*kernel_coal
,
2429 struct netlink_ext_ack
*extack
)
2431 struct jme_adapter
*jme
= netdev_priv(netdev
);
2432 struct dynpcc_info
*dpi
= &(jme
->dpi
);
2434 if (netif_running(netdev
))
2437 if (ecmd
->use_adaptive_rx_coalesce
&&
2438 test_bit(JME_FLAG_POLL
, &jme
->flags
)) {
2439 clear_bit(JME_FLAG_POLL
, &jme
->flags
);
2440 jme
->jme_rx
= netif_rx
;
2442 dpi
->attempt
= PCC_P1
;
2444 jme_set_rx_pcc(jme
, PCC_P1
);
2445 jme_interrupt_mode(jme
);
2446 } else if (!(ecmd
->use_adaptive_rx_coalesce
) &&
2447 !(test_bit(JME_FLAG_POLL
, &jme
->flags
))) {
2448 set_bit(JME_FLAG_POLL
, &jme
->flags
);
2449 jme
->jme_rx
= netif_receive_skb
;
2450 jme_interrupt_mode(jme
);
2457 jme_get_pauseparam(struct net_device
*netdev
,
2458 struct ethtool_pauseparam
*ecmd
)
2460 struct jme_adapter
*jme
= netdev_priv(netdev
);
2463 ecmd
->tx_pause
= (jme
->reg_txpfc
& TXPFC_PF_EN
) != 0;
2464 ecmd
->rx_pause
= (jme
->reg_rxmcs
& RXMCS_FLOWCTRL
) != 0;
2466 spin_lock_bh(&jme
->phy_lock
);
2467 val
= jme_mdio_read(jme
->dev
, jme
->mii_if
.phy_id
, MII_ADVERTISE
);
2468 spin_unlock_bh(&jme
->phy_lock
);
2471 (val
& (ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
)) != 0;
2475 jme_set_pauseparam(struct net_device
*netdev
,
2476 struct ethtool_pauseparam
*ecmd
)
2478 struct jme_adapter
*jme
= netdev_priv(netdev
);
2481 if (((jme
->reg_txpfc
& TXPFC_PF_EN
) != 0) ^
2482 (ecmd
->tx_pause
!= 0)) {
2485 jme
->reg_txpfc
|= TXPFC_PF_EN
;
2487 jme
->reg_txpfc
&= ~TXPFC_PF_EN
;
2489 jwrite32(jme
, JME_TXPFC
, jme
->reg_txpfc
);
2492 spin_lock_bh(&jme
->rxmcs_lock
);
2493 if (((jme
->reg_rxmcs
& RXMCS_FLOWCTRL
) != 0) ^
2494 (ecmd
->rx_pause
!= 0)) {
2497 jme
->reg_rxmcs
|= RXMCS_FLOWCTRL
;
2499 jme
->reg_rxmcs
&= ~RXMCS_FLOWCTRL
;
2501 jwrite32(jme
, JME_RXMCS
, jme
->reg_rxmcs
);
2503 spin_unlock_bh(&jme
->rxmcs_lock
);
2505 spin_lock_bh(&jme
->phy_lock
);
2506 val
= jme_mdio_read(jme
->dev
, jme
->mii_if
.phy_id
, MII_ADVERTISE
);
2507 if (((val
& (ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
)) != 0) ^
2508 (ecmd
->autoneg
!= 0)) {
2511 val
|= (ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
);
2513 val
&= ~(ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
);
2515 jme_mdio_write(jme
->dev
, jme
->mii_if
.phy_id
,
2516 MII_ADVERTISE
, val
);
2518 spin_unlock_bh(&jme
->phy_lock
);
2524 jme_get_wol(struct net_device
*netdev
,
2525 struct ethtool_wolinfo
*wol
)
2527 struct jme_adapter
*jme
= netdev_priv(netdev
);
2529 wol
->supported
= WAKE_MAGIC
| WAKE_PHY
;
2533 if (jme
->reg_pmcs
& (PMCS_LFEN
| PMCS_LREN
))
2534 wol
->wolopts
|= WAKE_PHY
;
2536 if (jme
->reg_pmcs
& PMCS_MFEN
)
2537 wol
->wolopts
|= WAKE_MAGIC
;
2542 jme_set_wol(struct net_device
*netdev
,
2543 struct ethtool_wolinfo
*wol
)
2545 struct jme_adapter
*jme
= netdev_priv(netdev
);
2547 if (wol
->wolopts
& (WAKE_MAGICSECURE
|
2556 if (wol
->wolopts
& WAKE_PHY
)
2557 jme
->reg_pmcs
|= PMCS_LFEN
| PMCS_LREN
;
2559 if (wol
->wolopts
& WAKE_MAGIC
)
2560 jme
->reg_pmcs
|= PMCS_MFEN
;
2566 jme_get_link_ksettings(struct net_device
*netdev
,
2567 struct ethtool_link_ksettings
*cmd
)
2569 struct jme_adapter
*jme
= netdev_priv(netdev
);
2571 spin_lock_bh(&jme
->phy_lock
);
2572 mii_ethtool_get_link_ksettings(&jme
->mii_if
, cmd
);
2573 spin_unlock_bh(&jme
->phy_lock
);
2578 jme_set_link_ksettings(struct net_device
*netdev
,
2579 const struct ethtool_link_ksettings
*cmd
)
2581 struct jme_adapter
*jme
= netdev_priv(netdev
);
2584 if (cmd
->base
.speed
== SPEED_1000
&&
2585 cmd
->base
.autoneg
!= AUTONEG_ENABLE
)
* Check if the user changed only the duplex while force_media is set.
* The hardware would not generate a link change interrupt for that.
2592 if (jme
->mii_if
.force_media
&&
2593 cmd
->base
.autoneg
!= AUTONEG_ENABLE
&&
2594 (jme
->mii_if
.full_duplex
!= cmd
->base
.duplex
))
2597 spin_lock_bh(&jme
->phy_lock
);
2598 rc
= mii_ethtool_set_link_ksettings(&jme
->mii_if
, cmd
);
2599 spin_unlock_bh(&jme
->phy_lock
);
2603 jme_reset_link(jme
);
2604 jme
->old_cmd
= *cmd
;
2605 set_bit(JME_FLAG_SSET
, &jme
->flags
);
2612 jme_ioctl(struct net_device
*netdev
, struct ifreq
*rq
, int cmd
)
2615 struct jme_adapter
*jme
= netdev_priv(netdev
);
2616 struct mii_ioctl_data
*mii_data
= if_mii(rq
);
2617 unsigned int duplex_chg
;
2619 if (cmd
== SIOCSMIIREG
) {
2620 u16 val
= mii_data
->val_in
;
2621 if (!(val
& (BMCR_RESET
|BMCR_ANENABLE
)) &&
2622 (val
& BMCR_SPEED1000
))
2626 spin_lock_bh(&jme
->phy_lock
);
2627 rc
= generic_mii_ioctl(&jme
->mii_if
, mii_data
, cmd
, &duplex_chg
);
2628 spin_unlock_bh(&jme
->phy_lock
);
2630 if (!rc
&& (cmd
== SIOCSMIIREG
)) {
2632 jme_reset_link(jme
);
2633 jme_get_link_ksettings(netdev
, &jme
->old_cmd
);
2634 set_bit(JME_FLAG_SSET
, &jme
->flags
);
2641 jme_get_link(struct net_device
*netdev
)
2643 struct jme_adapter
*jme
= netdev_priv(netdev
);
2644 return jread32(jme
, JME_PHY_LINK
) & PHY_LINK_UP
;
2648 jme_get_msglevel(struct net_device
*netdev
)
2650 struct jme_adapter
*jme
= netdev_priv(netdev
);
2651 return jme
->msg_enable
;
2655 jme_set_msglevel(struct net_device
*netdev
, u32 value
)
2657 struct jme_adapter
*jme
= netdev_priv(netdev
);
2658 jme
->msg_enable
= value
;
2661 static netdev_features_t
2662 jme_fix_features(struct net_device
*netdev
, netdev_features_t features
)
2664 if (netdev
->mtu
> 1900)
2665 features
&= ~(NETIF_F_ALL_TSO
| NETIF_F_CSUM_MASK
);
2670 jme_set_features(struct net_device
*netdev
, netdev_features_t features
)
2672 struct jme_adapter
*jme
= netdev_priv(netdev
);
2674 spin_lock_bh(&jme
->rxmcs_lock
);
2675 if (features
& NETIF_F_RXCSUM
)
2676 jme
->reg_rxmcs
|= RXMCS_CHECKSUM
;
2678 jme
->reg_rxmcs
&= ~RXMCS_CHECKSUM
;
2679 jwrite32(jme
, JME_RXMCS
, jme
->reg_rxmcs
);
2680 spin_unlock_bh(&jme
->rxmcs_lock
);
2685 #ifdef CONFIG_NET_POLL_CONTROLLER
2686 static void jme_netpoll(struct net_device
*dev
)
2688 unsigned long flags
;
2690 local_irq_save(flags
);
2691 jme_intr(dev
->irq
, dev
);
2692 local_irq_restore(flags
);
2697 jme_nway_reset(struct net_device
*netdev
)
2699 struct jme_adapter
*jme
= netdev_priv(netdev
);
2700 jme_restart_an(jme
);

static u8
jme_smb_read(struct jme_adapter *jme, unsigned int addr)
{
	u32 val;
	int to;

	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
		return 0xFF;
	}

	jwrite32(jme, JME_SMBINTF,
		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		SMBINTF_HWRWN_READ |
		SMBINTF_HWCMD);

	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
		return 0xFF;
	}

	return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
}

static void
jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
{
	u32 val;
	int to;

	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
		return;
	}

	jwrite32(jme, JME_SMBINTF,
		((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		SMBINTF_HWRWN_WRITE |
		SMBINTF_HWCMD);

	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
		return;
	}

	mdelay(2);
}

static int
jme_get_eeprom_len(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	val = jread32(jme, JME_SMBCSR);
	return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
}

static int
jme_get_eeprom(struct net_device *netdev,
		struct ethtool_eeprom *eeprom, u8 *data)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, offset = eeprom->offset, len = eeprom->len;

	/*
	 * ethtool will check the boundary for us
	 */
	eeprom->magic = JME_EEPROM_MAGIC;
	for (i = 0 ; i < len ; ++i)
		data[i] = jme_smb_read(jme, i + offset);

	return 0;
}

static int
jme_set_eeprom(struct net_device *netdev,
		struct ethtool_eeprom *eeprom, u8 *data)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, offset = eeprom->offset, len = eeprom->len;

	if (eeprom->magic != JME_EEPROM_MAGIC)
		return -EINVAL;

	/*
	 * ethtool will check the boundary for us
	 */
	for (i = 0 ; i < len ; ++i)
		jme_smb_write(jme, i + offset, data[i]);

	return 0;
}
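
/*
 * ethtool entry points (added comment).  As an illustration only, with a
 * hypothetical interface name: "ethtool -e eth0" dumps the EEPROM through
 * jme_get_eeprom() and the SMB helpers above, and "ethtool -s eth0 autoneg on"
 * ends up in jme_set_link_ksettings().
 */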

static const struct ethtool_ops jme_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo		= jme_get_drvinfo,
	.get_regs_len		= jme_get_regs_len,
	.get_regs		= jme_get_regs,
	.get_coalesce		= jme_get_coalesce,
	.set_coalesce		= jme_set_coalesce,
	.get_pauseparam		= jme_get_pauseparam,
	.set_pauseparam		= jme_set_pauseparam,
	.get_wol		= jme_get_wol,
	.set_wol		= jme_set_wol,
	.get_link		= jme_get_link,
	.get_msglevel		= jme_get_msglevel,
	.set_msglevel		= jme_set_msglevel,
	.nway_reset		= jme_nway_reset,
	.get_eeprom_len		= jme_get_eeprom_len,
	.get_eeprom		= jme_get_eeprom,
	.set_eeprom		= jme_set_eeprom,
	.get_link_ksettings	= jme_get_link_ksettings,
	.set_link_ksettings	= jme_set_link_ksettings,
};
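
/*
 * Added comment: pick the widest usable DMA mask, trying 64-bit and then
 * 40-bit addressing on the gigabit JMC250 before falling back to 32-bit.
 * The return value (1, 0, or negative) tells jme_init_one() whether to
 * advertise NETIF_F_HIGHDMA.
 */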

static int
jme_pci_dma64(struct pci_dev *pdev)
{
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 1;

	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
		return 1;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return 0;

	return -1;
}

static inline void
jme_phy_init(struct jme_adapter *jme)
{
	u16 reg26;

	reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
}

static inline void
jme_check_hw_ver(struct jme_adapter *jme)
{
	u32 chipmode;

	chipmode = jread32(jme, JME_CHIPMODE);

	jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
	jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
	jme->chip_main_rev = jme->chiprev & 0xF;
	jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
}

static const struct net_device_ops jme_netdev_ops = {
	.ndo_open		= jme_open,
	.ndo_stop		= jme_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= jme_ioctl,
	.ndo_start_xmit		= jme_start_xmit,
	.ndo_set_mac_address	= jme_set_macaddr,
	.ndo_set_rx_mode	= jme_set_multi,
	.ndo_change_mtu		= jme_change_mtu,
	.ndo_tx_timeout		= jme_tx_timeout,
	.ndo_fix_features	= jme_fix_features,
	.ndo_set_features	= jme_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= jme_netpoll,
#endif
};

static int
jme_init_one(struct pci_dev *pdev,
	     const struct pci_device_id *ent)
{
	int rc = 0, using_dac, i;
	struct net_device *netdev;
	struct jme_adapter *jme;
	u16 bmcr, bmsr;
	u32 apmc;

	/*
	 * set up PCI device basics
	 */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	rc = pci_enable_device(pdev);
	if (rc) {
		pr_err("Cannot enable PCI device\n");
		return rc;
	}

	using_dac = jme_pci_dma64(pdev);
	if (using_dac < 0) {
		pr_err("Cannot set PCI DMA Mask\n");
		rc = -EIO;
		goto err_out_disable_pdev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("No PCI resource region found\n");
		rc = -ENOMEM;
		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pr_err("Cannot obtain PCI resource region\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(*jme));
	if (!netdev) {
		rc = -ENOMEM;
		goto err_out_release_regions;
	}
	netdev->netdev_ops = &jme_netdev_ops;
	netdev->ethtool_ops = &jme_ethtool_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	netdev->hw_features = NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;
	netdev->features = NETIF_F_IP_CSUM |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_SG |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* MTU range: 1280 - 9202 */
	netdev->min_mtu = IPV6_MIN_MTU;
	netdev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE - ETH_HLEN;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	/*
	 * init adapter info
	 */
	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	jme->jme_rx = netif_rx;
	jme->old_mtu = netdev->mtu = 1500;
	jme->phylink = 0;
	jme->tx_ring_size = 1 << 10;
	jme->tx_ring_mask = jme->tx_ring_size - 1;
	jme->tx_wake_threshold = 1 << 9;
	jme->rx_ring_size = 1 << 9;
	jme->rx_ring_mask = jme->rx_ring_size - 1;
	jme->msg_enable = JME_DEF_MSG_ENABLE;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
			    pci_resource_len(pdev, 0));
	if (!(jme->regs)) {
		pr_err("Mapping PCI resource region error\n");
		rc = -ENOMEM;
		goto err_out_free_netdev;
	}
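
	/*
	 * Added comment: honor the pseudo hot-plug module parameters by
	 * clearing or setting JME_APMC_PSEUDO_HP_EN before the MAC is
	 * brought up.
	 */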
	if (no_pseudohp) {
		apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	} else if (force_pseudohp) {
		apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	}

	netif_napi_add(netdev, &jme->napi, jme_poll);

	spin_lock_init(&jme->phy_lock);
	spin_lock_init(&jme->macaddr_lock);
	spin_lock_init(&jme->rxmcs_lock);

	atomic_set(&jme->link_changing, 1);
	atomic_set(&jme->rx_cleaning, 1);
	atomic_set(&jme->tx_cleaning, 1);
	atomic_set(&jme->rx_empty, 1);

	tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
	INIT_WORK(&jme->linkch_task, jme_link_change_work);
	jme->dpi.cur = PCC_P1;

	jme->reg_ghc = 0;
	jme->reg_rxcs = RXCS_DEFAULT;
	jme->reg_rxmcs = RXMCS_DEFAULT;
	jme->reg_txpfc = 0;
	jme->reg_pmcs = PMCS_MFEN;
	jme->reg_gpreg1 = GPREG1_DEFAULT;

	if (jme->reg_rxmcs & RXMCS_CHECKSUM)
		netdev->features |= NETIF_F_RXCSUM;

	/*
	 * Get Max Read Req Size from PCI Config Space
	 */
	pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
	jme->mrrs &= PCI_DCSR_MRRS_MASK;
	switch (jme->mrrs) {
	case MRRS_128B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
		break;
	case MRRS_256B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
		break;
	default:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
		break;
	}
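
	/*
	 * Added comment: the chip/FPGA revision detected below also decides
	 * how the PHY is located; FPGA-based boards scan MDIO addresses 1-31
	 * for a responding PHY, while production silicon uses PHY address 1.
	 */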

	/*
	 * Must check before reset_mac_processor
	 */
	jme_check_hw_ver(jme);
	jme->mii_if.dev = netdev;
	if (jme->fpgaver) {
		jme->mii_if.phy_id = 0;
		for (i = 1 ; i < 32 ; ++i) {
			bmcr = jme_mdio_read(netdev, i, MII_BMCR);
			bmsr = jme_mdio_read(netdev, i, MII_BMSR);
			if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
				jme->mii_if.phy_id = i;
				break;
			}
		}

		if (!jme->mii_if.phy_id) {
			rc = -EIO;
			pr_err("Can not find phy_id\n");
			goto err_out_unmap;
		}

		jme->reg_ghc |= GHC_LINK_POLL;
	} else {
		jme->mii_if.phy_id = 1;
	}
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme->mii_if.supports_gmii = true;
	else
		jme->mii_if.supports_gmii = false;
	jme->mii_if.phy_id_mask = 0x1F;
	jme->mii_if.reg_num_mask = 0x1F;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;

	jme_clear_pm_disable_wol(jme);
	device_init_wakeup(&pdev->dev, true);

	jme_set_phyfifo_5level(jme);
	jme->pcirev = pdev->revision;
	if (!jme->fpgaver)
		jme_phy_init(jme);
	jme_phy_off(jme);

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if (rc) {
		pr_err("Reload eeprom for reading MAC Address error\n");
		goto err_out_unmap;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);

	rc = register_netdev(netdev);
	if (rc) {
		pr_err("Cannot register net device\n");
		goto err_out_unmap;
	}

	netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
		   "JMC250 Gigabit Ethernet" :
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
		   "JMC260 Fast Ethernet" : "Unknown",
		   (jme->fpgaver != 0) ? " (FPGA)" : "",
		   (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
		   jme->pcirev, netdev->dev_addr);

	return 0;

err_out_unmap:
	iounmap(jme->regs);
err_out_free_netdev:
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);

	return rc;
}

static void
jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	iounmap(jme->regs);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void
jme_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	jme_powersave_phy(jme);
	pci_pme_active(pdev, true);
}
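
/*
 * System sleep support (added comment): suspend quiesces the tasklets and
 * the RX/TX DMA engines and puts the PHY into its power-save state, while
 * resume restores any cached ethtool link settings before re-arming the
 * link state machine.
 */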

#ifdef CONFIG_PM_SLEEP
static int
jme_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct jme_adapter *jme = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	atomic_dec(&jme->link_changing);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);
	jme_stop_irq(jme);

	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	if (netif_carrier_ok(netdev)) {
		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_polling_mode(jme);

		jme_stop_pcc_timer(jme);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);
		netif_carrier_off(netdev);
		jme->phylink = 0;
	}

	tasklet_enable(&jme->txclean_task);
	tasklet_enable(&jme->rxclean_task);
	tasklet_enable(&jme->rxempty_task);

	jme_powersave_phy(jme);

	return 0;
}

static int
jme_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct jme_adapter *jme = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	jme_clear_pm_disable_wol(jme);
	jme_phy_on(jme);
	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_link_ksettings(netdev, &jme->old_cmd);
	else
		jme_reset_phy_processor(jme);
	jme_phy_calibration(jme);
	jme_phy_setEA(jme);
	netif_device_attach(netdev);

	atomic_inc(&jme->link_changing);

	jme_reset_link(jme);

	jme_start_irq(jme);

	return 0;
}

static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
#define JME_PM_OPS (&jme_pm_ops)

#else

#define JME_PM_OPS NULL
#endif

static const struct pci_device_id jme_pci_tbl[] = {
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
	{ }
};

static struct pci_driver jme_driver = {
	.name		= DRV_NAME,
	.id_table	= jme_pci_tbl,
	.probe		= jme_init_one,
	.remove		= jme_remove_one,
	.shutdown	= jme_shutdown,
	.driver.pm	= JME_PM_OPS,
};

static int __init
jme_init_module(void)
{
	pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}

static void __exit
jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);