// SPDX-License-Identifier: GPL-2.0-or-later
/*
    A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
    ethernet driver for Linux.
    Copyright (C) 1997  Sten Wang

    DAVICOM Web-Site: www.davicom.com.tw

    Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
    Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>

    (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.

    Marcelo Tosatti <marcelo@conectiva.com.br> :
	Made it compile in 2.3 (device to net_device)

    Alan Cox <alan@lxorguk.ukuu.org.uk> :
	Cleaned up for kernel merge.
	Removed the back compatibility support
	Reformatted, fixing spelling etc as I went
	Removed IRQ 0-15 assumption

    Jeff Garzik <jgarzik@pobox.com> :
	Updated to use new PCI driver API.
	Resource usage cleanups.
	Report driver version to user.

    Tobias Ringstrom <tori@unhappy.mine.nu> :
	Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
	Andrew Morton and Frank Davis for the SMP safety fixes.

    Vojtech Pavlik <vojtech@suse.cz> :
	Cleaned up pointer arithmetics.
	Fixed a lot of 64bit issues.
	Cleaned up printk()s a bit.
	Fixed some obvious big endian problems.

    Tobias Ringstrom <tori@unhappy.mine.nu> :
	Use time_after for jiffies calculation.  Added ethtool
	support.  Updated PCI resource allocation.  Do not
	forget to unmap PCI mapped skbs.

    Alan Cox <alan@lxorguk.ukuu.org.uk>
	Added new PCI identifiers provided by Clear Zhang at ALi
	for their 1563 ethernet device.

    TODO

    Check on 64 bit boxes.
    Check and fix on big endian boxes.

    Test and make sure PCI latency is now correct for all cases.
*/
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#define DRV_NAME	"dmfe"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <linux/uaccess.h>

#ifdef CONFIG_TULIP_DM910X
#include <linux/of.h>
#endif
/* Board/System/Debug information/definition ---------------- */
#define PCI_DM9132_ID	0x91321282	/* Davicom DM9132 ID */
#define PCI_DM9102_ID	0x91021282	/* Davicom DM9102 ID */
#define PCI_DM9100_ID	0x91001282	/* Davicom DM9100 ID */
#define PCI_DM9009_ID	0x90091282	/* Davicom DM9009 ID */

#define DM9102_IO_SIZE	0x80
#define DM9102A_IO_SIZE	0x100
#define TX_MAX_SEND_CNT	0x1		/* Maximum tx packets per poll */
#define TX_DESC_CNT	0x10		/* Allocated Tx descriptors */
#define RX_DESC_CNT	0x20		/* Allocated Rx descriptors */
#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)	/* Max TX packet count */
#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)	/* TX wakeup count */
#define DESC_ALL_CNT	(TX_DESC_CNT + RX_DESC_CNT)
#define TX_BUF_ALLOC	0x600
#define RX_ALLOC_SIZE	0x620
#define DM910X_RESET	1
#define CR0_DEFAULT	0x00E00000	/* TX & RX burst mode */
#define CR6_DEFAULT	0x00080000	/* HD */
#define CR7_DEFAULT	0x180c1
#define CR15_DEFAULT	0x06		/* TxJabber RxWatchdog */
#define TDES0_ERR_MASK	0x4302		/* TXJT, LC, EC, FUE */
#define MAX_PACKET_SIZE	1514
#define DMFE_MAX_MULTICAST 14
#define RX_COPY_SIZE	100
#define MAX_CHECK_PACKET 0x8000
#define DM9801_NOISE_FLOOR 8
#define DM9802_NOISE_FLOOR 5

#define DMFE_WOL_LINKCHANGE	0x20000000
#define DMFE_WOL_SAMPLEPACKET	0x10000000
#define DMFE_WOL_MAGICPACKET	0x08000000

#define DMFE_100MHF	1
#define DMFE_100MFD	5
#define DMFE_1M_HPNA	0x10

#define DMFE_TXTH_72	0x400000	/* TX TH 72 byte */
#define DMFE_TXTH_96	0x404000	/* TX TH 96 byte */
#define DMFE_TXTH_128	0x0000		/* TX TH 128 byte */
#define DMFE_TXTH_256	0x4000		/* TX TH 256 byte */
#define DMFE_TXTH_512	0x8000		/* TX TH 512 byte */
#define DMFE_TXTH_1K	0xC000		/* TX TH 1K byte */

#define DMFE_TIMER_WUT	(jiffies + HZ * 1)	/* timer wakeup time : 1 second */
#define DMFE_TX_TIMEOUT	((3*HZ)/2)	/* tx packet time-out time 1.5 s */
#define DMFE_TX_KICK	(HZ/2)		/* tx packet kick-out time 0.5 s */

#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
#define dr32(reg)	ioread32(ioaddr + (reg))
#define dr16(reg)	ioread16(ioaddr + (reg))
#define dr8(reg)	ioread8(ioaddr + (reg))
#define DMFE_DBUG(dbug_now, msg, value)			\
	do {						\
		if (dmfe_debug || (dbug_now))		\
			pr_err("%s %lx\n",		\
			       (msg), (long) (value));	\
	} while (0)

#define SHOW_MEDIA_TYPE(mode)				\
	pr_info("Change Speed to %sMhz %s duplex\n" ,	\
		(mode & 1) ? "100":"10",		\
		(mode & 4) ? "full":"half");
/* CR9 definition: SROM/MII */
#define CR9_SROM_READ	0x4800
#define CR9_SRCLK	0x2
#define CR9_CRDOUT	0x8
#define SROM_DATA_0	0x0
#define SROM_DATA_1	0x4
#define PHY_DATA_1	0x20000
#define PHY_DATA_0	0x00000
#define MDCLKH		0x10000

#define PHY_POWER_DOWN	0x800

#define SROM_V41_CODE	0x14

#define __CHK_IO_SIZE(pci_id, dev_rev) \
	(( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
	DM9102A_IO_SIZE: DM9102_IO_SIZE)

#define CHK_IO_SIZE(pci_dev) \
	(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
	(pci_dev)->revision))
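/*
 * Worked example for the macros above: CHK_IO_SIZE() packs the PCI device
 * and vendor IDs into the same 0xDDDDVVVV layout used by the PCI_DM91xx_ID
 * constants, so a DM9132 (device 0x9132, vendor 0x1282 -> 0x91321282) or
 * any chip with revision >= 0x30 is expected to decode the larger
 * 0x100-byte I/O window, while older DM9102 parts only decode 0x80 bytes.
 */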
/* Structure/enum declaration ------------------------------- */
struct tx_desc {
	__le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
	char *tx_buf_ptr;		/* Data for us */
	struct tx_desc *next_tx_desc;
} __attribute__(( aligned(32) ));

struct rx_desc {
	__le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
	struct sk_buff *rx_skb_ptr;	/* Data for us */
	struct rx_desc *next_rx_desc;
} __attribute__(( aligned(32) ));
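/*
 * Note on the two descriptor structures above: the four __le32 words mirror
 * the chip's own descriptor layout (status, control, buffer address, next
 * address) and stay little-endian, while tx_buf_ptr/rx_skb_ptr and the
 * next_*_desc pointers are driver-side bookkeeping the hardware never sees.
 * The aligned(32) attribute keeps each descriptor on a 32-byte boundary,
 * presumably to satisfy the controller's descriptor alignment requirement.
 */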
struct dmfe_board_info {
	u32 chip_id;			/* Chip vendor/Device ID */
	u8 chip_revision;		/* Chip revision */
	struct net_device *next_dev;	/* next device */
	struct pci_dev *pdev;		/* PCI device */

	void __iomem *ioaddr;		/* I/O base address */

	/* pointer for memory physical address */
	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool align dword */
	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
	dma_addr_t first_tx_desc_dma;
	dma_addr_t first_rx_desc_dma;

	/* descriptor pointer */
	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
	unsigned char *buf_pool_start;	/* Tx buffer pool align dword */
	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
	struct tx_desc *first_tx_desc;
	struct tx_desc *tx_insert_ptr;
	struct tx_desc *tx_remove_ptr;
	struct rx_desc *first_rx_desc;
	struct rx_desc *rx_insert_ptr;
	struct rx_desc *rx_ready_ptr;	/* packet come pointer */
	unsigned long tx_packet_cnt;	/* transmitted packet count */
	unsigned long tx_queue_cnt;	/* wait to send packet count */
	unsigned long rx_avail_cnt;	/* available rx descriptor count */
	unsigned long interval_rx_cnt;	/* rx packet count per callback time */

	u16 HPNA_command;		/* For HPNA register 16 */
	u16 HPNA_timer;			/* For HPNA remote device check */

	u16 NIC_capability;		/* NIC media capability */
	u16 PHY_reg4;			/* Saved Phyxcer register 4 value */

	u8 HPNA_present;		/* 0:none, 1:DM9801, 2:DM9802 */
	u8 chip_type;			/* Keep DM9102A chip type */
	u8 media_mode;			/* user specified media mode */
	u8 op_mode;			/* real working media mode */

	u8 wait_reset;			/* Hardware failed, need to reset */
	u8 dm910x_chk_mode;		/* Operating mode check */
	u8 first_in_callback;		/* Flag to record state */
	u8 wol_mode;			/* user WOL settings */
	struct timer_list timer;

	/* Driver defined statistic counter */
	unsigned long tx_fifo_underrun;
	unsigned long tx_loss_carrier;
	unsigned long tx_no_carrier;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_jabber_timeout;
	unsigned long reset_count;
	unsigned long reset_cr8;
	unsigned long reset_fatal;
	unsigned long reset_TXtimeout;

	unsigned char srom[128];
};
enum dmfe_offsets {
	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
	DCR15 = 0x78
};

enum dmfe_CR6_bits {
	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};
/* Global variable declaration ----------------------------- */
static int dmfe_debug;
static unsigned char dmfe_media_mode = DMFE_AUTO;
static u32 dmfe_cr6_user_set;

/* For module input parameter */
static unsigned char mode = 8;
static u8 chkmode = 1;
static u8 HPNA_mode;		/* Default: Low Power/High Speed */
static u8 HPNA_rx_cmd;		/* Default: Disable Rx remote command */
static u8 HPNA_tx_cmd;		/* Default: Don't issue remote command */
static u8 HPNA_NoiseFloor;	/* Default: HPNA NoiseFloor */
static u8 SF_mode;		/* Special Function: 1:VLAN, 2:RX Flow Control
				   4: TX pause packet */
/* function declaration ------------------------------------- */
static int dmfe_open(struct net_device *);
static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *);
static int dmfe_stop(struct net_device *);
static void dmfe_set_filter_mode(struct net_device *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(void __iomem *, int);
static irqreturn_t dmfe_interrupt(int , void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_dmfe (struct net_device *dev);
#endif
static void dmfe_descriptor_init(struct net_device *);
static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, void __iomem *);
static void send_filter_frame(struct net_device *);
static void dm9132_id_table(struct net_device *);
static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
static void dmfe_phy_write_1bit(void __iomem *, u32);
static u16 dmfe_phy_read_1bit(void __iomem *);
static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(struct timer_list *);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *);
static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
static void dmfe_dynamic_reset(struct net_device *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
static void dmfe_init_dm910x(struct net_device *);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info *);
static void dmfe_set_phyxcer(struct dmfe_board_info *);
/* DM910X network board routine ---------------------------- */

static const struct net_device_ops netdev_ops = {
	.ndo_open		= dmfe_open,
	.ndo_stop		= dmfe_stop,
	.ndo_start_xmit		= dmfe_start_xmit,
	.ndo_set_rx_mode	= dmfe_set_filter_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_dmfe,
#endif
};
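/*
 * Probe-path note: dmfe_init_one() below allocates one coherent block of
 * DESC_ALL_CNT descriptors (plus 0x20 bytes, presumably slack for 32-byte
 * descriptor alignment) and a second block of TX_DESC_CNT * TX_BUF_ALLOC
 * bytes of Tx bounce buffers (plus 4 bytes of dword-alignment slack), then
 * reads the station address from SROM bytes 20..25.
 */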
/*
 *	Search DM910X board, allocate space and register it
 */
static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct dmfe_board_info *db;	/* board information structure */
	struct net_device *dev;

	DMFE_DBUG(0, "dmfe_init_one()", 0);

	/*
	 *	SPARC on-board DM910x chips should be handled by the main
	 *	tulip driver, except for early DM9100s.
	 */
#ifdef CONFIG_TULIP_DM910X
	if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
	    ent->driver_data == PCI_DM9102_ID) {
		struct device_node *dp = pci_device_to_OF_node(pdev);

		if (dp && of_get_property(dp, "local-mac-address", NULL)) {
			pr_info("skipping on-board DM910x (use tulip)\n");

	/* Init network device */
	dev = alloc_etherdev(sizeof(*db));
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		pr_warn("32-bit PCI DMA not available\n");

	/* Enable Master/IO access, Disable memory access */
	err = pci_enable_device(pdev);

	if (!pci_resource_start(pdev, 0)) {
		pr_err("I/O base is zero\n");
		goto err_out_disable;

	if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
		pr_err("Allocated I/O size too small\n");
		goto err_out_disable;

#if 0	/* pci_{enable_device,set_master} sets minimum latency for us now */

	/* Set Latency Timer 80h */
	/* FIXME: setting values > 32 breaks some SiS 559x stuff.
	   Need a PCI quirk.. */

	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
#endif

	if (pci_request_regions(pdev, DRV_NAME)) {
		pr_err("Failed to request PCI regions\n");
		goto err_out_disable;

	/* Init system & device */
	db = netdev_priv(dev);

	/* Allocate Tx/Rx descriptor memory */
	db->desc_pool_ptr = dma_alloc_coherent(&pdev->dev,
					       sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
					       &db->desc_pool_dma_ptr, GFP_KERNEL);
	if (!db->desc_pool_ptr) {

	db->buf_pool_ptr = dma_alloc_coherent(&pdev->dev,
					      TX_BUF_ALLOC * TX_DESC_CNT + 4,
					      &db->buf_pool_dma_ptr, GFP_KERNEL);
	if (!db->buf_pool_ptr) {
		goto err_out_free_desc;

	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
	db->buf_pool_start = db->buf_pool_ptr;
	db->buf_pool_dma_start = db->buf_pool_dma_ptr;

	db->chip_id = ent->driver_data;

	db->ioaddr = pci_iomap(pdev, 0, 0);
		goto err_out_free_buf;

	db->chip_revision = pdev->revision;

	pci_set_drvdata(pdev, dev);
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	netif_carrier_off(dev);
	spin_lock_init(&db->lock);

	pci_read_config_dword(pdev, 0x50, &pci_pmr);
	if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
		db->chip_type = 1;	/* DM9102A E3 */

	/* read 64 word srom data */
	for (i = 0; i < 64; i++) {
		((__le16 *) db->srom)[i] =
			cpu_to_le16(read_srom_word(db->ioaddr, i));

	/* Set Node address */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = db->srom[20 + i];

	err = register_netdev (dev);

	dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
		 ent->driver_data >> 16,
		 pci_name(pdev), dev->dev_addr, pdev->irq);

	pci_set_master(pdev);

	pci_iounmap(pdev, db->ioaddr);
	dma_free_coherent(&pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
			  db->buf_pool_ptr, db->buf_pool_dma_ptr);
	dma_free_coherent(&pdev->dev,
			  sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
			  db->desc_pool_ptr, db->desc_pool_dma_ptr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
static void dmfe_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct dmfe_board_info *db = netdev_priv(dev);

	DMFE_DBUG(0, "dmfe_remove_one()", 0);

	unregister_netdev(dev);
	pci_iounmap(db->pdev, db->ioaddr);
	dma_free_coherent(&db->pdev->dev,
			  sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
			  db->desc_pool_ptr, db->desc_pool_dma_ptr);
	dma_free_coherent(&db->pdev->dev,
			  TX_BUF_ALLOC * TX_DESC_CNT + 4,
			  db->buf_pool_ptr, db->buf_pool_dma_ptr);
	pci_release_regions(pdev);
	free_netdev(dev);	/* free board information */

	DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
/*
 *	Open the interface.
 *	The interface is opened whenever "ifconfig" activates it.
 */

static int dmfe_open(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	const int irq = db->pdev->irq;

	DMFE_DBUG(0, "dmfe_open", 0);

	ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);

	/* system variable init */
	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;

	db->first_in_callback = 0;
	db->NIC_capability = 0xf;	/* All capability */
	db->PHY_reg4 = 0x1e0;

	/* CR6 operation mode decision */
	if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
		(db->chip_revision >= 0x30) ) {
		db->cr6_data |= DMFE_TXTH_256;
		db->cr0_data = CR0_DEFAULT;
		db->dm910x_chk_mode = 4;	/* Enter the normal mode */
		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
		db->dm910x_chk_mode = 1;	/* Enter the check mode */

	/* Initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Activate system interface */
	netif_wake_queue(dev);

	/* set and activate a timer process */
	timer_setup(&db->timer, dmfe_timer, 0);
	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
	add_timer(&db->timer);
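/*
 * Hardware bring-up order used by dmfe_init_dm910x() below: reset the MAC
 * via CR0, reset the external phyxcer through the GPR port (CR12 bit 7),
 * pick the media mode, program the descriptor base addresses (CR3/CR4),
 * load the address filter (setup frame, or the DM9132 ID table), unmask
 * interrupts in CR7 and finally enable the Tx/Rx state machines in CR6.
 */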
/* Initialize DM910X board
 *	Initialize TX/Rx descriptor chain structure
 *	Send the set-up frame
 *	Enable Tx/Rx machine
 */

static void dmfe_init_dm910x(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

	/* Reset DM910x MAC controller */
	dw32(DCR0, DM910X_RESET);	/* RESET MAC */
	dw32(DCR0, db->cr0_data);

	/* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */

	/* Parse SROM and media mode */
	db->media_mode = dmfe_media_mode;

	/* RESET Phyxcer Chip by GPR port bit 7 */
	dw32(DCR12, 0x180);		/* Let bit 7 output port */
	if (db->chip_id == PCI_DM9009_ID) {
		dw32(DCR12, 0x80);	/* Issue RESET signal */
		mdelay(300);		/* Delay 300 ms */
	dw32(DCR12, 0x0);	/* Clear RESET signal */

	/* Process Phyxcer Media Mode */
	if ( !(db->media_mode & 0x10) )	/* Force 1M mode */
		dmfe_set_phyxcer(db);

	/* Media Mode Process */
	if ( !(db->media_mode & DMFE_AUTO) )
		db->op_mode = db->media_mode;	/* Force Mode */

	/* Initialize Transmit/Receive descriptor and CR3/4 */
	dmfe_descriptor_init(dev);

	/* Init CR6 to program DM910x operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send setup frame */
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev);	/* DM9132 */
		send_filter_frame(dev);	/* DM9102/DM9102A */

	/* Init CR7, interrupt active bit */
	db->cr7_data = CR7_DEFAULT;
	dw32(DCR7, db->cr7_data);

	/* Init CR15, Tx jabber and Rx watchdog timer */
	dw32(DCR15, db->cr15_data);

	/* Enable DM910X Tx/Rx function */
	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
	update_cr6(db->cr6_data, ioaddr);
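/*
 * Tx path note: each packet is copied into the descriptor's pre-allocated
 * bounce buffer, tdes1 is loaded with the control bits plus the length
 * (0xe1000000 | len -- on this tulip-like descriptor format those high bits
 * are, as far as I can tell, interrupt-on-completion, last/first segment
 * and "chained" flags), and the descriptor is handed to the chip by setting
 * the ownership bit (0x80000000) in tdes0 and poking CR1 for Tx polling.
 */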
/*
 *	Hardware start transmission.
 *	Send a packet to media from the upper layer.
 */

static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct tx_desc *txptr;

	DMFE_DBUG(0, "dmfe_start_xmit", 0);

	/* Too large packet check */
	if (skb->len > MAX_PACKET_SIZE) {
		pr_err("big packet = %d\n", (u16)skb->len);
		dev_kfree_skb_any(skb);

	/* Resource flag check */
	netif_stop_queue(dev);

	spin_lock_irqsave(&db->lock, flags);

	/* No Tx resource check, it never happens normally */
	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
		spin_unlock_irqrestore(&db->lock, flags);
		pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
		return NETDEV_TX_BUSY;

	/* Disable NIC interrupt */

	/* transmit this packet */
	txptr = db->tx_insert_ptr;
	skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);

	/* Point to next transmit free descriptor */
	db->tx_insert_ptr = txptr->next_tx_desc;

	/* Transmit Packet Process */
	if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		dw32(DCR1, 0x1);			/* Issue Tx polling */
		netif_trans_update(dev);		/* saved time stamp */
		db->tx_queue_cnt++;			/* queue TX packet */
		dw32(DCR1, 0x1);			/* Issue Tx polling */

	/* Tx resource check */
	if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
		netif_wake_queue(dev);

	/* Restore CR7 to enable interrupt */
	spin_unlock_irqrestore(&db->lock, flags);
	dw32(DCR7, db->cr7_data);

	dev_consume_skb_any(skb);
/*
 *	Stop the interface.
 *	The interface is stopped when it is brought down.
 */

static int dmfe_stop(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_stop", 0);

	netif_stop_queue(dev);

	del_timer_sync(&db->timer);

	/* Reset & stop DM910X board */
	dw32(DCR0, DM910X_RESET);
	dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

	free_irq(db->pdev->irq, dev);

	/* free allocated rx buffer */
	dmfe_free_rxbuffer(db);

	/* show statistic counter */
	printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
	       db->tx_fifo_underrun, db->tx_excessive_collision,
	       db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
	       db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
	       db->reset_fatal, db->reset_TXtimeout);
/*
 *	DM9102 interrupt handler
 *	receive the packet to upper layer, free the transmitted packet
 */

static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_interrupt()", 0);

	spin_lock_irqsave(&db->lock, flags);

	/* Got DM910X status */
	db->cr5_data = dr32(DCR5);
	dw32(DCR5, db->cr5_data);
	if ( !(db->cr5_data & 0xc1) ) {
		spin_unlock_irqrestore(&db->lock, flags);

	/* Disable all interrupts in CR7 to solve the interrupt edge problem */

	/* Check system status */
	if (db->cr5_data & 0x2000) {
		/* system bus error happened */
		DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
		db->wait_reset = 1;	/* Need to RESET */
		spin_unlock_irqrestore(&db->lock, flags);

	/* Receive the incoming packets */
	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
		dmfe_rx_packet(dev, db);

	/* reallocate rx descriptor buffer */
	if (db->rx_avail_cnt < RX_DESC_CNT)
		allocate_rx_buffer(dev);

	/* Free the transmitted descriptor */
	if ( db->cr5_data & 0x01)
		dmfe_free_tx_pkt(dev, db);

	if (db->dm910x_chk_mode & 0x2) {
		db->dm910x_chk_mode = 0x4;
		db->cr6_data |= 0x100;
		update_cr6(db->cr6_data, ioaddr);

	/* Restore CR7 to enable interrupt mask */
	dw32(DCR7, db->cr7_data);

	spin_unlock_irqrestore(&db->lock, flags);
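/*
 * Interrupt handling note: CR5 is the status register and is acknowledged
 * by writing the value just read back to it.  The handler only reacts to
 * the bits tested above -- 0x2000 (system/bus error, which flags wait_reset
 * so the timer performs a dynamic reset), 0x40 (receive done) and 0x01
 * (transmit done); a status with none of the 0xc1 bits set is ignored.
 */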
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void poll_dmfe (struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	const int irq = db->pdev->irq;

	/* disable_irq here is not very nice, but with the lockless
	   interrupt handler we have no other choice. */
	dmfe_interrupt (irq, dev);
/*
 *	Free TX resource after TX complete
 */

static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db)
{
	struct tx_desc *txptr;
	void __iomem *ioaddr = db->ioaddr;

	txptr = db->tx_remove_ptr;
	while (db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		if (tdes0 & 0x80000000)

		/* A packet transmission completed */
		dev->stats.tx_packets++;

		/* Transmit statistic counter */
		if ( tdes0 != 0x7fffffff ) {
			dev->stats.collisions += (tdes0 >> 3) & 0xf;
			dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				dev->stats.tx_errors++;

				if (tdes0 & 0x0002) {	/* UnderRun */
					db->tx_fifo_underrun++;
					if ( !(db->cr6_data & CR6_SFT) ) {
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, ioaddr);
					db->tx_excessive_collision++;
					db->tx_late_collision++;
					db->tx_loss_carrier++;
					db->tx_jabber_timeout++;

		txptr = txptr->next_tx_desc;

	/* Update TX remove pointer to next */
	db->tx_remove_ptr = txptr;

	/* Send the Tx packet in queue */
	if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		dw32(DCR1, 0x1);			/* Issue Tx polling */
		netif_trans_update(dev);		/* saved time stamp */

	/* Resource available check */
	if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
		netif_wake_queue(dev);	/* Activate upper layer, send again */
/*
 * Calculate the CRC value of the Rx packet
 * flag =	1 : return the reverse CRC (for the received packet CRC)
 *		0 : return the normal CRC (for Hash Table index)
 */

static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
{
	u32 crc = crc32(~0, Data, Len);
	if (flag) crc = ~crc;
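/*
 * Usage note for cal_CRC(): with flag == 0 the plain CRC-32 is used by
 * dm9132_id_table() below, where only the low 6 bits select one of the 64
 * multicast hash-table bits; with flag == 1 the bit-inverted CRC is
 * returned so dmfe_rx_packet() can compare it directly against the 4 CRC
 * bytes the chip leaves after the received payload.
 */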
/*
 *	Receive the incoming packets and pass them to the upper layer
 */

static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb, *newskb;

	rxptr = db->rx_ready_ptr;

	while (db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)	/* packet owner check */

		db->interval_rx_cnt++;

		dma_unmap_single(&db->pdev->dev, le32_to_cpu(rxptr->rdes2),
				 RX_ALLOC_SIZE, DMA_FROM_DEVICE);

		if ( (rdes0 & 0x300) != 0x300) {
			/* A packet without First/Last flag */
			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
			/* A packet with First/Last flag */
			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;

			/* error summary bit check */
			if (rdes0 & 0x8000) {
				/* This is an error packet */
				dev->stats.rx_errors++;
					dev->stats.rx_fifo_errors++;
					dev->stats.rx_crc_errors++;
					dev->stats.rx_length_errors++;

			if ( !(rdes0 & 0x8000) ||
				((db->cr6_data & CR6_PM) && (rxlen > 6)) ) {
				skb = rxptr->rx_skb_ptr;

				/* Received Packet CRC check need or not */
				if ( (db->dm910x_chk_mode & 1) &&
					(cal_CRC(skb->data, rxlen, 1) !=
					(*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
					/* Found an error in the received packet */
					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					db->dm910x_chk_mode = 3;
					/* Good packet, send to upper layer */
					/* Short packets use a new SKB */
					if ((rxlen < RX_COPY_SIZE) &&
						((newskb = netdev_alloc_skb(dev, rxlen + 2))

						/* size less than COPY_SIZE, allocate a rxlen SKB */
						skb_reserve(skb, 2); /* 16byte align */
						skb_copy_from_linear_data(rxptr->rx_skb_ptr,
									  skb_put(skb, rxlen),
						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
						skb_put(skb, rxlen);

					skb->protocol = eth_type_trans(skb, dev);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += rxlen;

				/* Reuse the SKB buffer when the packet has an error */
				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);

		rxptr = rxptr->next_rx_desc;

	db->rx_ready_ptr = rxptr;
/*
 * Set DM910X multicast address
 */

static void dmfe_set_filter_mode(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	unsigned long flags;
	int mc_count = netdev_mc_count(dev);

	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
	spin_lock_irqsave(&db->lock, flags);

	if (dev->flags & IFF_PROMISC) {
		DMFE_DBUG(0, "Enable PROM Mode", 0);
		db->cr6_data |= CR6_PM | CR6_PBF;
		update_cr6(db->cr6_data, db->ioaddr);
		spin_unlock_irqrestore(&db->lock, flags);

	if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
		DMFE_DBUG(0, "Pass all multicast address", mc_count);
		db->cr6_data &= ~(CR6_PM | CR6_PBF);
		db->cr6_data |= CR6_PAM;
		spin_unlock_irqrestore(&db->lock, flags);

	DMFE_DBUG(0, "Set multicast address", mc_count);
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev);	/* DM9132 */
		send_filter_frame(dev);	/* DM9102/DM9102A */
	spin_unlock_irqrestore(&db->lock, flags);
static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *info)
{
	struct dmfe_board_info *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));

static int dmfe_ethtool_set_wol(struct net_device *dev,
				struct ethtool_wolinfo *wolinfo)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
				WAKE_ARP | WAKE_MAGICSECURE))

	db->wol_mode = wolinfo->wolopts;

static void dmfe_ethtool_get_wol(struct net_device *dev,
				 struct ethtool_wolinfo *wolinfo)
{
	struct dmfe_board_info *db = netdev_priv(dev);

	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
	wolinfo->wolopts = db->wol_mode;

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= dmfe_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.set_wol		= dmfe_ethtool_set_wol,
	.get_wol		= dmfe_ethtool_get_wol,
};
/*
 *	A periodic timer routine
 *	Dynamic media sense, Rx buffer allocation...
 */

static void dmfe_timer(struct timer_list *t)
{
	struct dmfe_board_info *db = from_timer(db, t, timer);
	struct net_device *dev = pci_get_drvdata(db->pdev);
	void __iomem *ioaddr = db->ioaddr;
	unsigned char tmp_cr12;
	unsigned long flags;

	int link_ok, link_ok_phy;

	DMFE_DBUG(0, "dmfe_timer()", 0);
	spin_lock_irqsave(&db->lock, flags);

	/* Media mode process when link is OK before entering this routine */
	if (db->first_in_callback == 0) {
		db->first_in_callback = 1;
		if (db->chip_type && (db->chip_id == PCI_DM9102_ID)) {
			db->cr6_data &= ~0x40000;
			update_cr6(db->cr6_data, ioaddr);
			dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
			db->cr6_data |= 0x40000;
			update_cr6(db->cr6_data, ioaddr);
			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
			add_timer(&db->timer);
			spin_unlock_irqrestore(&db->lock, flags);

	/* Operating Mode Check */
	if ( (db->dm910x_chk_mode & 0x1) &&
		(dev->stats.rx_packets > MAX_CHECK_PACKET) )
		db->dm910x_chk_mode = 0x4;

	/* Dynamic reset DM910X : system error or transmit time-out */
	tmp_cr8 = dr32(DCR8);
	if ( (db->interval_rx_cnt == 0) && (tmp_cr8) ) {
	db->interval_rx_cnt = 0;

	/* TX polling kick monitor */
	if ( db->tx_packet_cnt &&
		time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
		dw32(DCR1, 0x1);	/* Tx polling again */

	if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
		db->reset_TXtimeout++;
		dev_warn(&dev->dev, "Tx timeout - resetting\n");

	if (db->wait_reset) {
		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		dmfe_dynamic_reset(dev);
		db->first_in_callback = 0;
		db->timer.expires = DMFE_TIMER_WUT;
		add_timer(&db->timer);
		spin_unlock_irqrestore(&db->lock, flags);

	/* Link status check, Dynamic media type change */
	if (db->chip_id == PCI_DM9132_ID)
		tmp_cr12 = dr8(DCR9 + 3);	/* DM9132 */
		tmp_cr12 = dr8(DCR12);		/* DM9102/DM9102A */

	if ( ((db->chip_id == PCI_DM9102_ID) &&
		(db->chip_revision == 0x30)) ||
		((db->chip_id == PCI_DM9132_ID) &&
		(db->chip_revision == 0x10)) ) {

		/* 0x43 is used instead of 0x3 because bit 6 should represent
		   link status of external PHY */
		link_ok = (tmp_cr12 & 0x43) ? 1 : 0;

	/* If chip reports that link is failed it could be because external
	   PHY link status pin is not connected correctly to chip
	   To be sure ask PHY too.
	*/

	/* need a dummy read because of PHY's register latch */
	dmfe_phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
	link_ok_phy = (dmfe_phy_read (db->ioaddr,
				      db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;

	if (link_ok_phy != link_ok) {
		DMFE_DBUG (0, "PHY and chip report different link status", 0);
		link_ok = link_ok | link_ok_phy;

	if ( !link_ok && netif_carrier_ok(dev)) {
		DMFE_DBUG(0, "Link Failed", tmp_cr12);
		netif_carrier_off(dev);

		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
		/* AUTO or force 1M Homerun/Longrun don't need */
		if ( !(db->media_mode & 0x38) )
			dmfe_phy_write(db->ioaddr, db->phy_addr,
				       0, 0x1000, db->chip_id);

		/* AUTO mode, if INT phyxcer link failed, select EXT device */
		if (db->media_mode & DMFE_AUTO) {
			/* 10/100M link failed, use 1M Home-Net */
			db->cr6_data |= 0x00040000;	/* bit18=1, MII */
			db->cr6_data &= ~0x00000200;	/* bit9=0, HD mode */
			update_cr6(db->cr6_data, ioaddr);
	} else if (!netif_carrier_ok(dev)) {

		DMFE_DBUG(0, "Link OK", tmp_cr12);

		/* Auto Sense Speed */
		if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
			netif_carrier_on(dev);
			SHOW_MEDIA_TYPE(db->op_mode);
		dmfe_process_mode(db);

	/* HPNA remote command check */
	if (db->HPNA_command & 0xf00) {
		if (!db->HPNA_timer)
			dmfe_HPNA_remote_cmd_chk(db);

	/* Timer active again */
	db->timer.expires = DMFE_TIMER_WUT;
	add_timer(&db->timer);
	spin_unlock_irqrestore(&db->lock, flags);
/*
 *	Dynamic reset the DM910X board
 *	Free Tx/Rx allocated memory
 *	Reset DM910X board
 *	Re-initialize DM910X board
 */

static void dmfe_dynamic_reset(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

	/* Stop MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, ioaddr);
	dw32(DCR7, 0);				/* Disable Interrupt */
	dw32(DCR5, dr32(DCR5));

	/* Disable upper layer interface */
	netif_stop_queue(dev);

	/* Free allocated Rx buffers */
	dmfe_free_rxbuffer(db);

	/* system variable init */
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	netif_carrier_off(dev);

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart upper layer interface */
	netif_wake_queue(dev);
/*
 *	free all allocated rx buffers
 */

static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
{
	DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);

	/* free allocated rx buffers */
	while (db->rx_avail_cnt) {
		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
/*
 *	Reuse the SK buffer
 */

static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
{
	struct rx_desc *rxptr = db->rx_insert_ptr;

	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb->data,
							  RX_ALLOC_SIZE, DMA_FROM_DEVICE));
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		db->rx_insert_ptr = rxptr->next_rx_desc;
		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
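/*
 * Ring layout built by dmfe_descriptor_init() below: the Rx descriptors sit
 * directly behind the TX_DESC_CNT Tx descriptors inside the same coherent
 * allocation, every descriptor's tdes3/rdes3 holds the bus address of the
 * next descriptor (chained mode) and the last one is pointed back at the
 * first, so both rings are circular lists walked through the
 * next_tx_desc/next_rx_desc shadow pointers.
 */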
/*
 *	Initialize transmit/receive descriptors
 *	Using chain structure, and allocate Tx/Rx buffers
 */

static void dmfe_descriptor_init(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;
	struct tx_desc *tmp_tx;
	struct rx_desc *tmp_rx;
	unsigned char *tmp_buf;
	dma_addr_t tmp_tx_dma, tmp_rx_dma;
	dma_addr_t tmp_buf_dma;

	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

	/* tx descriptor start pointer */
	db->tx_insert_ptr = db->first_tx_desc;
	db->tx_remove_ptr = db->first_tx_desc;
	dw32(DCR4, db->first_tx_desc_dma);	/* TX DESC address */

	/* rx descriptor start pointer */
	db->first_rx_desc = (void *)db->first_tx_desc +
			sizeof(struct tx_desc) * TX_DESC_CNT;

	db->first_rx_desc_dma =  db->first_tx_desc_dma +
			sizeof(struct tx_desc) * TX_DESC_CNT;
	db->rx_insert_ptr = db->first_rx_desc;
	db->rx_ready_ptr = db->first_rx_desc;
	dw32(DCR3, db->first_rx_desc_dma);	/* RX DESC address */

	/* Init Transmit chain */
	tmp_buf = db->buf_pool_start;
	tmp_buf_dma = db->buf_pool_dma_start;
	tmp_tx_dma = db->first_tx_desc_dma;
	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
		tmp_tx->tx_buf_ptr = tmp_buf;
		tmp_tx->tdes0 = cpu_to_le32(0);
		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
		tmp_tx_dma += sizeof(struct tx_desc);
		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
		tmp_tx->next_tx_desc = tmp_tx + 1;
		tmp_buf = tmp_buf + TX_BUF_ALLOC;
		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
	tmp_tx->next_tx_desc = db->first_tx_desc;

	/* Init Receive descriptor chain */
	tmp_rx_dma = db->first_rx_desc_dma;
	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
		tmp_rx->rdes0 = cpu_to_le32(0);
		tmp_rx->rdes1 = cpu_to_le32(0x01000600);
		tmp_rx_dma += sizeof(struct rx_desc);
		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
		tmp_rx->next_rx_desc = tmp_rx + 1;
	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
	tmp_rx->next_rx_desc = db->first_rx_desc;

	/* pre-allocate Rx buffers */
	allocate_rx_buffer(dev);
/*
 *	Update CR6 value
 *	First stop the DM910X, then write the new value and restart
 */

static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
{
	cr6_tmp = cr6_data & ~0x2002;	/* stop Tx/Rx */
	dw32(DCR6, cr6_tmp);
	dw32(DCR6, cr6_data);
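/*
 * DM9132 filter note: unlike the DM9102, which needs a setup frame (see
 * send_filter_frame() further down), the DM9132 exposes its station address
 * and 64-bit multicast hash table as registers starting at ioaddr + 0xc0.
 * The hash bit index is the low 6 bits of the CRC-32 of the multicast
 * address, and bit 63 (hash_table[3] bit 15) is kept set for broadcast.
 */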
/*
 *	Send a setup frame for DM9132
 *	This setup frame initializes the DM910X address filter mode
 */

static void dm9132_id_table(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr + 0xc0;
	u16 *addrptr = (u16 *)dev->dev_addr;
	struct netdev_hw_addr *ha;
	u16 i, hash_table[4];

	for (i = 0; i < 3; i++) {
		dw16(0, addrptr[i]);

	/* Clear Hash Table */
	memset(hash_table, 0, sizeof(hash_table));

	/* broadcast address */
	hash_table[3] = 0x8000;

	/* the multicast address in Hash Table : 64 bits */
	netdev_for_each_mc_addr(ha, dev) {
		u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;

		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);

	/* Write the hash table to MAC MD table */
	for (i = 0; i < 4; i++, ioaddr += 4)
		dw16(0, hash_table[i]);
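/*
 * Setup-frame note: send_filter_frame() below builds a 192-byte "perfect
 * filtering" setup frame in the Tx bounce buffer -- 16 address slots, each
 * stored as three 16-bit words in the low halves of consecutive 32-bit
 * words (station address, broadcast, the multicast list, and what appears
 * to be filler for the unused slots).  tdes1 = 0x890000c0 appears to mark
 * the buffer as a setup packet of length 0xc0 in chained mode; it is queued
 * like a normal Tx packet when the transmitter is busy.
 */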
/*
 *	Send a setup frame for DM9102/DM9102A
 *	This setup frame initializes the DM910X address filter mode
 */

static void send_filter_frame(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct tx_desc *txptr;

	DMFE_DBUG(0, "send_filter_frame()", 0);

	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0];
	*suptr++ = addrptr[1];
	*suptr++ = addrptr[2];

	/* broadcast address */

	/* fit the multicast address */
	netdev_for_each_mc_addr(ha, dev) {
		addrptr = (u16 *) ha->addr;
		*suptr++ = addrptr[0];
		*suptr++ = addrptr[1];
		*suptr++ = addrptr[2];

	for (i = netdev_mc_count(dev); i < 14; i++) {

	/* prepare the setup frame */
	db->tx_insert_ptr = txptr->next_tx_desc;
	txptr->tdes1 = cpu_to_le32(0x890000c0);

	/* Resource Check and Send the setup packet */
	if (!db->tx_packet_cnt) {
		void __iomem *ioaddr = db->ioaddr;

		/* Resource Empty */
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);
		update_cr6(db->cr6_data | 0x2000, ioaddr);
		dw32(DCR1, 0x1);	/* Issue Tx polling */
		update_cr6(db->cr6_data, ioaddr);
		netif_trans_update(dev);
		db->tx_queue_cnt++;	/* Put in TX queue */
/*
 *	Allocate rx buffers;
 *	allocate as many Rx buffers as possible
 */

static void allocate_rx_buffer(struct net_device *dev)
{
	struct dmfe_board_info *db = netdev_priv(dev);
	struct rx_desc *rxptr;
	struct sk_buff *skb;

	rxptr = db->rx_insert_ptr;

	while (db->rx_avail_cnt < RX_DESC_CNT) {
		if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL )
		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
		rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb->data,
							  RX_ALLOC_SIZE, DMA_FROM_DEVICE));
		rxptr->rdes0 = cpu_to_le32(0x80000000);
		rxptr = rxptr->next_rx_desc;

	db->rx_insert_ptr = rxptr;
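/*
 * SROM access note: the two helpers below bit-bang what looks like a
 * 93C46-style serial EEPROM through CR9.  srom_clk_write() pulses the clock
 * around one data bit, and read_srom_word() sends the read command (110b),
 * a 6-bit word offset MSB first, then clocks 16 data bits back in through
 * CR9_CRDOUT.
 */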
static void srom_clk_write(void __iomem *ioaddr, u32 data)
{
	static const u32 cmd[] = {
		CR9_SROM_READ | CR9_SRCS,
		CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
		CR9_SROM_READ | CR9_SRCS
	};

	for (i = 0; i < ARRAY_SIZE(cmd); i++) {
		dw32(DCR9, data | cmd[i]);
1580 static u16
read_srom_word(void __iomem
*ioaddr
, int offset
)
1585 dw32(DCR9
, CR9_SROM_READ
);
1587 dw32(DCR9
, CR9_SROM_READ
| CR9_SRCS
);
1590 /* Send the Read Command 110b */
1591 srom_clk_write(ioaddr
, SROM_DATA_1
);
1592 srom_clk_write(ioaddr
, SROM_DATA_1
);
1593 srom_clk_write(ioaddr
, SROM_DATA_0
);
1595 /* Send the offset */
1596 for (i
= 5; i
>= 0; i
--) {
1597 srom_data
= (offset
& (1 << i
)) ? SROM_DATA_1
: SROM_DATA_0
;
1598 srom_clk_write(ioaddr
, srom_data
);
1601 dw32(DCR9
, CR9_SROM_READ
| CR9_SRCS
);
1604 for (i
= 16; i
> 0; i
--) {
1605 dw32(DCR9
, CR9_SROM_READ
| CR9_SRCS
| CR9_SRCLK
);
1607 srom_data
= (srom_data
<< 1) |
1608 ((dr32(DCR9
) & CR9_CRDOUT
) ? 1 : 0);
1609 dw32(DCR9
, CR9_SROM_READ
| CR9_SRCS
);
1613 dw32(DCR9
, CR9_SROM_READ
);
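/*
 * Media sensing note: dmfe_sense_speed() below reads PHY register 1 twice
 * because the link-status bit is latched (the same reason the dummy read is
 * done in dmfe_timer()), then -- when link and auto-negotiation-complete
 * (mask 0x24) are both set -- decodes the negotiated speed/duplex from the
 * vendor status register (reg 7 on the DM9132, reg 17 on the
 * DM9102/DM9102A) into one of the DMFE_* op_mode values.
 */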
/*
 *	Auto sense the media mode
 */

static u8 dmfe_sense_speed(struct dmfe_board_info *db)
{
	void __iomem *ioaddr = db->ioaddr;

	/* CR6 bit18=0, select 10/100M */
	update_cr6(db->cr6_data & ~0x40000, ioaddr);

	phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

	if ( (phy_mode & 0x24) == 0x24 ) {
		if (db->chip_id == PCI_DM9132_ID)	/* DM9132 */
			phy_mode = dmfe_phy_read(db->ioaddr,
						 db->phy_addr, 7, db->chip_id) & 0xf000;
		else					/* DM9102/DM9102A */
			phy_mode = dmfe_phy_read(db->ioaddr,
						 db->phy_addr, 17, db->chip_id) & 0xf000;
		case 0x1000: db->op_mode = DMFE_10MHF; break;
		case 0x2000: db->op_mode = DMFE_10MFD; break;
		case 0x4000: db->op_mode = DMFE_100MHF; break;
		case 0x8000: db->op_mode = DMFE_100MFD; break;
		default: db->op_mode = DMFE_10MHF;
		db->op_mode = DMFE_10MHF;
		DMFE_DBUG(0, "Link Failed :", phy_mode);
/*
 *	Set 10/100 phyxcer capability
 *	AUTO mode : phyxcer register4 is NIC capability
 *	Force mode: phyxcer register4 is the forced media
 */

static void dmfe_set_phyxcer(struct dmfe_board_info *db)
{
	void __iomem *ioaddr = db->ioaddr;

	/* Select 10/100M phyxcer */
	db->cr6_data &= ~0x40000;
	update_cr6(db->cr6_data, ioaddr);

	/* DM9009 Chip: Phyxcer reg18 bit12=0 */
	if (db->chip_id == PCI_DM9009_ID) {
		phy_reg = dmfe_phy_read(db->ioaddr,
					db->phy_addr, 18, db->chip_id) & ~0x1000;

		dmfe_phy_write(db->ioaddr,
			       db->phy_addr, 18, phy_reg, db->chip_id);

	/* Phyxcer capability setting */
	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

	if (db->media_mode & DMFE_AUTO) {
		phy_reg |= db->PHY_reg4;
		switch(db->media_mode) {
		case DMFE_10MHF: phy_reg |= 0x20; break;
		case DMFE_10MFD: phy_reg |= 0x40; break;
		case DMFE_100MHF: phy_reg |= 0x80; break;
		case DMFE_100MFD: phy_reg |= 0x100; break;
		if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;

	/* Write new capability to Phyxcer Reg4 */
	if ( !(phy_reg & 0x01e0)) {
		phy_reg |= db->PHY_reg4;
		db->media_mode |= DMFE_AUTO;
	dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

	/* Restart Auto-Negotiation */
	if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
		dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
	if ( !db->chip_type )
		dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
/*
 *	AUTO mode : PHY controller in Auto-negotiation Mode
 *	Force mode: PHY controller in force mode with HUB
 *			N-way force capability with SWITCH
 */

static void dmfe_process_mode(struct dmfe_board_info *db)
{
	/* Full Duplex Mode Check */
	if (db->op_mode & 0x4)
		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */

	/* Transceiver Selection */
	if (db->op_mode & 0x10)		/* 1M HomePNA */
		db->cr6_data |= 0x40000;	/* External MII select */
		db->cr6_data &= ~0x40000;	/* Internal 10/100 transceiver */

	update_cr6(db->cr6_data, db->ioaddr);

	/* 10/100M phyxcer force mode needed */
	if ( !(db->media_mode & 0x18)) {
		phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
		if ( !(phy_reg & 0x1) ) {
			/* partner without N-Way capability */
			switch(db->op_mode) {
			case DMFE_10MHF: phy_reg = 0x0; break;
			case DMFE_10MFD: phy_reg = 0x100; break;
			case DMFE_100MHF: phy_reg = 0x2000; break;
			case DMFE_100MFD: phy_reg = 0x2100; break;
			dmfe_phy_write(db->ioaddr,
				       db->phy_addr, 0, phy_reg, db->chip_id);
			if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
				dmfe_phy_write(db->ioaddr,
					       db->phy_addr, 0, phy_reg, db->chip_id);
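/*
 * MII management note: on the DM9102/DM9102A the PHY registers are reached
 * by bit-banging the standard MII frame through CR9 -- a long preamble of
 * ones, start bits 01, the read/write opcode, 5 bits of PHY address, 5 bits
 * of register address, a turnaround, then 16 data bits -- which is what the
 * two helpers below and dmfe_phy_write_1bit()/dmfe_phy_read_1bit()
 * implement.  The DM9132 instead maps the PHY registers directly at I/O
 * offset 0x80 + reg * 4.
 */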
/*
 *	Write a word to a Phy register
 */

static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
			   u16 phy_data, u32 chip_id)
{
	if (chip_id == PCI_DM9132_ID) {
		dw16(0x80 + offset * 4, phy_data);
		/* DM9102/DM9102A Chip */

		/* Send 33 synchronization clocks to the Phy controller */
		for (i = 0; i < 35; i++)
			dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send write command(01) to Phy */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send Phy address */
		for (i = 0x10; i > 0; i = i >> 1)
			dmfe_phy_write_1bit(ioaddr,
					    phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address */
		for (i = 0x10; i > 0; i = i >> 1)
			dmfe_phy_write_1bit(ioaddr,
					    offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* write transition */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Write one word of data to the PHY controller */
		for ( i = 0x8000; i > 0; i >>= 1)
			dmfe_phy_write_1bit(ioaddr,
					    phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
/*
 *	Read a word of data from a phy register
 */

static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
{
	if (chip_id == PCI_DM9132_ID) {
		phy_data = dr16(0x80 + offset * 4);
		/* DM9102/DM9102A Chip */

		/* Send 33 synchronization clocks to the Phy controller */
		for (i = 0; i < 35; i++)
			dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send read command(10) to Phy */
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
		dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Send Phy address */
		for (i = 0x10; i > 0; i = i >> 1)
			dmfe_phy_write_1bit(ioaddr,
					    phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address */
		for (i = 0x10; i > 0; i = i >> 1)
			dmfe_phy_write_1bit(ioaddr,
					    offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Skip transition state */
		dmfe_phy_read_1bit(ioaddr);

		/* read 16 bits of data */
		for (phy_data = 0, i = 0; i < 16; i++) {
			phy_data |= dmfe_phy_read_1bit(ioaddr);
/*
 *	Write one data bit to the Phy Controller
 */

static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
{
	dw32(DCR9, phy_data);		/* MII Clock Low */
	dw32(DCR9, phy_data | MDCLKH);	/* MII Clock High */
	dw32(DCR9, phy_data);		/* MII Clock Low */
/*
 *	Read one phy data bit from the PHY controller
 */

static u16 dmfe_phy_read_1bit(void __iomem *ioaddr)
{
	dw32(DCR9, 0x50000);
	phy_data = (dr32(DCR9) >> 19) & 0x1;
	dw32(DCR9, 0x40000);
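/*
 * SROM layout used by dmfe_parse_srom() below (as far as this driver reads
 * it): byte 18 holds the format code (0x14 = V4.1), the station address
 * lives at bytes 20..25 (read in dmfe_init_one()), the media capability and
 * forced-mode words sit at offsets 34/36, and bytes 40/43 carry the special
 * function (VLAN / flow-control / TX-pause) bits.
 */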
/*
 *	Parse the SROM and set the media mode
 */

static void dmfe_parse_srom(struct dmfe_board_info * db)
{
	char * srom = db->srom;
	int dmfe_mode, tmp_reg;

	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);

	db->cr15_data = CR15_DEFAULT;

	/* Check SROM Version */
	if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
		/* Get NIC supported media mode */
		db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
			switch( db->NIC_capability & tmp_reg ) {
			case 0x1: db->PHY_reg4 |= 0x0020; break;
			case 0x2: db->PHY_reg4 |= 0x0040; break;
			case 0x4: db->PHY_reg4 |= 0x0080; break;
			case 0x8: db->PHY_reg4 |= 0x0100; break;

		/* Media Mode Force or not check */
		dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
			     le32_to_cpup((__le32 *) (srom + 36)));
		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */

		/* Special Function setting */
		if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
			db->cr15_data |= 0x40;

		if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
			db->cr15_data |= 0x400;

		/* TX pause packet */
		if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
			db->cr15_data |= 0x9800;

	/* Parse HPNA parameter */
	db->HPNA_command = 1;

	/* Accept remote command or not */
	if (HPNA_rx_cmd == 0)
		db->HPNA_command |= 0x8000;

	/* Issue remote command & operation mode */
	if (HPNA_tx_cmd == 1)
		switch(HPNA_mode) {	/* Issue Remote Command */
		case 0: db->HPNA_command |= 0x0904; break;
		case 1: db->HPNA_command |= 0x0a00; break;
		case 2: db->HPNA_command |= 0x0506; break;
		case 3: db->HPNA_command |= 0x0602; break;
		switch(HPNA_mode) {	/* Don't Issue */
		case 0: db->HPNA_command |= 0x0004; break;
		case 1: db->HPNA_command |= 0x0000; break;
		case 2: db->HPNA_command |= 0x0006; break;
		case 3: db->HPNA_command |= 0x0002; break;

	/* Check whether a DM9801 or DM9802 is present */
	db->HPNA_present = 0;
	update_cr6(db->cr6_data | 0x40000, db->ioaddr);
	tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
		/* DM9801 or DM9802 present */
		if ( dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
			/* DM9801 HomeRun */
			db->HPNA_present = 1;
			dmfe_program_DM9801(db, tmp_reg);
			/* DM9802 LongRun */
			db->HPNA_present = 2;
			dmfe_program_DM9802(db);
/*
 *	Init HomeRun DM9801
 */

static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
{
	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
	case 0xb900: /* DM9801 E3 */
		db->HPNA_command |= 0x1000;
		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
		reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
	case 0xb901: /* DM9801 E4 */
		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
	case 0xb902: /* DM9801 E5 */
	case 0xb903: /* DM9801 E6 */
		db->HPNA_command |= 0x1000;
		reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
		reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
	dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
	dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
/*
 *	Init HomeRun DM9802
 */

static void dmfe_program_DM9802(struct dmfe_board_info * db)
{
	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
	dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
	phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
	dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
/*
 *	Check remote HPNA power and speed status. If not correct,
 *	issue the command again.
 */

static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
{
	/* Got remote device status */
	phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
	case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
	case 0x20: phy_reg = 0x0900;break; /* LP/HS */
	case 0x40: phy_reg = 0x0600;break; /* HP/LS */
	case 0x60: phy_reg = 0x0500;break; /* HP/HS */

	/* Check whether the remote device status matches our setting */
	if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
		dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
		db->HPNA_timer = 600;	/* Match, check every 10 minutes */
static const struct pci_device_id dmfe_pci_tbl[] = {
	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
};
MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
static int __maybe_unused dmfe_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct dmfe_board_info *db = netdev_priv(dev);
	void __iomem *ioaddr = db->ioaddr;

	/* Disable upper layer interface */
	netif_device_detach(dev);

	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
	update_cr6(db->cr6_data, ioaddr);

	/* Disable Interrupt */
	dw32(DCR5, dr32(DCR5));

	/* Free RX buffers */
	dmfe_free_rxbuffer(db);

	device_wakeup_enable(dev_d);
static int __maybe_unused dmfe_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	device_wakeup_disable(dev_d);

	/* Restart upper layer interface */
	netif_device_attach(dev);
static SIMPLE_DEV_PM_OPS(dmfe_pm_ops, dmfe_suspend, dmfe_resume);

static struct pci_driver dmfe_driver = {
	.id_table	= dmfe_pci_tbl,
	.probe		= dmfe_init_one,
	.remove		= dmfe_remove_one,
	.driver.pm	= &dmfe_pm_ops,
};
MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(mode, byte, 0);
module_param(cr6set, int, 0);
module_param(chkmode, byte, 0);
module_param(HPNA_mode, byte, 0);
module_param(HPNA_rx_cmd, byte, 0);
module_param(HPNA_tx_cmd, byte, 0);
module_param(HPNA_NoiseFloor, byte, 0);
module_param(SF_mode, byte, 0);
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
		 "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");

MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
		 "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
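/*
 * Example (hypothetical values, matching the parameter descriptions above):
 * loading the driver with "modprobe dmfe mode=5 chkmode=0" would request
 * 100Mbps (bit 0) full duplex (bit 2) -- i.e. DMFE_100MFD -- and skip the
 * DM9102 operating-mode check, while the default mode=8 corresponds to
 * automatic media selection.
 */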
/*
 *	When the user uses insmod to add the module, the system invokes
 *	init_module() to initialize and register.
 */

static int __init dmfe_init_module(void)
{
	DMFE_DBUG(0, "init_module() ", debug);

	dmfe_debug = debug;	/* set debug flag */
	dmfe_cr6_user_set = cr6set;

		dmfe_media_mode = mode;
		dmfe_media_mode = DMFE_AUTO;

		HPNA_mode = 0;		/* Default: LP/HS */
	if (HPNA_rx_cmd > 1)
		HPNA_rx_cmd = 0;	/* Default: Ignore remote cmd */
	if (HPNA_tx_cmd > 1)
		HPNA_tx_cmd = 0;	/* Default: Don't issue remote cmd */
	if (HPNA_NoiseFloor > 15)
		HPNA_NoiseFloor = 0;

	rc = pci_register_driver(&dmfe_driver);
/*
 *	When the user uses rmmod to remove the module, the system invokes
 *	cleanup_module() to un-register all registered services.
 */

static void __exit dmfe_cleanup_module(void)
{
	DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
	pci_unregister_driver(&dmfe_driver);

module_init(dmfe_init_module);
module_exit(dmfe_cleanup_module);