2 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4 * Copyright © 2005 Agere Systems Inc.
8 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
10 *------------------------------------------------------------------------------
14 * This software is provided subject to the following terms and conditions,
15 * which you should read carefully before using the software. Using this
16 * software indicates your acceptance of these terms and conditions. If you do
17 * not agree with these terms and conditions, do not use the software.
19 * Copyright © 2005 Agere Systems Inc.
20 * All rights reserved.
22 * Redistribution and use in source or binary forms, with or without
23 * modifications, are permitted provided that the following conditions are met:
25 * . Redistributions of source code must retain the above copyright notice, this
26 * list of conditions and the following Disclaimer as comments in the code as
27 * well as in the documentation and/or other materials provided with the
30 * . Redistributions in binary form must reproduce the above copyright notice,
31 * this list of conditions and the following Disclaimer in the documentation
32 * and/or other materials provided with the distribution.
34 * . Neither the name of Agere Systems Inc. nor the names of the contributors
35 * may be used to endorse or promote products derived from this software
36 * without specific prior written permission.
40 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
41 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
42 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
43 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
44 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
47 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
48 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
50 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
56 #include <linux/pci.h>
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/kernel.h>
61 #include <linux/sched.h>
62 #include <linux/ptrace.h>
63 #include <linux/slab.h>
64 #include <linux/ctype.h>
65 #include <linux/string.h>
66 #include <linux/timer.h>
67 #include <linux/interrupt.h>
69 #include <linux/delay.h>
70 #include <linux/bitops.h>
73 #include <linux/netdevice.h>
74 #include <linux/etherdevice.h>
75 #include <linux/skbuff.h>
76 #include <linux/if_arp.h>
77 #include <linux/ioport.h>
78 #include <linux/crc32.h>
79 #include <linux/random.h>
80 #include <linux/phy.h>
84 MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
85 MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
86 MODULE_LICENSE("Dual BSD/GPL");
87 MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");
90 #define MAX_NUM_REGISTER_POLLS 1000
91 #define MAX_NUM_WRITE_RETRIES 2
94 #define COUNTER_WRAP_16_BIT 0x10000
95 #define COUNTER_WRAP_12_BIT 0x1000
98 #define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */
99 #define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */
102 /* For interrupts, normal running is:
103 * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
104 * watchdog_interrupt & txdma_xfer_done
106 * In both cases, when flow control is enabled for either Tx or bi-direction,
107 * we additional enable rx_fbr0_low and rx_fbr1_low, so we know when the
108 * buffer rings are running low.
110 #define INT_MASK_DISABLE 0xffffffff
112 /* NOTE: Masking out MAC_STAT Interrupt for now...
113 * #define INT_MASK_ENABLE 0xfff6bf17
114 * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7
116 #define INT_MASK_ENABLE 0xfffebf17
117 #define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7
119 /* General defines */
120 /* Packet and header sizes */
121 #define NIC_MIN_PACKET_SIZE 60
123 /* Multicast list size */
124 #define NIC_MAX_MCAST_LIST 128
126 /* Supported Filters */
127 #define ET131X_PACKET_TYPE_DIRECTED 0x0001
128 #define ET131X_PACKET_TYPE_MULTICAST 0x0002
129 #define ET131X_PACKET_TYPE_BROADCAST 0x0004
130 #define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008
131 #define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010
134 #define ET131X_TX_TIMEOUT (1 * HZ)
135 #define NIC_SEND_HANG_THRESHOLD 0
137 /* MP_ADAPTER flags */
138 #define FMP_ADAPTER_INTERRUPT_IN_USE 0x00000008
140 /* MP_SHARED flags */
141 #define FMP_ADAPTER_LOWER_POWER 0x00200000
143 #define FMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
144 #define FMP_ADAPTER_HARDWARE_ERROR 0x04000000
146 #define FMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
148 /* Some offsets in PCI config space that are actually used. */
149 #define ET1310_PCI_MAC_ADDRESS 0xA4
150 #define ET1310_PCI_EEPROM_STATUS 0xB2
151 #define ET1310_PCI_ACK_NACK 0xC0
152 #define ET1310_PCI_REPLAY 0xC2
153 #define ET1310_PCI_L0L1LATENCY 0xCF
155 /* PCI Product IDs */
156 #define ET131X_PCI_DEVICE_ID_GIG 0xED00 /* ET1310 1000 Base-T 8 */
157 #define ET131X_PCI_DEVICE_ID_FAST 0xED01 /* ET1310 100 Base-T */
159 /* Define order of magnitude converter */
160 #define NANO_IN_A_MICRO 1000
162 #define PARM_RX_NUM_BUFS_DEF 4
163 #define PARM_RX_TIME_INT_DEF 10
164 #define PARM_RX_MEM_END_DEF 0x2bc
165 #define PARM_TX_TIME_INT_DEF 40
166 #define PARM_TX_NUM_BUFS_DEF 4
167 #define PARM_DMA_CACHE_DEF 0
170 #define FBR_CHUNKS 32
171 #define MAX_DESC_PER_RING_RX 1024
173 /* number of RFDs - default and min */
174 #define RFD_LOW_WATER_MARK 40
175 #define NIC_DEFAULT_NUM_RFD 1024
178 #define MAX_PACKETS_HANDLED 256
179 #define ET131X_MIN_MTU 64
180 #define ET131X_MAX_MTU 9216
182 #define ALCATEL_MULTICAST_PKT 0x01000000
183 #define ALCATEL_BROADCAST_PKT 0x02000000
185 /* typedefs for Free Buffer Descriptors */
189 u32 word2
; /* Bits 10-31 reserved, 0-9 descriptor */
192 /* Packet Status Ring Descriptors
196 * top 16 bits are from the Alcatel Status Word as enumerated in
197 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
200 * 1: ipa IP checksum assist
201 * 2: ipp IP checksum pass
202 * 3: tcpa TCP checksum assist
203 * 4: tcpp TCP checksum pass
205 * 6: rxmac_error RXMAC Error Indicator
206 * 7: drop Drop packet
207 * 8: ft Frame Truncated
211 * 16: asw_prev_pkt_dropped e.g. IFG too small on previous
212 * 17: asw_RX_DV_event short receive event detected
213 * 18: asw_false_carrier_event bad carrier since last good packet
214 * 19: asw_code_err one or more nibbles signalled as errors
215 * 20: asw_CRC_err CRC error
216 * 21: asw_len_chk_err frame length field incorrect
217 * 22: asw_too_long frame length > 1518 bytes
218 * 23: asw_OK valid CRC + no code error
219 * 24: asw_multicast has a multicast address
220 * 25: asw_broadcast has a broadcast address
221 * 26: asw_dribble_nibble spurious bits after EOP
222 * 27: asw_control_frame is a control frame
223 * 28: asw_pause_frame is a pause frame
224 * 29: asw_unsupported_op unsupported OP code
225 * 30: asw_VLAN_tag VLAN tag detected
226 * 31: asw_long_evt Rx long event
229 * 0-15: length length in bytes
230 * 16-25: bi Buffer Index
231 * 26-27: ri Ring Index
234 struct pkt_stat_desc
{
239 /* Typedefs for the RX DMA status word */
241 /* rx status word 0 holds part of the status bits of the Rx DMA engine
242 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
243 * which contains the Free Buffer ring 0 and 1 available offset.
245 * bit 0-9 FBR1 offset
246 * bit 10 Wrap flag for FBR1
247 * bit 16-25 FBR0 offset
248 * bit 26 Wrap flag for FBR0
251 /* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
252 * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
253 * which contains the Packet Status Ring available offset.
256 * bit 16-27 PSRoffset
261 /* struct rx_status_block is a structure representing the status of the Rx
262 * DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020
264 struct rx_status_block
{
269 /* Structure for look-up table holding free buffer ring pointers, addresses
273 void *virt
[MAX_DESC_PER_RING_RX
];
274 u32 bus_high
[MAX_DESC_PER_RING_RX
];
275 u32 bus_low
[MAX_DESC_PER_RING_RX
];
277 dma_addr_t ring_physaddr
;
278 void *mem_virtaddrs
[MAX_DESC_PER_RING_RX
/ FBR_CHUNKS
];
279 dma_addr_t mem_physaddrs
[MAX_DESC_PER_RING_RX
/ FBR_CHUNKS
];
285 /* struct rx_ring is the structure representing the adaptor's local
286 * reference(s) to the rings
289 struct fbr_lookup
*fbr
[NUM_FBRS
];
290 void *ps_ring_virtaddr
;
291 dma_addr_t ps_ring_physaddr
;
295 struct rx_status_block
*rx_status_block
;
296 dma_addr_t rx_status_bus
;
298 struct list_head recv_list
;
303 bool unfinished_receives
;
307 /* word 2 of the control bits in the Tx Descriptor ring for the ET-1310
309 * 0-15: length of packet
312 * 29-31: VLAN priority
314 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
316 * 0: last packet in the sequence
317 * 1: first packet in the sequence
318 * 2: interrupt the processor when this pkt sent
319 * 3: Control word - no packet data
320 * 4: Issue half-duplex backpressure : XON/XOFF
321 * 5: send pause frame
322 * 6: Tx frame has error
326 * 10: Packet is a Huge packet
327 * 11: append VLAN tag
328 * 12: IP checksum assist
329 * 13: TCP checksum assist
330 * 14: UDP checksum assist
332 #define TXDESC_FLAG_LASTPKT 0x0001
333 #define TXDESC_FLAG_FIRSTPKT 0x0002
334 #define TXDESC_FLAG_INTPROC 0x0004
336 /* struct tx_desc represents each descriptor on the ring */
340 u32 len_vlan
; /* control words how to xmit the */
341 u32 flags
; /* data (detailed above) */
344 /* The status of the Tx DMA engine it sits in free memory, and is pointed to
345 * by 0x101c / 0x1020. This is a DMA10 type
348 /* TCB (Transmit Control Block: Host Side) */
350 struct tcb
*next
; /* Next entry in ring */
351 u32 count
; /* Used to spot stuck/lost packets */
352 u32 stale
; /* Used to spot stuck/lost packets */
353 struct sk_buff
*skb
; /* Network skb we are tied to */
354 u32 index
; /* Ring indexes */
358 /* Structure representing our local reference(s) to the ring */
360 /* TCB (Transmit Control Block) memory and lists */
361 struct tcb
*tcb_ring
;
363 /* List of TCBs that are ready to be used */
364 struct tcb
*tcb_qhead
;
365 struct tcb
*tcb_qtail
;
367 /* list of TCBs that are currently being sent. */
368 struct tcb
*send_head
;
369 struct tcb
*send_tail
;
372 /* The actual descriptor ring */
373 struct tx_desc
*tx_desc_ring
;
374 dma_addr_t tx_desc_ring_pa
;
376 /* send_idx indicates where we last wrote to in the descriptor ring. */
379 /* The location of the write-back status block */
381 dma_addr_t tx_status_pa
;
383 /* Packets since the last IRQ: used for interrupt coalescing */
387 /* Do not change these values: if changed, then change also in respective
388 * TXdma and Rxdma engines
390 #define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */
393 /* These values are all superseded by registry entries to facilitate tuning.
394 * Once the desired performance has been achieved, the optimal registry values
395 * should be re-populated to these #defines:
397 #define TX_ERROR_PERIOD 1000
399 #define LO_MARK_PERCENT_FOR_PSR 15
400 #define LO_MARK_PERCENT_FOR_RX 15
402 /* RFD (Receive Frame Descriptor) */
404 struct list_head list_node
;
406 u32 len
; /* total size of receive frame */
413 #define FLOW_TXONLY 1
414 #define FLOW_RXONLY 2
417 /* Struct to define some device statistics */
419 u32 multicast_pkts_rcvd
;
420 u32 rcvd_pkts_dropped
;
424 u32 tx_excessive_collisions
;
425 u32 tx_first_collisions
;
426 u32 tx_late_collisions
;
434 u32 rx_code_violations
;
437 u32 interrupt_status
;
440 /* The private adapter structure */
441 struct et131x_adapter
{
442 struct net_device
*netdev
;
443 struct pci_dev
*pdev
;
444 struct mii_bus
*mii_bus
;
445 struct napi_struct napi
;
447 /* Flags that indicate current state of the adapter */
450 /* local link state, to determine if a state change has occurred */
454 u8 rom_addr
[ETH_ALEN
];
459 spinlock_t tcb_send_qlock
; /* protects the tx_ring send tcb list */
460 spinlock_t tcb_ready_qlock
; /* protects the tx_ring ready tcb list */
461 spinlock_t rcv_lock
; /* protects the rx_ring receive list */
463 /* Packet Filter and look ahead size */
467 u32 multicast_addr_count
;
468 u8 multicast_list
[NIC_MAX_MCAST_LIST
][ETH_ALEN
];
470 /* Pointer to the device's PCI register space */
471 struct address_map __iomem
*regs
;
473 /* Registry parameters */
474 u8 wanted_flow
; /* Flow we want for 802.3x flow control */
475 u32 registry_jumbo_packet
; /* Max supported ethernet packet size */
477 /* Derived from the registry: */
478 u8 flow
; /* flow control validated by the far-end */
480 /* Minimize init-time */
481 struct timer_list error_timer
;
483 /* variable putting the phy into coma mode when boot up with no cable
484 * plugged in after 5 seconds
488 /* Tx Memory Variables */
489 struct tx_ring tx_ring
;
491 /* Rx Memory Variables */
492 struct rx_ring rx_ring
;
494 struct ce_stats stats
;
497 static int eeprom_wait_ready(struct pci_dev
*pdev
, u32
*status
)
502 /* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
503 * bits 7,1:0 both equal to 1, at least once after reset.
504 * Subsequent operations need only to check that bits 1:0 are equal
505 * to 1 prior to starting a single byte read/write
507 for (i
= 0; i
< MAX_NUM_REGISTER_POLLS
; i
++) {
508 if (pci_read_config_dword(pdev
, LBCIF_DWORD1_GROUP
, ®
))
511 /* I2C idle and Phy Queue Avail both true */
512 if ((reg
& 0x3000) == 0x3000) {
521 static int eeprom_write(struct et131x_adapter
*adapter
, u32 addr
, u8 data
)
523 struct pci_dev
*pdev
= adapter
->pdev
;
531 /* For an EEPROM, an I2C single byte write is defined as a START
532 * condition followed by the device address, EEPROM address, one byte
533 * of data and a STOP condition. The STOP condition will trigger the
534 * EEPROM's internally timed write cycle to the nonvolatile memory.
535 * All inputs are disabled during this write cycle and the EEPROM will
536 * not respond to any access until the internal write is complete.
538 err
= eeprom_wait_ready(pdev
, NULL
);
542 /* 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
543 * and bits 1:0 both =0. Bit 5 should be set according to the
544 * type of EEPROM being accessed (1=two byte addressing, 0=one
547 if (pci_write_config_byte(pdev
, LBCIF_CONTROL_REGISTER
,
548 LBCIF_CONTROL_LBCIF_ENABLE
|
549 LBCIF_CONTROL_I2C_WRITE
))
552 /* Prepare EEPROM address for Step 3 */
553 for (retries
= 0; retries
< MAX_NUM_WRITE_RETRIES
; retries
++) {
554 if (pci_write_config_dword(pdev
, LBCIF_ADDRESS_REGISTER
, addr
))
556 /* Write the data to the LBCIF Data Register (the I2C write
559 if (pci_write_config_byte(pdev
, LBCIF_DATA_REGISTER
, data
))
561 /* Monitor bit 1:0 of the LBCIF Status Register. When bits
562 * 1:0 are both equal to 1, the I2C write has completed and the
563 * internal write cycle of the EEPROM is about to start.
564 * (bits 1:0 = 01 is a legal state while waiting from both
565 * equal to 1, but bits 1:0 = 10 is invalid and implies that
566 * something is broken).
568 err
= eeprom_wait_ready(pdev
, &status
);
572 /* Check bit 3 of the LBCIF Status Register. If equal to 1,
573 * an error has occurred.Don't break here if we are revision
574 * 1, this is so we do a blind write for load bug.
576 if ((status
& LBCIF_STATUS_GENERAL_ERROR
) &&
577 adapter
->pdev
->revision
== 0)
580 /* Check bit 2 of the LBCIF Status Register. If equal to 1 an
581 * ACK error has occurred on the address phase of the write.
582 * This could be due to an actual hardware failure or the
583 * EEPROM may still be in its internal write cycle from a
584 * previous write. This write operation was ignored and must be
587 if (status
& LBCIF_STATUS_ACK_ERROR
) {
588 /* This could be due to an actual hardware failure
589 * or the EEPROM may still be in its internal write
590 * cycle from a previous write. This write operation
591 * was ignored and must be repeated later.
604 if (pci_write_config_byte(pdev
, LBCIF_CONTROL_REGISTER
,
605 LBCIF_CONTROL_LBCIF_ENABLE
))
608 /* Do read until internal ACK_ERROR goes away meaning write
612 pci_write_config_dword(pdev
,
613 LBCIF_ADDRESS_REGISTER
,
616 pci_read_config_dword(pdev
,
619 } while ((val
& 0x00010000) == 0);
620 } while (val
& 0x00040000);
622 if ((val
& 0xFF00) != 0xC000 || index
== 10000)
626 return writeok
? 0 : -EIO
;
629 static int eeprom_read(struct et131x_adapter
*adapter
, u32 addr
, u8
*pdata
)
631 struct pci_dev
*pdev
= adapter
->pdev
;
635 /* A single byte read is similar to the single byte write, with the
636 * exception of the data flow:
638 err
= eeprom_wait_ready(pdev
, NULL
);
641 /* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
642 * and bits 1:0 both =0. Bit 5 should be set according to the type
643 * of EEPROM being accessed (1=two byte addressing, 0=one byte
646 if (pci_write_config_byte(pdev
, LBCIF_CONTROL_REGISTER
,
647 LBCIF_CONTROL_LBCIF_ENABLE
))
649 /* Write the address to the LBCIF Address Register (I2C read will
652 if (pci_write_config_dword(pdev
, LBCIF_ADDRESS_REGISTER
, addr
))
654 /* Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
655 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
658 err
= eeprom_wait_ready(pdev
, &status
);
661 /* Regardless of error status, read data byte from LBCIF Data
666 return (status
& LBCIF_STATUS_ACK_ERROR
) ? -EIO
: 0;
669 static int et131x_init_eeprom(struct et131x_adapter
*adapter
)
671 struct pci_dev
*pdev
= adapter
->pdev
;
674 pci_read_config_byte(pdev
, ET1310_PCI_EEPROM_STATUS
, &eestatus
);
676 /* THIS IS A WORKAROUND:
677 * I need to call this function twice to get my card in a
678 * LG M1 Express Dual running. I tried also a msleep before this
679 * function, because I thought there could be some time conditions
680 * but it didn't work. Call the whole function twice also work.
682 if (pci_read_config_byte(pdev
, ET1310_PCI_EEPROM_STATUS
, &eestatus
)) {
684 "Could not read PCI config space for EEPROM Status\n");
688 /* Determine if the error(s) we care about are present. If they are
689 * present we need to fail.
691 if (eestatus
& 0x4C) {
692 int write_failed
= 0;
694 if (pdev
->revision
== 0x01) {
696 static const u8 eedata
[4] = { 0xFE, 0x13, 0x10, 0xFF };
698 /* Re-write the first 4 bytes if we have an eeprom
699 * present and the revision id is 1, this fixes the
700 * corruption seen with 1310 B Silicon
702 for (i
= 0; i
< 3; i
++)
703 if (eeprom_write(adapter
, i
, eedata
[i
]) < 0)
706 if (pdev
->revision
!= 0x01 || write_failed
) {
708 "Fatal EEPROM Status Error - 0x%04x\n",
711 /* This error could mean that there was an error
712 * reading the eeprom or that the eeprom doesn't exist.
713 * We will treat each case the same and not try to
714 * gather additional information that normally would
715 * come from the eeprom, like MAC Address
717 adapter
->has_eeprom
= false;
721 adapter
->has_eeprom
= true;
723 /* Read the EEPROM for information regarding LED behavior. Refer to
724 * et131x_xcvr_init() for its use.
726 eeprom_read(adapter
, 0x70, &adapter
->eeprom_data
[0]);
727 eeprom_read(adapter
, 0x71, &adapter
->eeprom_data
[1]);
729 if (adapter
->eeprom_data
[0] != 0xcd)
730 /* Disable all optional features */
731 adapter
->eeprom_data
[1] = 0x00;
736 static void et131x_rx_dma_enable(struct et131x_adapter
*adapter
)
738 /* Setup the receive dma configuration register for normal operation */
739 u32 csr
= ET_RXDMA_CSR_FBR1_ENABLE
;
740 struct rx_ring
*rx_ring
= &adapter
->rx_ring
;
742 if (rx_ring
->fbr
[1]->buffsize
== 4096)
743 csr
|= ET_RXDMA_CSR_FBR1_SIZE_LO
;
744 else if (rx_ring
->fbr
[1]->buffsize
== 8192)
745 csr
|= ET_RXDMA_CSR_FBR1_SIZE_HI
;
746 else if (rx_ring
->fbr
[1]->buffsize
== 16384)
747 csr
|= ET_RXDMA_CSR_FBR1_SIZE_LO
| ET_RXDMA_CSR_FBR1_SIZE_HI
;
749 csr
|= ET_RXDMA_CSR_FBR0_ENABLE
;
750 if (rx_ring
->fbr
[0]->buffsize
== 256)
751 csr
|= ET_RXDMA_CSR_FBR0_SIZE_LO
;
752 else if (rx_ring
->fbr
[0]->buffsize
== 512)
753 csr
|= ET_RXDMA_CSR_FBR0_SIZE_HI
;
754 else if (rx_ring
->fbr
[0]->buffsize
== 1024)
755 csr
|= ET_RXDMA_CSR_FBR0_SIZE_LO
| ET_RXDMA_CSR_FBR0_SIZE_HI
;
756 writel(csr
, &adapter
->regs
->rxdma
.csr
);
758 csr
= readl(&adapter
->regs
->rxdma
.csr
);
759 if (csr
& ET_RXDMA_CSR_HALT_STATUS
) {
761 csr
= readl(&adapter
->regs
->rxdma
.csr
);
762 if (csr
& ET_RXDMA_CSR_HALT_STATUS
) {
763 dev_err(&adapter
->pdev
->dev
,
764 "RX Dma failed to exit halt state. CSR 0x%08x\n",
770 static void et131x_rx_dma_disable(struct et131x_adapter
*adapter
)
773 /* Setup the receive dma configuration register */
774 writel(ET_RXDMA_CSR_HALT
| ET_RXDMA_CSR_FBR1_ENABLE
,
775 &adapter
->regs
->rxdma
.csr
);
776 csr
= readl(&adapter
->regs
->rxdma
.csr
);
777 if (!(csr
& ET_RXDMA_CSR_HALT_STATUS
)) {
779 csr
= readl(&adapter
->regs
->rxdma
.csr
);
780 if (!(csr
& ET_RXDMA_CSR_HALT_STATUS
))
781 dev_err(&adapter
->pdev
->dev
,
782 "RX Dma failed to enter halt state. CSR 0x%08x\n",
787 static void et131x_tx_dma_enable(struct et131x_adapter
*adapter
)
789 /* Setup the transmit dma configuration register for normal
792 writel(ET_TXDMA_SNGL_EPKT
| (PARM_DMA_CACHE_DEF
<< ET_TXDMA_CACHE_SHIFT
),
793 &adapter
->regs
->txdma
.csr
);
796 static inline void add_10bit(u32
*v
, int n
)
798 *v
= INDEX10(*v
+ n
) | (*v
& ET_DMA10_WRAP
);
801 static inline void add_12bit(u32
*v
, int n
)
803 *v
= INDEX12(*v
+ n
) | (*v
& ET_DMA12_WRAP
);
806 static void et1310_config_mac_regs1(struct et131x_adapter
*adapter
)
808 struct mac_regs __iomem
*macregs
= &adapter
->regs
->mac
;
813 /* First we need to reset everything. Write to MAC configuration
814 * register 1 to perform reset.
816 writel(ET_MAC_CFG1_SOFT_RESET
| ET_MAC_CFG1_SIM_RESET
|
817 ET_MAC_CFG1_RESET_RXMC
| ET_MAC_CFG1_RESET_TXMC
|
818 ET_MAC_CFG1_RESET_RXFUNC
| ET_MAC_CFG1_RESET_TXFUNC
,
821 /* Next lets configure the MAC Inter-packet gap register */
822 ipg
= 0x38005860; /* IPG1 0x38 IPG2 0x58 B2B 0x60 */
823 ipg
|= 0x50 << 8; /* ifg enforce 0x50 */
824 writel(ipg
, ¯egs
->ipg
);
826 /* Next lets configure the MAC Half Duplex register */
827 /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
828 writel(0x00A1F037, ¯egs
->hfdp
);
830 /* Next lets configure the MAC Interface Control register */
831 writel(0, ¯egs
->if_ctrl
);
833 writel(ET_MAC_MIIMGMT_CLK_RST
, ¯egs
->mii_mgmt_cfg
);
835 /* Next lets configure the MAC Station Address register. These
836 * values are read from the EEPROM during initialization and stored
837 * in the adapter structure. We write what is stored in the adapter
838 * structure to the MAC Station Address registers high and low. This
839 * station address is used for generating and checking pause control
842 station2
= (adapter
->addr
[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT
) |
843 (adapter
->addr
[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT
);
844 station1
= (adapter
->addr
[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT
) |
845 (adapter
->addr
[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT
) |
846 (adapter
->addr
[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT
) |
848 writel(station1
, ¯egs
->station_addr_1
);
849 writel(station2
, ¯egs
->station_addr_2
);
851 /* Max ethernet packet in bytes that will be passed by the mac without
852 * being truncated. Allow the MAC to pass 4 more than our max packet
853 * size. This is 4 for the Ethernet CRC.
855 * Packets larger than (registry_jumbo_packet) that do not contain a
856 * VLAN ID will be dropped by the Rx function.
858 writel(adapter
->registry_jumbo_packet
+ 4, ¯egs
->max_fm_len
);
860 /* clear out MAC config reset */
861 writel(0, ¯egs
->cfg1
);
864 static void et1310_config_mac_regs2(struct et131x_adapter
*adapter
)
867 struct mac_regs __iomem
*mac
= &adapter
->regs
->mac
;
868 struct phy_device
*phydev
= adapter
->netdev
->phydev
;
874 ctl
= readl(&adapter
->regs
->txmac
.ctl
);
875 cfg1
= readl(&mac
->cfg1
);
876 cfg2
= readl(&mac
->cfg2
);
877 ifctrl
= readl(&mac
->if_ctrl
);
879 /* Set up the if mode bits */
880 cfg2
&= ~ET_MAC_CFG2_IFMODE_MASK
;
881 if (phydev
->speed
== SPEED_1000
) {
882 cfg2
|= ET_MAC_CFG2_IFMODE_1000
;
883 ifctrl
&= ~ET_MAC_IFCTRL_PHYMODE
;
885 cfg2
|= ET_MAC_CFG2_IFMODE_100
;
886 ifctrl
|= ET_MAC_IFCTRL_PHYMODE
;
889 cfg1
|= ET_MAC_CFG1_RX_ENABLE
| ET_MAC_CFG1_TX_ENABLE
|
892 cfg1
&= ~(ET_MAC_CFG1_LOOPBACK
| ET_MAC_CFG1_RX_FLOW
);
893 if (adapter
->flow
== FLOW_RXONLY
|| adapter
->flow
== FLOW_BOTH
)
894 cfg1
|= ET_MAC_CFG1_RX_FLOW
;
895 writel(cfg1
, &mac
->cfg1
);
897 /* Now we need to initialize the MAC Configuration 2 register */
898 /* preamble 7, check length, huge frame off, pad crc, crc enable
901 cfg2
|= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT
;
902 cfg2
|= ET_MAC_CFG2_IFMODE_LEN_CHECK
;
903 cfg2
|= ET_MAC_CFG2_IFMODE_PAD_CRC
;
904 cfg2
|= ET_MAC_CFG2_IFMODE_CRC_ENABLE
;
905 cfg2
&= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME
;
906 cfg2
&= ~ET_MAC_CFG2_IFMODE_FULL_DPLX
;
908 if (phydev
->duplex
== DUPLEX_FULL
)
909 cfg2
|= ET_MAC_CFG2_IFMODE_FULL_DPLX
;
911 ifctrl
&= ~ET_MAC_IFCTRL_GHDMODE
;
912 if (phydev
->duplex
== DUPLEX_HALF
)
913 ifctrl
|= ET_MAC_IFCTRL_GHDMODE
;
915 writel(ifctrl
, &mac
->if_ctrl
);
916 writel(cfg2
, &mac
->cfg2
);
921 cfg1
= readl(&mac
->cfg1
);
922 } while ((cfg1
& ET_MAC_CFG1_WAIT
) != ET_MAC_CFG1_WAIT
&& delay
< 100);
925 dev_warn(&adapter
->pdev
->dev
,
926 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
930 ctl
|= ET_TX_CTRL_TXMAC_ENABLE
| ET_TX_CTRL_FC_DISABLE
;
931 writel(ctl
, &adapter
->regs
->txmac
.ctl
);
933 if (adapter
->flags
& FMP_ADAPTER_LOWER_POWER
) {
934 et131x_rx_dma_enable(adapter
);
935 et131x_tx_dma_enable(adapter
);
939 static int et1310_in_phy_coma(struct et131x_adapter
*adapter
)
941 u32 pmcsr
= readl(&adapter
->regs
->global
.pm_csr
);
943 return ET_PM_PHY_SW_COMA
& pmcsr
? 1 : 0;
946 static void et1310_setup_device_for_multicast(struct et131x_adapter
*adapter
)
948 struct rxmac_regs __iomem
*rxmac
= &adapter
->regs
->rxmac
;
954 /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
955 * the multi-cast LIST. If it is NOT specified, (and "ALL" is not
956 * specified) then we should pass NO multi-cast addresses to the
959 if (adapter
->packet_filter
& ET131X_PACKET_TYPE_MULTICAST
) {
962 /* Loop through our multicast array and set up the device */
963 for (i
= 0; i
< adapter
->multicast_addr_count
; i
++) {
966 result
= ether_crc(6, adapter
->multicast_list
[i
]);
968 result
= (result
& 0x3F800000) >> 23;
971 hash1
|= (1 << result
);
972 } else if ((31 < result
) && (result
< 64)) {
974 hash2
|= (1 << result
);
975 } else if ((63 < result
) && (result
< 96)) {
977 hash3
|= (1 << result
);
980 hash4
|= (1 << result
);
985 /* Write out the new hash to the device */
986 if (!et1310_in_phy_coma(adapter
)) {
987 writel(hash1
, &rxmac
->multi_hash1
);
988 writel(hash2
, &rxmac
->multi_hash2
);
989 writel(hash3
, &rxmac
->multi_hash3
);
990 writel(hash4
, &rxmac
->multi_hash4
);
994 static void et1310_setup_device_for_unicast(struct et131x_adapter
*adapter
)
996 struct rxmac_regs __iomem
*rxmac
= &adapter
->regs
->rxmac
;
1001 /* Set up unicast packet filter reg 3 to be the first two octets of
1002 * the MAC address for both address
1004 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
1005 * MAC address for second address
1007 * Set up unicast packet filter reg 3 to be the octets 2 - 5 of the
1008 * MAC address for first address
1010 uni_pf3
= (adapter
->addr
[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT
) |
1011 (adapter
->addr
[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT
) |
1012 (adapter
->addr
[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT
) |
1015 uni_pf2
= (adapter
->addr
[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT
) |
1016 (adapter
->addr
[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT
) |
1017 (adapter
->addr
[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT
) |
1020 uni_pf1
= (adapter
->addr
[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT
) |
1021 (adapter
->addr
[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT
) |
1022 (adapter
->addr
[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT
) |
1025 if (!et1310_in_phy_coma(adapter
)) {
1026 writel(uni_pf1
, &rxmac
->uni_pf_addr1
);
1027 writel(uni_pf2
, &rxmac
->uni_pf_addr2
);
1028 writel(uni_pf3
, &rxmac
->uni_pf_addr3
);
1032 static void et1310_config_rxmac_regs(struct et131x_adapter
*adapter
)
1034 struct rxmac_regs __iomem
*rxmac
= &adapter
->regs
->rxmac
;
1035 struct phy_device
*phydev
= adapter
->netdev
->phydev
;
1041 /* Disable the MAC while it is being configured (also disable WOL) */
1042 writel(0x8, &rxmac
->ctrl
);
1044 /* Initialize WOL to disabled. */
1045 writel(0, &rxmac
->crc0
);
1046 writel(0, &rxmac
->crc12
);
1047 writel(0, &rxmac
->crc34
);
1049 /* We need to set the WOL mask0 - mask4 next. We initialize it to
1050 * its default Values of 0x00000000 because there are not WOL masks
1053 for (wolw
= &rxmac
->mask0_word0
; wolw
<= &rxmac
->mask4_word3
; wolw
++)
1056 /* Lets setup the WOL Source Address */
1057 sa_lo
= (adapter
->addr
[2] << ET_RX_WOL_LO_SA3_SHIFT
) |
1058 (adapter
->addr
[3] << ET_RX_WOL_LO_SA4_SHIFT
) |
1059 (adapter
->addr
[4] << ET_RX_WOL_LO_SA5_SHIFT
) |
1061 writel(sa_lo
, &rxmac
->sa_lo
);
1063 sa_hi
= (u32
)(adapter
->addr
[0] << ET_RX_WOL_HI_SA1_SHIFT
) |
1065 writel(sa_hi
, &rxmac
->sa_hi
);
1067 /* Disable all Packet Filtering */
1068 writel(0, &rxmac
->pf_ctrl
);
1070 /* Let's initialize the Unicast Packet filtering address */
1071 if (adapter
->packet_filter
& ET131X_PACKET_TYPE_DIRECTED
) {
1072 et1310_setup_device_for_unicast(adapter
);
1073 pf_ctrl
|= ET_RX_PFCTRL_UNICST_FILTER_ENABLE
;
1075 writel(0, &rxmac
->uni_pf_addr1
);
1076 writel(0, &rxmac
->uni_pf_addr2
);
1077 writel(0, &rxmac
->uni_pf_addr3
);
1080 /* Let's initialize the Multicast hash */
1081 if (!(adapter
->packet_filter
& ET131X_PACKET_TYPE_ALL_MULTICAST
)) {
1082 pf_ctrl
|= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE
;
1083 et1310_setup_device_for_multicast(adapter
);
1086 /* Runt packet filtering. Didn't work in version A silicon. */
1087 pf_ctrl
|= (NIC_MIN_PACKET_SIZE
+ 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT
;
1088 pf_ctrl
|= ET_RX_PFCTRL_FRAG_FILTER_ENABLE
;
1090 if (adapter
->registry_jumbo_packet
> 8192)
1091 /* In order to transmit jumbo packets greater than 8k, the
1092 * FIFO between RxMAC and RxDMA needs to be reduced in size
1093 * to (16k - Jumbo packet size). In order to implement this,
1094 * we must use "cut through" mode in the RxMAC, which chops
1095 * packets down into segments which are (max_size * 16). In
1096 * this case we selected 256 bytes, since this is the size of
1097 * the PCI-Express TLP's that the 1310 uses.
1099 * seg_en on, fc_en off, size 0x10
1101 writel(0x41, &rxmac
->mcif_ctrl_max_seg
);
1103 writel(0, &rxmac
->mcif_ctrl_max_seg
);
1105 writel(0, &rxmac
->mcif_water_mark
);
1106 writel(0, &rxmac
->mif_ctrl
);
1107 writel(0, &rxmac
->space_avail
);
1109 /* Initialize the mif_ctrl register
1110 * bit 3: Receive code error. One or more nibbles were signaled as
1111 * errors during the reception of the packet. Clear this
1112 * bit in Gigabit, set it in 100Mbit. This was derived
1113 * experimentally at UNH.
1114 * bit 4: Receive CRC error. The packet's CRC did not match the
1115 * internally generated CRC.
1116 * bit 5: Receive length check error. Indicates that frame length
1117 * field value in the packet does not match the actual data
1118 * byte length and is not a type field.
1119 * bit 16: Receive frame truncated.
1120 * bit 17: Drop packet enable
1122 if (phydev
&& phydev
->speed
== SPEED_100
)
1123 writel(0x30038, &rxmac
->mif_ctrl
);
1125 writel(0x30030, &rxmac
->mif_ctrl
);
1127 /* Finally we initialize RxMac to be enabled & WOL disabled. Packet
1128 * filter is always enabled since it is where the runt packets are
1129 * supposed to be dropped. For version A silicon, runt packet
1130 * dropping doesn't work, so it is disabled in the pf_ctrl register,
1131 * but we still leave the packet filter on.
1133 writel(pf_ctrl
, &rxmac
->pf_ctrl
);
1134 writel(ET_RX_CTRL_RXMAC_ENABLE
| ET_RX_CTRL_WOL_DISABLE
, &rxmac
->ctrl
);
1137 static void et1310_config_txmac_regs(struct et131x_adapter
*adapter
)
1139 struct txmac_regs __iomem
*txmac
= &adapter
->regs
->txmac
;
1141 /* We need to update the Control Frame Parameters
1142 * cfpt - control frame pause timer set to 64 (0x40)
1143 * cfep - control frame extended pause timer set to 0x0
1145 if (adapter
->flow
== FLOW_NONE
)
1146 writel(0, &txmac
->cf_param
);
1148 writel(0x40, &txmac
->cf_param
);
1151 static void et1310_config_macstat_regs(struct et131x_adapter
*adapter
)
1153 struct macstat_regs __iomem
*macstat
= &adapter
->regs
->macstat
;
1156 /* initialize all the macstat registers to zero on the device */
1157 for (reg
= &macstat
->txrx_0_64_byte_frames
;
1158 reg
<= &macstat
->carry_reg2
; reg
++)
1161 /* Unmask any counters that we want to track the overflow of.
1162 * Initially this will be all counters. It may become clear later
1163 * that we do not need to track all counters.
1165 writel(0xFFFFBE32, &macstat
->carry_reg1_mask
);
1166 writel(0xFFFE7E8B, &macstat
->carry_reg2_mask
);
1169 static int et131x_phy_mii_read(struct et131x_adapter
*adapter
, u8 addr
,
1172 struct mac_regs __iomem
*mac
= &adapter
->regs
->mac
;
1179 /* Save a local copy of the registers we are dealing with so we can
1182 mii_addr
= readl(&mac
->mii_mgmt_addr
);
1183 mii_cmd
= readl(&mac
->mii_mgmt_cmd
);
1185 /* Stop the current operation */
1186 writel(0, &mac
->mii_mgmt_cmd
);
1188 /* Set up the register we need to read from on the correct PHY */
1189 writel(ET_MAC_MII_ADDR(addr
, reg
), &mac
->mii_mgmt_addr
);
1191 writel(0x1, &mac
->mii_mgmt_cmd
);
1196 mii_indicator
= readl(&mac
->mii_mgmt_indicator
);
1197 } while ((mii_indicator
& ET_MAC_MGMT_WAIT
) && delay
< 50);
1199 /* If we hit the max delay, we could not read the register */
1201 dev_warn(&adapter
->pdev
->dev
,
1202 "reg 0x%08x could not be read\n", reg
);
1203 dev_warn(&adapter
->pdev
->dev
, "status is 0x%08x\n",
1210 /* If we hit here we were able to read the register and we need to
1211 * return the value to the caller
1213 *value
= readl(&mac
->mii_mgmt_stat
) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK
;
1216 /* Stop the read operation */
1217 writel(0, &mac
->mii_mgmt_cmd
);
1219 /* set the registers we touched back to the state at which we entered
1222 writel(mii_addr
, &mac
->mii_mgmt_addr
);
1223 writel(mii_cmd
, &mac
->mii_mgmt_cmd
);
1228 static int et131x_mii_read(struct et131x_adapter
*adapter
, u8 reg
, u16
*value
)
1230 struct phy_device
*phydev
= adapter
->netdev
->phydev
;
1235 return et131x_phy_mii_read(adapter
, phydev
->mdio
.addr
, reg
, value
);
1238 static int et131x_mii_write(struct et131x_adapter
*adapter
, u8 addr
, u8 reg
,
1241 struct mac_regs __iomem
*mac
= &adapter
->regs
->mac
;
1248 /* Save a local copy of the registers we are dealing with so we can
1251 mii_addr
= readl(&mac
->mii_mgmt_addr
);
1252 mii_cmd
= readl(&mac
->mii_mgmt_cmd
);
1254 /* Stop the current operation */
1255 writel(0, &mac
->mii_mgmt_cmd
);
1257 /* Set up the register we need to write to on the correct PHY */
1258 writel(ET_MAC_MII_ADDR(addr
, reg
), &mac
->mii_mgmt_addr
);
1260 /* Add the value to write to the registers to the mac */
1261 writel(value
, &mac
->mii_mgmt_ctrl
);
1266 mii_indicator
= readl(&mac
->mii_mgmt_indicator
);
1267 } while ((mii_indicator
& ET_MAC_MGMT_BUSY
) && delay
< 100);
1269 /* If we hit the max delay, we could not write the register */
1273 dev_warn(&adapter
->pdev
->dev
,
1274 "reg 0x%08x could not be written", reg
);
1275 dev_warn(&adapter
->pdev
->dev
, "status is 0x%08x\n",
1277 dev_warn(&adapter
->pdev
->dev
, "command is 0x%08x\n",
1278 readl(&mac
->mii_mgmt_cmd
));
1280 et131x_mii_read(adapter
, reg
, &tmp
);
1284 /* Stop the write operation */
1285 writel(0, &mac
->mii_mgmt_cmd
);
1287 /* set the registers we touched back to the state at which we entered
1290 writel(mii_addr
, &mac
->mii_mgmt_addr
);
1291 writel(mii_cmd
, &mac
->mii_mgmt_cmd
);
1296 static void et1310_phy_read_mii_bit(struct et131x_adapter
*adapter
,
1302 u16 mask
= 1 << bitnum
;
1304 et131x_mii_read(adapter
, regnum
, ®
);
1306 *value
= (reg
& mask
) >> bitnum
;
1309 static void et1310_config_flow_control(struct et131x_adapter
*adapter
)
1311 struct phy_device
*phydev
= adapter
->netdev
->phydev
;
1313 if (phydev
->duplex
== DUPLEX_HALF
) {
1314 adapter
->flow
= FLOW_NONE
;
1316 char remote_pause
, remote_async_pause
;
1318 et1310_phy_read_mii_bit(adapter
, 5, 10, &remote_pause
);
1319 et1310_phy_read_mii_bit(adapter
, 5, 11, &remote_async_pause
);
1321 if (remote_pause
&& remote_async_pause
) {
1322 adapter
->flow
= adapter
->wanted_flow
;
1323 } else if (remote_pause
&& !remote_async_pause
) {
1324 if (adapter
->wanted_flow
== FLOW_BOTH
)
1325 adapter
->flow
= FLOW_BOTH
;
1327 adapter
->flow
= FLOW_NONE
;
1328 } else if (!remote_pause
&& !remote_async_pause
) {
1329 adapter
->flow
= FLOW_NONE
;
1331 if (adapter
->wanted_flow
== FLOW_BOTH
)
1332 adapter
->flow
= FLOW_RXONLY
;
1334 adapter
->flow
= FLOW_NONE
;
1339 /* et1310_update_macstat_host_counters - Update local copy of the statistics */
1340 static void et1310_update_macstat_host_counters(struct et131x_adapter
*adapter
)
1342 struct ce_stats
*stats
= &adapter
->stats
;
1343 struct macstat_regs __iomem
*macstat
=
1344 &adapter
->regs
->macstat
;
1346 stats
->tx_collisions
+= readl(&macstat
->tx_total_collisions
);
1347 stats
->tx_first_collisions
+= readl(&macstat
->tx_single_collisions
);
1348 stats
->tx_deferred
+= readl(&macstat
->tx_deferred
);
1349 stats
->tx_excessive_collisions
+=
1350 readl(&macstat
->tx_multiple_collisions
);
1351 stats
->tx_late_collisions
+= readl(&macstat
->tx_late_collisions
);
1352 stats
->tx_underflows
+= readl(&macstat
->tx_undersize_frames
);
1353 stats
->tx_max_pkt_errs
+= readl(&macstat
->tx_oversize_frames
);
1355 stats
->rx_align_errs
+= readl(&macstat
->rx_align_errs
);
1356 stats
->rx_crc_errs
+= readl(&macstat
->rx_code_errs
);
1357 stats
->rcvd_pkts_dropped
+= readl(&macstat
->rx_drops
);
1358 stats
->rx_overflows
+= readl(&macstat
->rx_oversize_packets
);
1359 stats
->rx_code_violations
+= readl(&macstat
->rx_fcs_errs
);
1360 stats
->rx_length_errs
+= readl(&macstat
->rx_frame_len_errs
);
1361 stats
->rx_other_errs
+= readl(&macstat
->rx_fragment_packets
);
1364 /* et1310_handle_macstat_interrupt
1366 * One of the MACSTAT counters has wrapped. Update the local copy of
1367 * the statistics held in the adapter structure, checking the "wrap"
1368 * bit for each counter.
1370 static void et1310_handle_macstat_interrupt(struct et131x_adapter
*adapter
)
1375 /* Read the interrupt bits from the register(s). These are Clear On
1378 carry_reg1
= readl(&adapter
->regs
->macstat
.carry_reg1
);
1379 carry_reg2
= readl(&adapter
->regs
->macstat
.carry_reg2
);
1381 writel(carry_reg1
, &adapter
->regs
->macstat
.carry_reg1
);
1382 writel(carry_reg2
, &adapter
->regs
->macstat
.carry_reg2
);
1384 /* We need to do update the host copy of all the MAC_STAT counters.
1385 * For each counter, check it's overflow bit. If the overflow bit is
1386 * set, then increment the host version of the count by one complete
1387 * revolution of the counter. This routine is called when the counter
1388 * block indicates that one of the counters has wrapped.
1390 if (carry_reg1
& (1 << 14))
1391 adapter
->stats
.rx_code_violations
+= COUNTER_WRAP_16_BIT
;
1392 if (carry_reg1
& (1 << 8))
1393 adapter
->stats
.rx_align_errs
+= COUNTER_WRAP_12_BIT
;
1394 if (carry_reg1
& (1 << 7))
1395 adapter
->stats
.rx_length_errs
+= COUNTER_WRAP_16_BIT
;
1396 if (carry_reg1
& (1 << 2))
1397 adapter
->stats
.rx_other_errs
+= COUNTER_WRAP_16_BIT
;
1398 if (carry_reg1
& (1 << 6))
1399 adapter
->stats
.rx_crc_errs
+= COUNTER_WRAP_16_BIT
;
1400 if (carry_reg1
& (1 << 3))
1401 adapter
->stats
.rx_overflows
+= COUNTER_WRAP_16_BIT
;
1402 if (carry_reg1
& (1 << 0))
1403 adapter
->stats
.rcvd_pkts_dropped
+= COUNTER_WRAP_16_BIT
;
1404 if (carry_reg2
& (1 << 16))
1405 adapter
->stats
.tx_max_pkt_errs
+= COUNTER_WRAP_12_BIT
;
1406 if (carry_reg2
& (1 << 15))
1407 adapter
->stats
.tx_underflows
+= COUNTER_WRAP_12_BIT
;
1408 if (carry_reg2
& (1 << 6))
1409 adapter
->stats
.tx_first_collisions
+= COUNTER_WRAP_12_BIT
;
1410 if (carry_reg2
& (1 << 8))
1411 adapter
->stats
.tx_deferred
+= COUNTER_WRAP_12_BIT
;
1412 if (carry_reg2
& (1 << 5))
1413 adapter
->stats
.tx_excessive_collisions
+= COUNTER_WRAP_12_BIT
;
1414 if (carry_reg2
& (1 << 4))
1415 adapter
->stats
.tx_late_collisions
+= COUNTER_WRAP_12_BIT
;
1416 if (carry_reg2
& (1 << 2))
1417 adapter
->stats
.tx_collisions
+= COUNTER_WRAP_12_BIT
;
1420 static int et131x_mdio_read(struct mii_bus
*bus
, int phy_addr
, int reg
)
1422 struct net_device
*netdev
= bus
->priv
;
1423 struct et131x_adapter
*adapter
= netdev_priv(netdev
);
1427 ret
= et131x_phy_mii_read(adapter
, phy_addr
, reg
, &value
);
1435 static int et131x_mdio_write(struct mii_bus
*bus
, int phy_addr
,
1438 struct net_device
*netdev
= bus
->priv
;
1439 struct et131x_adapter
*adapter
= netdev_priv(netdev
);
1441 return et131x_mii_write(adapter
, phy_addr
, reg
, value
);
1444 /* et1310_phy_power_switch - PHY power control
1445 * @adapter: device to control
1446 * @down: true for off/false for back on
1448 * one hundred, ten, one thousand megs
1449 * How would you like to have your LAN accessed
1450 * Can't you see that this code processed
1451 * Phy power, phy power..
1453 static void et1310_phy_power_switch(struct et131x_adapter
*adapter
, bool down
)
1456 struct phy_device
*phydev
= adapter
->netdev
->phydev
;
1458 et131x_mii_read(adapter
, MII_BMCR
, &data
);
1459 data
&= ~BMCR_PDOWN
;
1462 et131x_mii_write(adapter
, phydev
->mdio
.addr
, MII_BMCR
, data
);
1465 /* et131x_xcvr_init - Init the phy if we are setting it into force mode */
1466 static void et131x_xcvr_init(struct et131x_adapter
*adapter
)
1469 struct phy_device
*phydev
= adapter
->netdev
->phydev
;
1471 /* Set the LED behavior such that LED 1 indicates speed (off =
1472 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
1473 * link and activity (on for link, blink off for activity).
1475 * NOTE: Some customizations have been added here for specific
1476 * vendors; The LED behavior is now determined by vendor data in the
1477 * EEPROM. However, the above description is the default.
1479 if ((adapter
->eeprom_data
[1] & 0x4) == 0) {
1480 et131x_mii_read(adapter
, PHY_LED_2
, &lcr2
);
1482 lcr2
&= (ET_LED2_LED_100TX
| ET_LED2_LED_1000T
);
1483 lcr2
|= (LED_VAL_LINKON_ACTIVE
<< LED_LINK_SHIFT
);
1485 if ((adapter
->eeprom_data
[1] & 0x8) == 0)
1486 lcr2
|= (LED_VAL_1000BT_100BTX
<< LED_TXRX_SHIFT
);
1488 lcr2
|= (LED_VAL_LINKON
<< LED_TXRX_SHIFT
);
1490 et131x_mii_write(adapter
, phydev
->mdio
.addr
, PHY_LED_2
, lcr2
);
1494 /* et131x_configure_global_regs - configure JAGCore global regs */
1495 static void et131x_configure_global_regs(struct et131x_adapter
*adapter
)
1497 struct global_regs __iomem
*regs
= &adapter
->regs
->global
;
1499 writel(0, ®s
->rxq_start_addr
);
1500 writel(INTERNAL_MEM_SIZE
- 1, ®s
->txq_end_addr
);
1502 if (adapter
->registry_jumbo_packet
< 2048) {
1503 /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
1504 * block of RAM that the driver can split between Tx
1505 * and Rx as it desires. Our default is to split it
1508 writel(PARM_RX_MEM_END_DEF
, ®s
->rxq_end_addr
);
1509 writel(PARM_RX_MEM_END_DEF
+ 1, ®s
->txq_start_addr
);
1510 } else if (adapter
->registry_jumbo_packet
< 8192) {
1511 /* For jumbo packets > 2k but < 8k, split 50-50. */
1512 writel(INTERNAL_MEM_RX_OFFSET
, ®s
->rxq_end_addr
);
1513 writel(INTERNAL_MEM_RX_OFFSET
+ 1, ®s
->txq_start_addr
);
1515 /* 9216 is the only packet size greater than 8k that
1516 * is available. The Tx buffer has to be big enough
1517 * for one whole packet on the Tx side. We'll make
1518 * the Tx 9408, and give the rest to Rx
1520 writel(0x01b3, ®s
->rxq_end_addr
);
1521 writel(0x01b4, ®s
->txq_start_addr
);
1524 /* Initialize the loopback register. Disable all loopbacks. */
1525 writel(0, ®s
->loopback
);
1527 writel(0, ®s
->msi_config
);
1529 /* By default, disable the watchdog timer. It will be enabled when
1530 * a packet is queued.
1532 writel(0, ®s
->watchdog_timer
);
1535 /* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */
1536 static void et131x_config_rx_dma_regs(struct et131x_adapter
*adapter
)
1538 struct rxdma_regs __iomem
*rx_dma
= &adapter
->regs
->rxdma
;
1539 struct rx_ring
*rx_local
= &adapter
->rx_ring
;
1540 struct fbr_desc
*fbr_entry
;
1543 unsigned long flags
;
1546 et131x_rx_dma_disable(adapter
);
1548 /* Load the completion writeback physical address */
1549 writel(upper_32_bits(rx_local
->rx_status_bus
), &rx_dma
->dma_wb_base_hi
);
1550 writel(lower_32_bits(rx_local
->rx_status_bus
), &rx_dma
->dma_wb_base_lo
);
1552 memset(rx_local
->rx_status_block
, 0, sizeof(struct rx_status_block
));
1554 /* Set the address and parameters of the packet status ring */
1555 writel(upper_32_bits(rx_local
->ps_ring_physaddr
), &rx_dma
->psr_base_hi
);
1556 writel(lower_32_bits(rx_local
->ps_ring_physaddr
), &rx_dma
->psr_base_lo
);
1557 writel(rx_local
->psr_entries
- 1, &rx_dma
->psr_num_des
);
1558 writel(0, &rx_dma
->psr_full_offset
);
1560 psr_num_des
= readl(&rx_dma
->psr_num_des
) & ET_RXDMA_PSR_NUM_DES_MASK
;
1561 writel((psr_num_des
* LO_MARK_PERCENT_FOR_PSR
) / 100,
1562 &rx_dma
->psr_min_des
);
1564 spin_lock_irqsave(&adapter
->rcv_lock
, flags
);
1566 /* These local variables track the PSR in the adapter structure */
1567 rx_local
->local_psr_full
= 0;
1569 for (id
= 0; id
< NUM_FBRS
; id
++) {
1570 u32 __iomem
*num_des
;
1571 u32 __iomem
*full_offset
;
1572 u32 __iomem
*min_des
;
1573 u32 __iomem
*base_hi
;
1574 u32 __iomem
*base_lo
;
1575 struct fbr_lookup
*fbr
= rx_local
->fbr
[id
];
1578 num_des
= &rx_dma
->fbr0_num_des
;
1579 full_offset
= &rx_dma
->fbr0_full_offset
;
1580 min_des
= &rx_dma
->fbr0_min_des
;
1581 base_hi
= &rx_dma
->fbr0_base_hi
;
1582 base_lo
= &rx_dma
->fbr0_base_lo
;
1584 num_des
= &rx_dma
->fbr1_num_des
;
1585 full_offset
= &rx_dma
->fbr1_full_offset
;
1586 min_des
= &rx_dma
->fbr1_min_des
;
1587 base_hi
= &rx_dma
->fbr1_base_hi
;
1588 base_lo
= &rx_dma
->fbr1_base_lo
;
1591 /* Now's the best time to initialize FBR contents */
1592 fbr_entry
= fbr
->ring_virtaddr
;
1593 for (entry
= 0; entry
< fbr
->num_entries
; entry
++) {
1594 fbr_entry
->addr_hi
= fbr
->bus_high
[entry
];
1595 fbr_entry
->addr_lo
= fbr
->bus_low
[entry
];
1596 fbr_entry
->word2
= entry
;
1600 /* Set the address and parameters of Free buffer ring 1 and 0 */
1601 writel(upper_32_bits(fbr
->ring_physaddr
), base_hi
);
1602 writel(lower_32_bits(fbr
->ring_physaddr
), base_lo
);
1603 writel(fbr
->num_entries
- 1, num_des
);
1604 writel(ET_DMA10_WRAP
, full_offset
);
1606 /* This variable tracks the free buffer ring 1 full position,
1607 * so it has to match the above.
1609 fbr
->local_full
= ET_DMA10_WRAP
;
1610 writel(((fbr
->num_entries
* LO_MARK_PERCENT_FOR_RX
) / 100) - 1,
1614 /* Program the number of packets we will receive before generating an
1616 * For version B silicon, this value gets updated once autoneg is
1619 writel(PARM_RX_NUM_BUFS_DEF
, &rx_dma
->num_pkt_done
);
1621 /* The "time_done" is not working correctly to coalesce interrupts
1622 * after a given time period, but rather is giving us an interrupt
1623 * regardless of whether we have received packets.
1624 * This value gets updated once autoneg is complete.
1626 writel(PARM_RX_TIME_INT_DEF
, &rx_dma
->max_pkt_time
);
1628 spin_unlock_irqrestore(&adapter
->rcv_lock
, flags
);
1631 /* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
1633 * Configure the transmit engine with the ring buffers we have created
1634 * and prepare it for use.
1636 static void et131x_config_tx_dma_regs(struct et131x_adapter
*adapter
)
1638 struct txdma_regs __iomem
*txdma
= &adapter
->regs
->txdma
;
1639 struct tx_ring
*tx_ring
= &adapter
->tx_ring
;
1641 /* Load the hardware with the start of the transmit descriptor ring. */
1642 writel(upper_32_bits(tx_ring
->tx_desc_ring_pa
), &txdma
->pr_base_hi
);
1643 writel(lower_32_bits(tx_ring
->tx_desc_ring_pa
), &txdma
->pr_base_lo
);
1645 /* Initialise the transmit DMA engine */
1646 writel(NUM_DESC_PER_RING_TX
- 1, &txdma
->pr_num_des
);
1648 /* Load the completion writeback physical address */
1649 writel(upper_32_bits(tx_ring
->tx_status_pa
), &txdma
->dma_wb_base_hi
);
1650 writel(lower_32_bits(tx_ring
->tx_status_pa
), &txdma
->dma_wb_base_lo
);
1652 *tx_ring
->tx_status
= 0;
1654 writel(0, &txdma
->service_request
);
1655 tx_ring
->send_idx
= 0;
1658 /* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */
1659 static void et131x_adapter_setup(struct et131x_adapter
*adapter
)
1661 et131x_configure_global_regs(adapter
);
1662 et1310_config_mac_regs1(adapter
);
1664 /* Configure the MMC registers */
1665 /* All we need to do is initialize the Memory Control Register */
1666 writel(ET_MMC_ENABLE
, &adapter
->regs
->mmc
.mmc_ctrl
);
1668 et1310_config_rxmac_regs(adapter
);
1669 et1310_config_txmac_regs(adapter
);
1671 et131x_config_rx_dma_regs(adapter
);
1672 et131x_config_tx_dma_regs(adapter
);
1674 et1310_config_macstat_regs(adapter
);
1676 et1310_phy_power_switch(adapter
, 0);
1677 et131x_xcvr_init(adapter
);
1680 /* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */
1681 static void et131x_soft_reset(struct et131x_adapter
*adapter
)
1685 /* Disable MAC Core */
1686 reg
= ET_MAC_CFG1_SOFT_RESET
| ET_MAC_CFG1_SIM_RESET
|
1687 ET_MAC_CFG1_RESET_RXMC
| ET_MAC_CFG1_RESET_TXMC
|
1688 ET_MAC_CFG1_RESET_RXFUNC
| ET_MAC_CFG1_RESET_TXFUNC
;
1689 writel(reg
, &adapter
->regs
->mac
.cfg1
);
1692 writel(reg
, &adapter
->regs
->global
.sw_reset
);
1694 reg
= ET_MAC_CFG1_RESET_RXMC
| ET_MAC_CFG1_RESET_TXMC
|
1695 ET_MAC_CFG1_RESET_RXFUNC
| ET_MAC_CFG1_RESET_TXFUNC
;
1696 writel(reg
, &adapter
->regs
->mac
.cfg1
);
1697 writel(0, &adapter
->regs
->mac
.cfg1
);
1700 static void et131x_enable_interrupts(struct et131x_adapter
*adapter
)
1704 if (adapter
->flow
== FLOW_TXONLY
|| adapter
->flow
== FLOW_BOTH
)
1705 mask
= INT_MASK_ENABLE
;
1707 mask
= INT_MASK_ENABLE_NO_FLOW
;
1709 writel(mask
, &adapter
->regs
->global
.int_mask
);
1712 static void et131x_disable_interrupts(struct et131x_adapter
*adapter
)
1714 writel(INT_MASK_DISABLE
, &adapter
->regs
->global
.int_mask
);
1717 static void et131x_tx_dma_disable(struct et131x_adapter
*adapter
)
1719 /* Setup the transmit dma configuration register */
1720 writel(ET_TXDMA_CSR_HALT
| ET_TXDMA_SNGL_EPKT
,
1721 &adapter
->regs
->txdma
.csr
);
1724 static void et131x_enable_txrx(struct net_device
*netdev
)
1726 struct et131x_adapter
*adapter
= netdev_priv(netdev
);
1728 et131x_rx_dma_enable(adapter
);
1729 et131x_tx_dma_enable(adapter
);
1731 if (adapter
->flags
& FMP_ADAPTER_INTERRUPT_IN_USE
)
1732 et131x_enable_interrupts(adapter
);
1734 netif_start_queue(netdev
);
1737 static void et131x_disable_txrx(struct net_device
*netdev
)
1739 struct et131x_adapter
*adapter
= netdev_priv(netdev
);
1741 netif_stop_queue(netdev
);
1743 et131x_rx_dma_disable(adapter
);
1744 et131x_tx_dma_disable(adapter
);
1746 et131x_disable_interrupts(adapter
);
1749 static void et131x_init_send(struct et131x_adapter
*adapter
)
1752 struct tx_ring
*tx_ring
= &adapter
->tx_ring
;
1753 struct tcb
*tcb
= tx_ring
->tcb_ring
;
1755 tx_ring
->tcb_qhead
= tcb
;
1757 memset(tcb
, 0, sizeof(struct tcb
) * NUM_TCB
);
1759 for (i
= 0; i
< NUM_TCB
; i
++) {
1760 tcb
->next
= tcb
+ 1;
1765 tx_ring
->tcb_qtail
= tcb
;
1767 /* Curr send queue should now be empty */
1768 tx_ring
->send_head
= NULL
;
1769 tx_ring
->send_tail
= NULL
;
1772 /* et1310_enable_phy_coma
1774 * driver receive an phy status change interrupt while in D0 and check that
1775 * phy_status is down.
1777 * -- gate off JAGCore;
1778 * -- set gigE PHY in Coma mode
1779 * -- wake on phy_interrupt; Perform software reset JAGCore,
1780 * re-initialize jagcore and gigE PHY
1782 static void et1310_enable_phy_coma(struct et131x_adapter
*adapter
)
1784 u32 pmcsr
= readl(&adapter
->regs
->global
.pm_csr
);
1786 /* Stop sending packets. */
1787 adapter
->flags
|= FMP_ADAPTER_LOWER_POWER
;
1789 /* Wait for outstanding Receive packets */
1790 et131x_disable_txrx(adapter
->netdev
);
1792 /* Gate off JAGCore 3 clock domains */
1793 pmcsr
&= ~ET_PMCSR_INIT
;
1794 writel(pmcsr
, &adapter
->regs
->global
.pm_csr
);
1796 /* Program gigE PHY in to Coma mode */
1797 pmcsr
|= ET_PM_PHY_SW_COMA
;
1798 writel(pmcsr
, &adapter
->regs
->global
.pm_csr
);
1801 static void et1310_disable_phy_coma(struct et131x_adapter
*adapter
)
1805 pmcsr
= readl(&adapter
->regs
->global
.pm_csr
);
1807 /* Disable phy_sw_coma register and re-enable JAGCore clocks */
1808 pmcsr
|= ET_PMCSR_INIT
;
1809 pmcsr
&= ~ET_PM_PHY_SW_COMA
;
1810 writel(pmcsr
, &adapter
->regs
->global
.pm_csr
);
1812 /* Restore the GbE PHY speed and duplex modes;
1813 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
1816 /* Re-initialize the send structures */
1817 et131x_init_send(adapter
);
1819 /* Bring the device back to the state it was during init prior to
1820 * autonegotiation being complete. This way, when we get the auto-neg
1821 * complete interrupt, we can complete init by calling ConfigMacREGS2.
1823 et131x_soft_reset(adapter
);
1825 et131x_adapter_setup(adapter
);
1827 /* Allow Tx to restart */
1828 adapter
->flags
&= ~FMP_ADAPTER_LOWER_POWER
;
1830 et131x_enable_txrx(adapter
->netdev
);
1833 static inline u32
bump_free_buff_ring(u32
*free_buff_ring
, u32 limit
)
1835 u32 tmp_free_buff_ring
= *free_buff_ring
;
1837 tmp_free_buff_ring
++;
1838 /* This works for all cases where limit < 1024. The 1023 case
1839 * works because 1023++ is 1024 which means the if condition is not
1840 * taken but the carry of the bit into the wrap bit toggles the wrap
1843 if ((tmp_free_buff_ring
& ET_DMA10_MASK
) > limit
) {
1844 tmp_free_buff_ring
&= ~ET_DMA10_MASK
;
1845 tmp_free_buff_ring
^= ET_DMA10_WRAP
;
1847 /* For the 1023 case */
1848 tmp_free_buff_ring
&= (ET_DMA10_MASK
| ET_DMA10_WRAP
);
1849 *free_buff_ring
= tmp_free_buff_ring
;
1850 return tmp_free_buff_ring
;
1853 /* et131x_rx_dma_memory_alloc
1855 * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
1856 * and the Packet Status Ring.
1858 static int et131x_rx_dma_memory_alloc(struct et131x_adapter
*adapter
)
1865 struct rx_ring
*rx_ring
= &adapter
->rx_ring
;
1866 struct fbr_lookup
*fbr
;
1868 /* Alloc memory for the lookup table */
1869 rx_ring
->fbr
[0] = kzalloc(sizeof(*fbr
), GFP_KERNEL
);
1870 if (rx_ring
->fbr
[0] == NULL
)
1872 rx_ring
->fbr
[1] = kzalloc(sizeof(*fbr
), GFP_KERNEL
);
1873 if (rx_ring
->fbr
[1] == NULL
)
1876 /* The first thing we will do is configure the sizes of the buffer
1877 * rings. These will change based on jumbo packet support. Larger
1878 * jumbo packets increases the size of each entry in FBR0, and the
1879 * number of entries in FBR0, while at the same time decreasing the
1880 * number of entries in FBR1.
1882 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
1883 * entries are huge in order to accommodate a "jumbo" frame, then it
1884 * will have less entries. Conversely, FBR1 will now be relied upon
1885 * to carry more "normal" frames, thus it's entry size also increases
1886 * and the number of entries goes up too (since it now carries
1887 * "small" + "regular" packets.
1889 * In this scheme, we try to maintain 512 entries between the two
1890 * rings. Also, FBR1 remains a constant size - when it's size doubles
1891 * the number of entries halves. FBR0 increases in size, however.
1893 if (adapter
->registry_jumbo_packet
< 2048) {
1894 rx_ring
->fbr
[0]->buffsize
= 256;
1895 rx_ring
->fbr
[0]->num_entries
= 512;
1896 rx_ring
->fbr
[1]->buffsize
= 2048;
1897 rx_ring
->fbr
[1]->num_entries
= 512;
1898 } else if (adapter
->registry_jumbo_packet
< 4096) {
1899 rx_ring
->fbr
[0]->buffsize
= 512;
1900 rx_ring
->fbr
[0]->num_entries
= 1024;
1901 rx_ring
->fbr
[1]->buffsize
= 4096;
1902 rx_ring
->fbr
[1]->num_entries
= 512;
1904 rx_ring
->fbr
[0]->buffsize
= 1024;
1905 rx_ring
->fbr
[0]->num_entries
= 768;
1906 rx_ring
->fbr
[1]->buffsize
= 16384;
1907 rx_ring
->fbr
[1]->num_entries
= 128;
1910 rx_ring
->psr_entries
= rx_ring
->fbr
[0]->num_entries
+
1911 rx_ring
->fbr
[1]->num_entries
;
1913 for (id
= 0; id
< NUM_FBRS
; id
++) {
1914 fbr
= rx_ring
->fbr
[id
];
1915 /* Allocate an area of memory for Free Buffer Ring */
1916 bufsize
= sizeof(struct fbr_desc
) * fbr
->num_entries
;
1917 fbr
->ring_virtaddr
= dma_alloc_coherent(&adapter
->pdev
->dev
,
1919 &fbr
->ring_physaddr
,
1921 if (!fbr
->ring_virtaddr
) {
1922 dev_err(&adapter
->pdev
->dev
,
1923 "Cannot alloc memory for Free Buffer Ring %d\n",
1929 for (id
= 0; id
< NUM_FBRS
; id
++) {
1930 fbr
= rx_ring
->fbr
[id
];
1931 fbr_chunksize
= (FBR_CHUNKS
* fbr
->buffsize
);
1933 for (i
= 0; i
< fbr
->num_entries
/ FBR_CHUNKS
; i
++) {
1934 dma_addr_t fbr_physaddr
;
1936 fbr
->mem_virtaddrs
[i
] = dma_alloc_coherent(
1937 &adapter
->pdev
->dev
, fbr_chunksize
,
1938 &fbr
->mem_physaddrs
[i
],
1941 if (!fbr
->mem_virtaddrs
[i
]) {
1942 dev_err(&adapter
->pdev
->dev
,
1943 "Could not alloc memory\n");
1947 /* See NOTE in "Save Physical Address" comment above */
1948 fbr_physaddr
= fbr
->mem_physaddrs
[i
];
1950 for (j
= 0; j
< FBR_CHUNKS
; j
++) {
1951 u32 k
= (i
* FBR_CHUNKS
) + j
;
1953 /* Save the Virtual address of this index for
1954 * quick access later
1956 fbr
->virt
[k
] = (u8
*)fbr
->mem_virtaddrs
[i
] +
1957 (j
* fbr
->buffsize
);
1959 /* now store the physical address in the
1960 * descriptor so the device can access it
1962 fbr
->bus_high
[k
] = upper_32_bits(fbr_physaddr
);
1963 fbr
->bus_low
[k
] = lower_32_bits(fbr_physaddr
);
1964 fbr_physaddr
+= fbr
->buffsize
;
1969 /* Allocate an area of memory for FIFO of Packet Status ring entries */
1970 psr_size
= sizeof(struct pkt_stat_desc
) * rx_ring
->psr_entries
;
1972 rx_ring
->ps_ring_virtaddr
= dma_alloc_coherent(&adapter
->pdev
->dev
,
1974 &rx_ring
->ps_ring_physaddr
,
1977 if (!rx_ring
->ps_ring_virtaddr
) {
1978 dev_err(&adapter
->pdev
->dev
,
1979 "Cannot alloc memory for Packet Status Ring\n");
1983 /* Allocate an area of memory for writeback of status information */
1984 rx_ring
->rx_status_block
= dma_alloc_coherent(&adapter
->pdev
->dev
,
1985 sizeof(struct rx_status_block
),
1986 &rx_ring
->rx_status_bus
,
1988 if (!rx_ring
->rx_status_block
) {
1989 dev_err(&adapter
->pdev
->dev
,
1990 "Cannot alloc memory for Status Block\n");
1993 rx_ring
->num_rfd
= NIC_DEFAULT_NUM_RFD
;
1995 /* The RFDs are going to be put on lists later on, so initialize the
1998 INIT_LIST_HEAD(&rx_ring
->recv_list
);
2002 static void et131x_rx_dma_memory_free(struct et131x_adapter
*adapter
)
2009 struct rx_ring
*rx_ring
= &adapter
->rx_ring
;
2010 struct fbr_lookup
*fbr
;
2012 /* Free RFDs and associated packet descriptors */
2013 WARN_ON(rx_ring
->num_ready_recv
!= rx_ring
->num_rfd
);
2015 while (!list_empty(&rx_ring
->recv_list
)) {
2016 rfd
= list_entry(rx_ring
->recv_list
.next
,
2017 struct rfd
, list_node
);
2019 list_del(&rfd
->list_node
);
2024 /* Free Free Buffer Rings */
2025 for (id
= 0; id
< NUM_FBRS
; id
++) {
2026 fbr
= rx_ring
->fbr
[id
];
2028 if (!fbr
|| !fbr
->ring_virtaddr
)
2031 /* First the packet memory */
2032 for (ii
= 0; ii
< fbr
->num_entries
/ FBR_CHUNKS
; ii
++) {
2033 if (fbr
->mem_virtaddrs
[ii
]) {
2034 bufsize
= fbr
->buffsize
* FBR_CHUNKS
;
2036 dma_free_coherent(&adapter
->pdev
->dev
,
2038 fbr
->mem_virtaddrs
[ii
],
2039 fbr
->mem_physaddrs
[ii
]);
2041 fbr
->mem_virtaddrs
[ii
] = NULL
;
2045 bufsize
= sizeof(struct fbr_desc
) * fbr
->num_entries
;
2047 dma_free_coherent(&adapter
->pdev
->dev
,
2050 fbr
->ring_physaddr
);
2052 fbr
->ring_virtaddr
= NULL
;
2055 /* Free Packet Status Ring */
2056 if (rx_ring
->ps_ring_virtaddr
) {
2057 psr_size
= sizeof(struct pkt_stat_desc
) * rx_ring
->psr_entries
;
2059 dma_free_coherent(&adapter
->pdev
->dev
, psr_size
,
2060 rx_ring
->ps_ring_virtaddr
,
2061 rx_ring
->ps_ring_physaddr
);
2063 rx_ring
->ps_ring_virtaddr
= NULL
;
2066 /* Free area of memory for the writeback of status information */
2067 if (rx_ring
->rx_status_block
) {
2068 dma_free_coherent(&adapter
->pdev
->dev
,
2069 sizeof(struct rx_status_block
),
2070 rx_ring
->rx_status_block
,
2071 rx_ring
->rx_status_bus
);
2072 rx_ring
->rx_status_block
= NULL
;
2075 /* Free the FBR Lookup Table */
2076 kfree(rx_ring
->fbr
[0]);
2077 kfree(rx_ring
->fbr
[1]);
2079 /* Reset Counters */
2080 rx_ring
->num_ready_recv
= 0;
/* et131x_init_recv - Initialize receive data structures */
static int et131x_init_recv(struct et131x_adapter *adapter)
{
        struct rfd *rfd;
        u32 rfdct;
        struct rx_ring *rx_ring = &adapter->rx_ring;

        /* Setup each RFD */
        for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
                rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA);
                if (!rfd)
                        return -ENOMEM;

                /* Add this RFD to the recv_list */
                list_add_tail(&rfd->list_node, &rx_ring->recv_list);

                /* Increment the available RFD's */
                rx_ring->num_ready_recv++;
        }

        return 0;
}
/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
        struct phy_device *phydev = adapter->netdev->phydev;

        /* For version B silicon, we do not use the RxDMA timer for 10 and 100
         * Mbits/s line rates, and we do not enable RxDMA interrupt coalescing.
         */
        if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
                writel(0, &adapter->regs->rxdma.max_pkt_time);
                writel(1, &adapter->regs->rxdma.num_pkt_done);
        }
}
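
/* Editor's note on the two writes above: per our reading of the register
 * layout used by this driver, num_pkt_done is the packet-count threshold and
 * max_pkt_time the time threshold used for RxDMA interrupt moderation.
 * Writing num_pkt_done = 1 with max_pkt_time = 0 effectively requests an
 * interrupt per received packet, which is acceptable at 10/100 Mbit/s; at
 * gigabit speeds the coalescing values programmed during adapter setup are
 * left in place.
 */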
/* nic_return_rfd - Recycle a RFD and put it back onto the receive list */
static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
{
        struct rx_ring *rx_local = &adapter->rx_ring;
        struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
        u16 buff_index = rfd->bufferindex;
        u8 ring_index = rfd->ringindex;
        unsigned long flags;
        struct fbr_lookup *fbr = rx_local->fbr[ring_index];

        /* We don't use any of the OOB data besides status. Otherwise, we
         * need to clean up OOB data
         */
        if (buff_index < fbr->num_entries) {
                u32 free_buff_ring;
                u32 __iomem *offset;
                struct fbr_desc *next;

                if (ring_index == 0)
                        offset = &rx_dma->fbr0_full_offset;
                else
                        offset = &rx_dma->fbr1_full_offset;

                next = (struct fbr_desc *)(fbr->ring_virtaddr) +
                       INDEX10(fbr->local_full);

                /* Handle the Free Buffer Ring advancement here. Write
                 * the PA / Buffer Index for the returned buffer into
                 * the oldest (next to be freed) FBR entry
                 */
                next->addr_hi = fbr->bus_high[buff_index];
                next->addr_lo = fbr->bus_low[buff_index];
                next->word2 = buff_index;

                free_buff_ring = bump_free_buff_ring(&fbr->local_full,
                                                     fbr->num_entries - 1);
                writel(free_buff_ring, offset);
        } else {
                dev_err(&adapter->pdev->dev,
                        "%s illegal Buffer Index returned\n", __func__);
        }

        /* The processing on this RFD is done, so put it back on the tail of
         * our list
         */
        spin_lock_irqsave(&adapter->rcv_lock, flags);
        list_add_tail(&rfd->list_node, &rx_local->recv_list);
        rx_local->num_ready_recv++;
        spin_unlock_irqrestore(&adapter->rcv_lock, flags);

        WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}
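
/* Receive buffer lifecycle, summarised (editor's note, no new driver logic):
 *
 *   1. Hardware fills a buffer taken from FBR0/FBR1 and posts an entry to
 *      the Packet Status Ring, then raises rxdma_xfr_done.
 *   2. nic_rx_pkts() (below) reads that PSR entry, takes an RFD off
 *      recv_list, copies the frame into an skb and hands it to the stack.
 *   3. nic_return_rfd() (above) writes the buffer index back into the
 *      oldest free buffer descriptor and advances fbrN_full_offset so the
 *      hardware can reuse the buffer, then puts the RFD back on recv_list.
 */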
2175 /* nic_rx_pkts - Checks the hardware for available packets
2177 * Checks the hardware for available packets, using completion ring
2178 * If packets are available, it gets an RFD from the recv_list, attaches
2179 * the packet to it, puts the RFD in the RecvPendList, and also returns
2180 * the pointer to the RFD.
2182 static struct rfd
*nic_rx_pkts(struct et131x_adapter
*adapter
)
2184 struct rx_ring
*rx_local
= &adapter
->rx_ring
;
2185 struct rx_status_block
*status
;
2186 struct pkt_stat_desc
*psr
;
2188 unsigned long flags
;
2189 struct list_head
*element
;
2195 struct sk_buff
*skb
;
2196 struct fbr_lookup
*fbr
;
2198 /* RX Status block is written by the DMA engine prior to every
2199 * interrupt. It contains the next to be used entry in the Packet
2200 * Status Ring, and also the two Free Buffer rings.
2202 status
= rx_local
->rx_status_block
;
2203 word1
= status
->word1
>> 16;
2205 /* Check the PSR and wrap bits do not match */
2206 if ((word1
& 0x1FFF) == (rx_local
->local_psr_full
& 0x1FFF))
2207 return NULL
; /* Looks like this ring is not updated yet */
2209 /* The packet status ring indicates that data is available. */
2210 psr
= (struct pkt_stat_desc
*)(rx_local
->ps_ring_virtaddr
) +
2211 (rx_local
->local_psr_full
& 0xFFF);
2213 /* Grab any information that is required once the PSR is advanced,
2214 * since we can no longer rely on the memory being accurate
2216 len
= psr
->word1
& 0xFFFF;
2217 ring_index
= (psr
->word1
>> 26) & 0x03;
2218 fbr
= rx_local
->fbr
[ring_index
];
2219 buff_index
= (psr
->word1
>> 16) & 0x3FF;
2222 /* Indicate that we have used this PSR entry. */
2224 add_12bit(&rx_local
->local_psr_full
, 1);
2225 if ((rx_local
->local_psr_full
& 0xFFF) > rx_local
->psr_entries
- 1) {
2226 /* Clear psr full and toggle the wrap bit */
2227 rx_local
->local_psr_full
&= ~0xFFF;
2228 rx_local
->local_psr_full
^= 0x1000;
2231 writel(rx_local
->local_psr_full
, &adapter
->regs
->rxdma
.psr_full_offset
);
2233 if (ring_index
> 1 || buff_index
> fbr
->num_entries
- 1) {
2234 /* Illegal buffer or ring index cannot be used by S/W*/
2235 dev_err(&adapter
->pdev
->dev
,
2236 "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
2237 rx_local
->local_psr_full
& 0xFFF, len
, buff_index
);
2241 /* Get and fill the RFD. */
2242 spin_lock_irqsave(&adapter
->rcv_lock
, flags
);
2244 element
= rx_local
->recv_list
.next
;
2245 rfd
= list_entry(element
, struct rfd
, list_node
);
2248 spin_unlock_irqrestore(&adapter
->rcv_lock
, flags
);
2252 list_del(&rfd
->list_node
);
2253 rx_local
->num_ready_recv
--;
2255 spin_unlock_irqrestore(&adapter
->rcv_lock
, flags
);
2257 rfd
->bufferindex
= buff_index
;
2258 rfd
->ringindex
= ring_index
;
2260 /* In V1 silicon, there is a bug which screws up filtering of runt
2261 * packets. Therefore runt packet filtering is disabled in the MAC and
2262 * the packets are dropped here. They are also counted here.
2264 if (len
< (NIC_MIN_PACKET_SIZE
+ 4)) {
2265 adapter
->stats
.rx_other_errs
++;
2270 if ((word0
& ALCATEL_MULTICAST_PKT
) && !(word0
& ALCATEL_BROADCAST_PKT
))
2271 adapter
->stats
.multicast_pkts_rcvd
++;
2275 skb
= dev_alloc_skb(rfd
->len
+ 2);
2279 adapter
->netdev
->stats
.rx_bytes
+= rfd
->len
;
2281 skb_put_data(skb
, fbr
->virt
[buff_index
], rfd
->len
);
2283 skb
->protocol
= eth_type_trans(skb
, adapter
->netdev
);
2284 skb
->ip_summed
= CHECKSUM_NONE
;
2285 netif_receive_skb(skb
);
2288 nic_return_rfd(adapter
, rfd
);
2292 static int et131x_handle_recv_pkts(struct et131x_adapter
*adapter
, int budget
)
2294 struct rfd
*rfd
= NULL
;
2298 struct rx_ring
*rx_ring
= &adapter
->rx_ring
;
2300 if (budget
> MAX_PACKETS_HANDLED
)
2301 limit
= MAX_PACKETS_HANDLED
;
2303 /* Process up to available RFD's */
2304 while (count
< limit
) {
2305 if (list_empty(&rx_ring
->recv_list
)) {
2306 WARN_ON(rx_ring
->num_ready_recv
!= 0);
2311 rfd
= nic_rx_pkts(adapter
);
2316 /* Do not receive any packets until a filter has been set.
2317 * Do not receive any packets until we have link.
2318 * If length is zero, return the RFD in order to advance the
2321 if (!adapter
->packet_filter
||
2322 !netif_carrier_ok(adapter
->netdev
) ||
2326 adapter
->netdev
->stats
.rx_packets
++;
2328 if (rx_ring
->num_ready_recv
< RFD_LOW_WATER_MARK
)
2329 dev_warn(&adapter
->pdev
->dev
, "RFD's are running out\n");
2334 if (count
== limit
|| !done
) {
2335 rx_ring
->unfinished_receives
= true;
2336 writel(PARM_TX_TIME_INT_DEF
* NANO_IN_A_MICRO
,
2337 &adapter
->regs
->global
.watchdog_timer
);
2339 /* Watchdog timer will disable itself if appropriate. */
2340 rx_ring
->unfinished_receives
= false;
/* et131x_tx_dma_memory_alloc
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
        int desc_size = 0;
        struct tx_ring *tx_ring = &adapter->tx_ring;

        /* Allocate memory for the TCB's (Transmit Control Block) */
        tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
                                    GFP_KERNEL | GFP_DMA);
        if (!tx_ring->tcb_ring)
                return -ENOMEM;

        desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
        tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
                                                   desc_size,
                                                   &tx_ring->tx_desc_ring_pa,
                                                   GFP_KERNEL);
        if (!tx_ring->tx_desc_ring) {
                dev_err(&adapter->pdev->dev,
                        "Cannot alloc memory for Tx Ring\n");
                return -ENOMEM;
        }

        tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
                                                sizeof(u32),
                                                &tx_ring->tx_status_pa,
                                                GFP_KERNEL);
        if (!tx_ring->tx_status) {
                dev_err(&adapter->pdev->dev,
                        "Cannot alloc memory for Tx status block\n");
                return -ENOMEM;
        }
        return 0;
}

static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
        int desc_size = 0;
        struct tx_ring *tx_ring = &adapter->tx_ring;

        if (tx_ring->tx_desc_ring) {
                /* Free memory relating to Tx rings here */
                desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
                dma_free_coherent(&adapter->pdev->dev,
                                  desc_size,
                                  tx_ring->tx_desc_ring,
                                  tx_ring->tx_desc_ring_pa);
                tx_ring->tx_desc_ring = NULL;
        }

        /* Free memory for the Tx status block */
        if (tx_ring->tx_status) {
                dma_free_coherent(&adapter->pdev->dev,
                                  sizeof(u32),
                                  tx_ring->tx_status,
                                  tx_ring->tx_status_pa);
                tx_ring->tx_status = NULL;
        }
        /* Free the memory for the tcb structures */
        kfree(tx_ring->tcb_ring);
}
2416 #define MAX_TX_DESC_PER_PKT 24
2418 /* nic_send_packet - NIC specific send handler for version B silicon. */
2419 static int nic_send_packet(struct et131x_adapter
*adapter
, struct tcb
*tcb
)
2422 struct tx_desc desc
[MAX_TX_DESC_PER_PKT
];
2424 u32 thiscopy
, remainder
;
2425 struct sk_buff
*skb
= tcb
->skb
;
2426 u32 nr_frags
= skb_shinfo(skb
)->nr_frags
+ 1;
2427 skb_frag_t
*frags
= &skb_shinfo(skb
)->frags
[0];
2428 struct phy_device
*phydev
= adapter
->netdev
->phydev
;
2429 dma_addr_t dma_addr
;
2430 struct tx_ring
*tx_ring
= &adapter
->tx_ring
;
2432 /* Part of the optimizations of this send routine restrict us to
2433 * sending 24 fragments at a pass. In practice we should never see
2434 * more than 5 fragments.
2437 memset(desc
, 0, sizeof(struct tx_desc
) * (nr_frags
+ 1));
2439 for (i
= 0; i
< nr_frags
; i
++) {
2440 /* If there is something in this element, lets get a
2441 * descriptor from the ring and get the necessary data
2444 /* If the fragments are smaller than a standard MTU,
2445 * then map them to a single descriptor in the Tx
2446 * Desc ring. However, if they're larger, as is
2447 * possible with support for jumbo packets, then
2448 * split them each across 2 descriptors.
2450 * This will work until we determine why the hardware
2451 * doesn't seem to like large fragments.
2453 if (skb_headlen(skb
) <= 1514) {
2454 /* Low 16bits are length, high is vlan and
2455 * unused currently so zero
2457 desc
[frag
].len_vlan
= skb_headlen(skb
);
2458 dma_addr
= dma_map_single(&adapter
->pdev
->dev
,
2462 desc
[frag
].addr_lo
= lower_32_bits(dma_addr
);
2463 desc
[frag
].addr_hi
= upper_32_bits(dma_addr
);
2466 desc
[frag
].len_vlan
= skb_headlen(skb
) / 2;
2467 dma_addr
= dma_map_single(&adapter
->pdev
->dev
,
2469 skb_headlen(skb
) / 2,
2471 desc
[frag
].addr_lo
= lower_32_bits(dma_addr
);
2472 desc
[frag
].addr_hi
= upper_32_bits(dma_addr
);
2475 desc
[frag
].len_vlan
= skb_headlen(skb
) / 2;
2476 dma_addr
= dma_map_single(&adapter
->pdev
->dev
,
2478 skb_headlen(skb
) / 2,
2479 skb_headlen(skb
) / 2,
2481 desc
[frag
].addr_lo
= lower_32_bits(dma_addr
);
2482 desc
[frag
].addr_hi
= upper_32_bits(dma_addr
);
2486 desc
[frag
].len_vlan
= skb_frag_size(&frags
[i
- 1]);
2487 dma_addr
= skb_frag_dma_map(&adapter
->pdev
->dev
,
2490 desc
[frag
].len_vlan
,
2492 desc
[frag
].addr_lo
= lower_32_bits(dma_addr
);
2493 desc
[frag
].addr_hi
= upper_32_bits(dma_addr
);
2498 if (phydev
&& phydev
->speed
== SPEED_1000
) {
2499 if (++tx_ring
->since_irq
== PARM_TX_NUM_BUFS_DEF
) {
2500 /* Last element & Interrupt flag */
2501 desc
[frag
- 1].flags
=
2502 TXDESC_FLAG_INTPROC
| TXDESC_FLAG_LASTPKT
;
2503 tx_ring
->since_irq
= 0;
2504 } else { /* Last element */
2505 desc
[frag
- 1].flags
= TXDESC_FLAG_LASTPKT
;
2508 desc
[frag
- 1].flags
=
2509 TXDESC_FLAG_INTPROC
| TXDESC_FLAG_LASTPKT
;
2512 desc
[0].flags
|= TXDESC_FLAG_FIRSTPKT
;
2514 tcb
->index_start
= tx_ring
->send_idx
;
2517 thiscopy
= NUM_DESC_PER_RING_TX
- INDEX10(tx_ring
->send_idx
);
2519 if (thiscopy
>= frag
) {
2523 remainder
= frag
- thiscopy
;
2526 memcpy(tx_ring
->tx_desc_ring
+ INDEX10(tx_ring
->send_idx
),
2528 sizeof(struct tx_desc
) * thiscopy
);
2530 add_10bit(&tx_ring
->send_idx
, thiscopy
);
2532 if (INDEX10(tx_ring
->send_idx
) == 0 ||
2533 INDEX10(tx_ring
->send_idx
) == NUM_DESC_PER_RING_TX
) {
2534 tx_ring
->send_idx
&= ~ET_DMA10_MASK
;
2535 tx_ring
->send_idx
^= ET_DMA10_WRAP
;
2539 memcpy(tx_ring
->tx_desc_ring
,
2541 sizeof(struct tx_desc
) * remainder
);
2543 add_10bit(&tx_ring
->send_idx
, remainder
);
2546 if (INDEX10(tx_ring
->send_idx
) == 0) {
2547 if (tx_ring
->send_idx
)
2548 tcb
->index
= NUM_DESC_PER_RING_TX
- 1;
2550 tcb
->index
= ET_DMA10_WRAP
|(NUM_DESC_PER_RING_TX
- 1);
2552 tcb
->index
= tx_ring
->send_idx
- 1;
2555 spin_lock(&adapter
->tcb_send_qlock
);
2557 if (tx_ring
->send_tail
)
2558 tx_ring
->send_tail
->next
= tcb
;
2560 tx_ring
->send_head
= tcb
;
2562 tx_ring
->send_tail
= tcb
;
2564 WARN_ON(tcb
->next
!= NULL
);
2568 spin_unlock(&adapter
->tcb_send_qlock
);
2570 /* Write the new write pointer back to the device. */
2571 writel(tx_ring
->send_idx
, &adapter
->regs
->txdma
.service_request
);
2573 /* For Gig only, we use Tx Interrupt coalescing. Enable the software
2574 * timer to wake us up if this packet isn't followed by N more.
2576 if (phydev
&& phydev
->speed
== SPEED_1000
) {
2577 writel(PARM_TX_TIME_INT_DEF
* NANO_IN_A_MICRO
,
2578 &adapter
->regs
->global
.watchdog_timer
);
2583 static int send_packet(struct sk_buff
*skb
, struct et131x_adapter
*adapter
)
2587 unsigned long flags
;
2588 struct tx_ring
*tx_ring
= &adapter
->tx_ring
;
2590 /* All packets must have at least a MAC address and a protocol type */
2591 if (skb
->len
< ETH_HLEN
)
2594 spin_lock_irqsave(&adapter
->tcb_ready_qlock
, flags
);
2596 tcb
= tx_ring
->tcb_qhead
;
2599 spin_unlock_irqrestore(&adapter
->tcb_ready_qlock
, flags
);
2603 tx_ring
->tcb_qhead
= tcb
->next
;
2605 if (tx_ring
->tcb_qhead
== NULL
)
2606 tx_ring
->tcb_qtail
= NULL
;
2608 spin_unlock_irqrestore(&adapter
->tcb_ready_qlock
, flags
);
2613 status
= nic_send_packet(adapter
, tcb
);
2616 spin_lock_irqsave(&adapter
->tcb_ready_qlock
, flags
);
2618 if (tx_ring
->tcb_qtail
)
2619 tx_ring
->tcb_qtail
->next
= tcb
;
2621 /* Apparently ready Q is empty. */
2622 tx_ring
->tcb_qhead
= tcb
;
2624 tx_ring
->tcb_qtail
= tcb
;
2625 spin_unlock_irqrestore(&adapter
->tcb_ready_qlock
, flags
);
2628 WARN_ON(tx_ring
->used
> NUM_TCB
);
2632 /* free_send_packet - Recycle a struct tcb */
2633 static inline void free_send_packet(struct et131x_adapter
*adapter
,
2636 unsigned long flags
;
2637 struct tx_desc
*desc
= NULL
;
2638 struct net_device_stats
*stats
= &adapter
->netdev
->stats
;
2639 struct tx_ring
*tx_ring
= &adapter
->tx_ring
;
2643 stats
->tx_bytes
+= tcb
->skb
->len
;
2645 /* Iterate through the TX descriptors on the ring
2646 * corresponding to this packet and umap the fragments
2650 desc
= tx_ring
->tx_desc_ring
+
2651 INDEX10(tcb
->index_start
);
2653 dma_addr
= desc
->addr_lo
;
2654 dma_addr
|= (u64
)desc
->addr_hi
<< 32;
2656 dma_unmap_single(&adapter
->pdev
->dev
,
2658 desc
->len_vlan
, DMA_TO_DEVICE
);
2660 add_10bit(&tcb
->index_start
, 1);
2661 if (INDEX10(tcb
->index_start
) >=
2662 NUM_DESC_PER_RING_TX
) {
2663 tcb
->index_start
&= ~ET_DMA10_MASK
;
2664 tcb
->index_start
^= ET_DMA10_WRAP
;
2666 } while (desc
!= tx_ring
->tx_desc_ring
+ INDEX10(tcb
->index
));
2668 dev_kfree_skb_any(tcb
->skb
);
2671 memset(tcb
, 0, sizeof(struct tcb
));
2673 /* Add the TCB to the Ready Q */
2674 spin_lock_irqsave(&adapter
->tcb_ready_qlock
, flags
);
2676 stats
->tx_packets
++;
2678 if (tx_ring
->tcb_qtail
)
2679 tx_ring
->tcb_qtail
->next
= tcb
;
2680 else /* Apparently ready Q is empty. */
2681 tx_ring
->tcb_qhead
= tcb
;
2683 tx_ring
->tcb_qtail
= tcb
;
2685 spin_unlock_irqrestore(&adapter
->tcb_ready_qlock
, flags
);
2686 WARN_ON(tx_ring
->used
< 0);
2689 /* et131x_free_busy_send_packets - Free and complete the stopped active sends */
2690 static void et131x_free_busy_send_packets(struct et131x_adapter
*adapter
)
2693 unsigned long flags
;
2695 struct tx_ring
*tx_ring
= &adapter
->tx_ring
;
2697 /* Any packets being sent? Check the first TCB on the send list */
2698 spin_lock_irqsave(&adapter
->tcb_send_qlock
, flags
);
2700 tcb
= tx_ring
->send_head
;
2702 while (tcb
!= NULL
&& freed
< NUM_TCB
) {
2703 struct tcb
*next
= tcb
->next
;
2705 tx_ring
->send_head
= next
;
2708 tx_ring
->send_tail
= NULL
;
2712 spin_unlock_irqrestore(&adapter
->tcb_send_qlock
, flags
);
2715 free_send_packet(adapter
, tcb
);
2717 spin_lock_irqsave(&adapter
->tcb_send_qlock
, flags
);
2719 tcb
= tx_ring
->send_head
;
2722 WARN_ON(freed
== NUM_TCB
);
2724 spin_unlock_irqrestore(&adapter
->tcb_send_qlock
, flags
);
2729 /* et131x_handle_send_pkts
2731 * Re-claim the send resources, complete sends and get more to send from
2732 * the send wait queue.
2734 static void et131x_handle_send_pkts(struct et131x_adapter
*adapter
)
2736 unsigned long flags
;
2740 struct tx_ring
*tx_ring
= &adapter
->tx_ring
;
2742 serviced
= readl(&adapter
->regs
->txdma
.new_service_complete
);
2743 index
= INDEX10(serviced
);
2745 /* Has the ring wrapped? Process any descriptors that do not have
2746 * the same "wrap" indicator as the current completion indicator
2748 spin_lock_irqsave(&adapter
->tcb_send_qlock
, flags
);
2750 tcb
= tx_ring
->send_head
;
2753 ((serviced
^ tcb
->index
) & ET_DMA10_WRAP
) &&
2754 index
< INDEX10(tcb
->index
)) {
2756 tx_ring
->send_head
= tcb
->next
;
2757 if (tcb
->next
== NULL
)
2758 tx_ring
->send_tail
= NULL
;
2760 spin_unlock_irqrestore(&adapter
->tcb_send_qlock
, flags
);
2761 free_send_packet(adapter
, tcb
);
2762 spin_lock_irqsave(&adapter
->tcb_send_qlock
, flags
);
2764 /* Goto the next packet */
2765 tcb
= tx_ring
->send_head
;
2768 !((serviced
^ tcb
->index
) & ET_DMA10_WRAP
) &&
2769 index
> (tcb
->index
& ET_DMA10_MASK
)) {
2771 tx_ring
->send_head
= tcb
->next
;
2772 if (tcb
->next
== NULL
)
2773 tx_ring
->send_tail
= NULL
;
2775 spin_unlock_irqrestore(&adapter
->tcb_send_qlock
, flags
);
2776 free_send_packet(adapter
, tcb
);
2777 spin_lock_irqsave(&adapter
->tcb_send_qlock
, flags
);
2779 /* Goto the next packet */
2780 tcb
= tx_ring
->send_head
;
2783 /* Wake up the queue when we hit a low-water mark */
2784 if (tx_ring
->used
<= NUM_TCB
/ 3)
2785 netif_wake_queue(adapter
->netdev
);
2787 spin_unlock_irqrestore(&adapter
->tcb_send_qlock
, flags
);
static int et131x_get_regs_len(struct net_device *netdev)
{
#define ET131X_REGS_LEN 256
        return ET131X_REGS_LEN * sizeof(u32);
}
2796 static void et131x_get_regs(struct net_device
*netdev
,
2797 struct ethtool_regs
*regs
, void *regs_data
)
2799 struct et131x_adapter
*adapter
= netdev_priv(netdev
);
2800 struct address_map __iomem
*aregs
= adapter
->regs
;
2801 u32
*regs_buff
= regs_data
;
2805 memset(regs_data
, 0, et131x_get_regs_len(netdev
));
2807 regs
->version
= (1 << 24) | (adapter
->pdev
->revision
<< 16) |
2808 adapter
->pdev
->device
;
2811 et131x_mii_read(adapter
, MII_BMCR
, &tmp
);
2812 regs_buff
[num
++] = tmp
;
2813 et131x_mii_read(adapter
, MII_BMSR
, &tmp
);
2814 regs_buff
[num
++] = tmp
;
2815 et131x_mii_read(adapter
, MII_PHYSID1
, &tmp
);
2816 regs_buff
[num
++] = tmp
;
2817 et131x_mii_read(adapter
, MII_PHYSID2
, &tmp
);
2818 regs_buff
[num
++] = tmp
;
2819 et131x_mii_read(adapter
, MII_ADVERTISE
, &tmp
);
2820 regs_buff
[num
++] = tmp
;
2821 et131x_mii_read(adapter
, MII_LPA
, &tmp
);
2822 regs_buff
[num
++] = tmp
;
2823 et131x_mii_read(adapter
, MII_EXPANSION
, &tmp
);
2824 regs_buff
[num
++] = tmp
;
2825 /* Autoneg next page transmit reg */
2826 et131x_mii_read(adapter
, 0x07, &tmp
);
2827 regs_buff
[num
++] = tmp
;
2828 /* Link partner next page reg */
2829 et131x_mii_read(adapter
, 0x08, &tmp
);
2830 regs_buff
[num
++] = tmp
;
2831 et131x_mii_read(adapter
, MII_CTRL1000
, &tmp
);
2832 regs_buff
[num
++] = tmp
;
2833 et131x_mii_read(adapter
, MII_STAT1000
, &tmp
);
2834 regs_buff
[num
++] = tmp
;
2835 et131x_mii_read(adapter
, 0x0b, &tmp
);
2836 regs_buff
[num
++] = tmp
;
2837 et131x_mii_read(adapter
, 0x0c, &tmp
);
2838 regs_buff
[num
++] = tmp
;
2839 et131x_mii_read(adapter
, MII_MMD_CTRL
, &tmp
);
2840 regs_buff
[num
++] = tmp
;
2841 et131x_mii_read(adapter
, MII_MMD_DATA
, &tmp
);
2842 regs_buff
[num
++] = tmp
;
2843 et131x_mii_read(adapter
, MII_ESTATUS
, &tmp
);
2844 regs_buff
[num
++] = tmp
;
2846 et131x_mii_read(adapter
, PHY_INDEX_REG
, &tmp
);
2847 regs_buff
[num
++] = tmp
;
2848 et131x_mii_read(adapter
, PHY_DATA_REG
, &tmp
);
2849 regs_buff
[num
++] = tmp
;
2850 et131x_mii_read(adapter
, PHY_MPHY_CONTROL_REG
, &tmp
);
2851 regs_buff
[num
++] = tmp
;
2852 et131x_mii_read(adapter
, PHY_LOOPBACK_CONTROL
, &tmp
);
2853 regs_buff
[num
++] = tmp
;
2854 et131x_mii_read(adapter
, PHY_LOOPBACK_CONTROL
+ 1, &tmp
);
2855 regs_buff
[num
++] = tmp
;
2857 et131x_mii_read(adapter
, PHY_REGISTER_MGMT_CONTROL
, &tmp
);
2858 regs_buff
[num
++] = tmp
;
2859 et131x_mii_read(adapter
, PHY_CONFIG
, &tmp
);
2860 regs_buff
[num
++] = tmp
;
2861 et131x_mii_read(adapter
, PHY_PHY_CONTROL
, &tmp
);
2862 regs_buff
[num
++] = tmp
;
2863 et131x_mii_read(adapter
, PHY_INTERRUPT_MASK
, &tmp
);
2864 regs_buff
[num
++] = tmp
;
2865 et131x_mii_read(adapter
, PHY_INTERRUPT_STATUS
, &tmp
);
2866 regs_buff
[num
++] = tmp
;
2867 et131x_mii_read(adapter
, PHY_PHY_STATUS
, &tmp
);
2868 regs_buff
[num
++] = tmp
;
2869 et131x_mii_read(adapter
, PHY_LED_1
, &tmp
);
2870 regs_buff
[num
++] = tmp
;
2871 et131x_mii_read(adapter
, PHY_LED_2
, &tmp
);
2872 regs_buff
[num
++] = tmp
;
2875 regs_buff
[num
++] = readl(&aregs
->global
.txq_start_addr
);
2876 regs_buff
[num
++] = readl(&aregs
->global
.txq_end_addr
);
2877 regs_buff
[num
++] = readl(&aregs
->global
.rxq_start_addr
);
2878 regs_buff
[num
++] = readl(&aregs
->global
.rxq_end_addr
);
2879 regs_buff
[num
++] = readl(&aregs
->global
.pm_csr
);
2880 regs_buff
[num
++] = adapter
->stats
.interrupt_status
;
2881 regs_buff
[num
++] = readl(&aregs
->global
.int_mask
);
2882 regs_buff
[num
++] = readl(&aregs
->global
.int_alias_clr_en
);
2883 regs_buff
[num
++] = readl(&aregs
->global
.int_status_alias
);
2884 regs_buff
[num
++] = readl(&aregs
->global
.sw_reset
);
2885 regs_buff
[num
++] = readl(&aregs
->global
.slv_timer
);
2886 regs_buff
[num
++] = readl(&aregs
->global
.msi_config
);
2887 regs_buff
[num
++] = readl(&aregs
->global
.loopback
);
2888 regs_buff
[num
++] = readl(&aregs
->global
.watchdog_timer
);
2891 regs_buff
[num
++] = readl(&aregs
->txdma
.csr
);
2892 regs_buff
[num
++] = readl(&aregs
->txdma
.pr_base_hi
);
2893 regs_buff
[num
++] = readl(&aregs
->txdma
.pr_base_lo
);
2894 regs_buff
[num
++] = readl(&aregs
->txdma
.pr_num_des
);
2895 regs_buff
[num
++] = readl(&aregs
->txdma
.txq_wr_addr
);
2896 regs_buff
[num
++] = readl(&aregs
->txdma
.txq_wr_addr_ext
);
2897 regs_buff
[num
++] = readl(&aregs
->txdma
.txq_rd_addr
);
2898 regs_buff
[num
++] = readl(&aregs
->txdma
.dma_wb_base_hi
);
2899 regs_buff
[num
++] = readl(&aregs
->txdma
.dma_wb_base_lo
);
2900 regs_buff
[num
++] = readl(&aregs
->txdma
.service_request
);
2901 regs_buff
[num
++] = readl(&aregs
->txdma
.service_complete
);
2902 regs_buff
[num
++] = readl(&aregs
->txdma
.cache_rd_index
);
2903 regs_buff
[num
++] = readl(&aregs
->txdma
.cache_wr_index
);
2904 regs_buff
[num
++] = readl(&aregs
->txdma
.tx_dma_error
);
2905 regs_buff
[num
++] = readl(&aregs
->txdma
.desc_abort_cnt
);
2906 regs_buff
[num
++] = readl(&aregs
->txdma
.payload_abort_cnt
);
2907 regs_buff
[num
++] = readl(&aregs
->txdma
.writeback_abort_cnt
);
2908 regs_buff
[num
++] = readl(&aregs
->txdma
.desc_timeout_cnt
);
2909 regs_buff
[num
++] = readl(&aregs
->txdma
.payload_timeout_cnt
);
2910 regs_buff
[num
++] = readl(&aregs
->txdma
.writeback_timeout_cnt
);
2911 regs_buff
[num
++] = readl(&aregs
->txdma
.desc_error_cnt
);
2912 regs_buff
[num
++] = readl(&aregs
->txdma
.payload_error_cnt
);
2913 regs_buff
[num
++] = readl(&aregs
->txdma
.writeback_error_cnt
);
2914 regs_buff
[num
++] = readl(&aregs
->txdma
.dropped_tlp_cnt
);
2915 regs_buff
[num
++] = readl(&aregs
->txdma
.new_service_complete
);
2916 regs_buff
[num
++] = readl(&aregs
->txdma
.ethernet_packet_cnt
);
2919 regs_buff
[num
++] = readl(&aregs
->rxdma
.csr
);
2920 regs_buff
[num
++] = readl(&aregs
->rxdma
.dma_wb_base_hi
);
2921 regs_buff
[num
++] = readl(&aregs
->rxdma
.dma_wb_base_lo
);
2922 regs_buff
[num
++] = readl(&aregs
->rxdma
.num_pkt_done
);
2923 regs_buff
[num
++] = readl(&aregs
->rxdma
.max_pkt_time
);
2924 regs_buff
[num
++] = readl(&aregs
->rxdma
.rxq_rd_addr
);
2925 regs_buff
[num
++] = readl(&aregs
->rxdma
.rxq_rd_addr_ext
);
2926 regs_buff
[num
++] = readl(&aregs
->rxdma
.rxq_wr_addr
);
2927 regs_buff
[num
++] = readl(&aregs
->rxdma
.psr_base_hi
);
2928 regs_buff
[num
++] = readl(&aregs
->rxdma
.psr_base_lo
);
2929 regs_buff
[num
++] = readl(&aregs
->rxdma
.psr_num_des
);
2930 regs_buff
[num
++] = readl(&aregs
->rxdma
.psr_avail_offset
);
2931 regs_buff
[num
++] = readl(&aregs
->rxdma
.psr_full_offset
);
2932 regs_buff
[num
++] = readl(&aregs
->rxdma
.psr_access_index
);
2933 regs_buff
[num
++] = readl(&aregs
->rxdma
.psr_min_des
);
2934 regs_buff
[num
++] = readl(&aregs
->rxdma
.fbr0_base_lo
);
2935 regs_buff
[num
++] = readl(&aregs
->rxdma
.fbr0_base_hi
);
2936 regs_buff
[num
++] = readl(&aregs
->rxdma
.fbr0_num_des
);
2937 regs_buff
[num
++] = readl(&aregs
->rxdma
.fbr0_avail_offset
);
2938 regs_buff
[num
++] = readl(&aregs
->rxdma
.fbr0_full_offset
);
2939 regs_buff
[num
++] = readl(&aregs
->rxdma
.fbr0_rd_index
);
2940 regs_buff
[num
++] = readl(&aregs
->rxdma
.fbr0_min_des
);
2941 regs_buff
[num
++] = readl(&aregs
->rxdma
.fbr1_base_lo
);
2942 regs_buff
[num
++] = readl(&aregs
->rxdma
.fbr1_base_hi
);
2943 regs_buff
[num
++] = readl(&aregs
->rxdma
.fbr1_num_des
);
2944 regs_buff
[num
++] = readl(&aregs
->rxdma
.fbr1_avail_offset
);
2945 regs_buff
[num
++] = readl(&aregs
->rxdma
.fbr1_full_offset
);
2946 regs_buff
[num
++] = readl(&aregs
->rxdma
.fbr1_rd_index
);
2947 regs_buff
[num
++] = readl(&aregs
->rxdma
.fbr1_min_des
);
static void et131x_get_drvinfo(struct net_device *netdev,
                               struct ethtool_drvinfo *info)
{
        struct et131x_adapter *adapter = netdev_priv(netdev);

        strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
        strscpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));
}

static const struct ethtool_ops et131x_ethtool_ops = {
        .get_drvinfo            = et131x_get_drvinfo,
        .get_regs_len           = et131x_get_regs_len,
        .get_regs               = et131x_get_regs,
        .get_link               = ethtool_op_get_link,
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
};
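
/* Editor's note: the register dump built in et131x_get_regs() above is
 * available through the standard ethtool interface, for example
 * (interface name is just an example):
 *
 *   ethtool -d eth0 raw on > et131x_regs.bin
 *
 * The 32-bit words appear in the same order in which they are written to
 * regs_buff[]: the PHY registers first, then the memory-mapped register
 * blocks.
 */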
/* et131x_hwaddr_init - set up the MAC Address */
static void et131x_hwaddr_init(struct et131x_adapter *adapter)
{
        /* If we have our default MAC from init and no MAC address from
         * EEPROM then we need to generate the last octet and set it on the
         * device
         */
        if (is_zero_ether_addr(adapter->rom_addr)) {
                /* We need to randomly generate the last octet so we
                 * decrease our chances of setting the mac address to
                 * same as another one of our cards in the system
                 */
                get_random_bytes(&adapter->addr[5], 1);
                /* We have the default value in the register we are
                 * working with so we need to copy the current
                 * address into the permanent address
                 */
                ether_addr_copy(adapter->rom_addr, adapter->addr);
        } else {
                /* We do not have an override address, so set the
                 * current address to the permanent address and add
                 * it to the device
                 */
                ether_addr_copy(adapter->addr, adapter->rom_addr);
        }
}
2996 static int et131x_pci_init(struct et131x_adapter
*adapter
,
2997 struct pci_dev
*pdev
)
3002 rc
= et131x_init_eeprom(adapter
);
3006 if (!pci_is_pcie(pdev
)) {
3007 dev_err(&pdev
->dev
, "Missing PCIe capabilities\n");
3011 /* Program the Ack/Nak latency and replay timers */
3012 max_payload
= pdev
->pcie_mpss
;
3014 if (max_payload
< 2) {
3015 static const u16 acknak
[2] = { 0x76, 0xD0 };
3016 static const u16 replay
[2] = { 0x1E0, 0x2ED };
3018 if (pci_write_config_word(pdev
, ET1310_PCI_ACK_NACK
,
3019 acknak
[max_payload
])) {
3021 "Could not write PCI config space for ACK/NAK\n");
3024 if (pci_write_config_word(pdev
, ET1310_PCI_REPLAY
,
3025 replay
[max_payload
])) {
3027 "Could not write PCI config space for Replay Timer\n");
3032 /* l0s and l1 latency timers. We are using default values.
3033 * Representing 001 for L0s and 010 for L1
3035 if (pci_write_config_byte(pdev
, ET1310_PCI_L0L1LATENCY
, 0x11)) {
3037 "Could not write PCI config space for Latency Timers\n");
3041 /* Change the max read size to 2k */
3042 if (pcie_set_readrq(pdev
, 2048)) {
3044 "Couldn't change PCI config space for Max read size\n");
3048 /* Get MAC address from config space if an eeprom exists, otherwise
3049 * the MAC address there will not be valid
3051 if (!adapter
->has_eeprom
) {
3052 et131x_hwaddr_init(adapter
);
3056 for (i
= 0; i
< ETH_ALEN
; i
++) {
3057 if (pci_read_config_byte(pdev
, ET1310_PCI_MAC_ADDRESS
+ i
,
3058 adapter
->rom_addr
+ i
)) {
3059 dev_err(&pdev
->dev
, "Could not read PCI config space for MAC address\n");
3063 ether_addr_copy(adapter
->addr
, adapter
->rom_addr
);
/* et131x_error_timer_handler
 * @t: the timer_list entry embedded in our adapter structure
 *
 * The routine called when the error timer expires, to track the number of
 * recurring errors.
 */
static void et131x_error_timer_handler(struct timer_list *t)
{
        struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
        struct phy_device *phydev = adapter->netdev->phydev;

        if (et1310_in_phy_coma(adapter)) {
                /* Bring the device immediately out of coma, to
                 * prevent it from sleeping indefinitely, this
                 * mechanism could be improved!
                 */
                et1310_disable_phy_coma(adapter);
                adapter->boot_coma = 20;
        } else {
                et1310_update_macstat_host_counters(adapter);
        }

        if (!phydev->link && adapter->boot_coma < 11)
                adapter->boot_coma++;

        if (adapter->boot_coma == 10) {
                if (!phydev->link) {
                        if (!et1310_in_phy_coma(adapter)) {
                                /* NOTE - This was originally a 'sync with
                                 * interrupt'. How to do that under Linux?
                                 */
                                et131x_enable_interrupts(adapter);
                                et1310_enable_phy_coma(adapter);
                        }
                }
        }

        /* This is a periodic timer, so reschedule */
        mod_timer(&adapter->error_timer, jiffies +
                  msecs_to_jiffies(TX_ERROR_PERIOD));
}
static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
        et131x_tx_dma_memory_free(adapter);
        et131x_rx_dma_memory_free(adapter);
}

static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
{
        int status;

        status = et131x_tx_dma_memory_alloc(adapter);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "et131x_tx_dma_memory_alloc FAILED\n");
                et131x_tx_dma_memory_free(adapter);
                return status;
        }

        status = et131x_rx_dma_memory_alloc(adapter);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "et131x_rx_dma_memory_alloc FAILED\n");
                et131x_adapter_memory_free(adapter);
                return status;
        }

        status = et131x_init_recv(adapter);
        if (status) {
                dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
                et131x_adapter_memory_free(adapter);
        }
        return status;
}
3147 static void et131x_adjust_link(struct net_device
*netdev
)
3149 struct et131x_adapter
*adapter
= netdev_priv(netdev
);
3150 struct phy_device
*phydev
= netdev
->phydev
;
3154 if (phydev
->link
== adapter
->link
)
3157 /* Check to see if we are in coma mode and if
3158 * so, disable it because we will not be able
3159 * to read PHY values until we are out.
3161 if (et1310_in_phy_coma(adapter
))
3162 et1310_disable_phy_coma(adapter
);
3164 adapter
->link
= phydev
->link
;
3165 phy_print_status(phydev
);
3168 adapter
->boot_coma
= 20;
3169 if (phydev
->speed
== SPEED_10
) {
3172 et131x_mii_read(adapter
, PHY_MPHY_CONTROL_REG
,
3174 et131x_mii_write(adapter
, phydev
->mdio
.addr
,
3175 PHY_MPHY_CONTROL_REG
,
3177 et131x_mii_write(adapter
, phydev
->mdio
.addr
,
3178 PHY_INDEX_REG
, register18
| 0x8402);
3179 et131x_mii_write(adapter
, phydev
->mdio
.addr
,
3180 PHY_DATA_REG
, register18
| 511);
3181 et131x_mii_write(adapter
, phydev
->mdio
.addr
,
3182 PHY_MPHY_CONTROL_REG
, register18
);
3185 et1310_config_flow_control(adapter
);
3187 if (phydev
->speed
== SPEED_1000
&&
3188 adapter
->registry_jumbo_packet
> 2048) {
3191 et131x_mii_read(adapter
, PHY_CONFIG
, ®
);
3192 reg
&= ~ET_PHY_CONFIG_TX_FIFO_DEPTH
;
3193 reg
|= ET_PHY_CONFIG_FIFO_DEPTH_32
;
3194 et131x_mii_write(adapter
, phydev
->mdio
.addr
,
3198 et131x_set_rx_dma_timer(adapter
);
3199 et1310_config_mac_regs2(adapter
);
3201 adapter
->boot_coma
= 0;
3203 if (phydev
->speed
== SPEED_10
) {
3206 et131x_mii_read(adapter
, PHY_MPHY_CONTROL_REG
,
3208 et131x_mii_write(adapter
, phydev
->mdio
.addr
,
3209 PHY_MPHY_CONTROL_REG
,
3211 et131x_mii_write(adapter
, phydev
->mdio
.addr
,
3212 PHY_INDEX_REG
, register18
| 0x8402);
3213 et131x_mii_write(adapter
, phydev
->mdio
.addr
,
3214 PHY_DATA_REG
, register18
| 511);
3215 et131x_mii_write(adapter
, phydev
->mdio
.addr
,
3216 PHY_MPHY_CONTROL_REG
, register18
);
3219 et131x_free_busy_send_packets(adapter
);
3220 et131x_init_send(adapter
);
3222 /* Bring the device back to the state it was during
3223 * init prior to autonegotiation being complete. This
3224 * way, when we get the auto-neg complete interrupt,
3225 * we can complete init by calling config_mac_regs2.
3227 et131x_soft_reset(adapter
);
3229 et131x_adapter_setup(adapter
);
3231 et131x_disable_txrx(netdev
);
3232 et131x_enable_txrx(netdev
);
3236 static int et131x_mii_probe(struct net_device
*netdev
)
3238 struct et131x_adapter
*adapter
= netdev_priv(netdev
);
3239 struct phy_device
*phydev
= NULL
;
3241 phydev
= phy_find_first(adapter
->mii_bus
);
3243 dev_err(&adapter
->pdev
->dev
, "no PHY found\n");
3247 phydev
= phy_connect(netdev
, phydev_name(phydev
),
3248 &et131x_adjust_link
, PHY_INTERFACE_MODE_MII
);
3250 if (IS_ERR(phydev
)) {
3251 dev_err(&adapter
->pdev
->dev
, "Could not attach to PHY\n");
3252 return PTR_ERR(phydev
);
3255 phy_set_max_speed(phydev
, SPEED_100
);
3257 if (adapter
->pdev
->device
!= ET131X_PCI_DEVICE_ID_FAST
)
3258 phy_set_max_speed(phydev
, SPEED_1000
);
3260 phydev
->autoneg
= AUTONEG_ENABLE
;
3262 phy_attached_info(phydev
);
3267 static struct et131x_adapter
*et131x_adapter_init(struct net_device
*netdev
,
3268 struct pci_dev
*pdev
)
3270 static const u8 default_mac
[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
3272 struct et131x_adapter
*adapter
;
3274 adapter
= netdev_priv(netdev
);
3275 adapter
->pdev
= pci_dev_get(pdev
);
3276 adapter
->netdev
= netdev
;
3278 spin_lock_init(&adapter
->tcb_send_qlock
);
3279 spin_lock_init(&adapter
->tcb_ready_qlock
);
3280 spin_lock_init(&adapter
->rcv_lock
);
3282 adapter
->registry_jumbo_packet
= 1514; /* 1514-9216 */
3284 ether_addr_copy(adapter
->addr
, default_mac
);
static void et131x_pci_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct et131x_adapter *adapter = netdev_priv(netdev);

        unregister_netdev(netdev);
        netif_napi_del(&adapter->napi);
        phy_disconnect(netdev->phydev);
        mdiobus_unregister(adapter->mii_bus);
        mdiobus_free(adapter->mii_bus);

        et131x_adapter_memory_free(adapter);
        iounmap(adapter->regs);
        pci_dev_put(pdev);

        free_netdev(netdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}
static void et131x_up(struct net_device *netdev)
{
        et131x_enable_txrx(netdev);
        phy_start(netdev->phydev);
}

static void et131x_down(struct net_device *netdev)
{
        /* Save the timestamp for the TX watchdog, prevent a timeout */
        netif_trans_update(netdev);

        phy_stop(netdev->phydev);
        et131x_disable_txrx(netdev);
}
#ifdef CONFIG_PM_SLEEP
static int et131x_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);

        if (netif_running(netdev)) {
                netif_device_detach(netdev);
                et131x_down(netdev);
                pci_save_state(pdev);
        }

        return 0;
}

static int et131x_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);

        if (netif_running(netdev)) {
                pci_restore_state(pdev);
                et131x_up(netdev);
                netif_device_attach(netdev);
        }

        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
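
/* Editor's note: et131x_pm_ops is wired into the PCI core through the
 * pci_driver declared later in this file, roughly as sketched below (all
 * fields other than .driver.pm are elided here):
 *
 *   static struct pci_driver et131x_driver = {
 *           ...
 *           .driver.pm = &et131x_pm_ops,
 *   };
 *
 * With CONFIG_PM_SLEEP disabled, SIMPLE_DEV_PM_OPS() still defines the ops
 * structure but the suspend/resume callbacks above are compiled out.
 */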
3356 static irqreturn_t
et131x_isr(int irq
, void *dev_id
)
3358 bool handled
= true;
3359 bool enable_interrupts
= true;
3360 struct net_device
*netdev
= dev_id
;
3361 struct et131x_adapter
*adapter
= netdev_priv(netdev
);
3362 struct address_map __iomem
*iomem
= adapter
->regs
;
3363 struct rx_ring
*rx_ring
= &adapter
->rx_ring
;
3364 struct tx_ring
*tx_ring
= &adapter
->tx_ring
;
3367 if (!netif_device_present(netdev
)) {
3369 enable_interrupts
= false;
3373 et131x_disable_interrupts(adapter
);
3375 status
= readl(&adapter
->regs
->global
.int_status
);
3377 if (adapter
->flow
== FLOW_TXONLY
|| adapter
->flow
== FLOW_BOTH
)
3378 status
&= ~INT_MASK_ENABLE
;
3380 status
&= ~INT_MASK_ENABLE_NO_FLOW
;
3382 /* Make sure this is our interrupt */
3385 et131x_enable_interrupts(adapter
);
3389 /* This is our interrupt, so process accordingly */
3390 if (status
& ET_INTR_WATCHDOG
) {
3391 struct tcb
*tcb
= tx_ring
->send_head
;
3394 if (++tcb
->stale
> 1)
3395 status
|= ET_INTR_TXDMA_ISR
;
3397 if (rx_ring
->unfinished_receives
)
3398 status
|= ET_INTR_RXDMA_XFR_DONE
;
3399 else if (tcb
== NULL
)
3400 writel(0, &adapter
->regs
->global
.watchdog_timer
);
3402 status
&= ~ET_INTR_WATCHDOG
;
3405 if (status
& (ET_INTR_RXDMA_XFR_DONE
| ET_INTR_TXDMA_ISR
)) {
3406 enable_interrupts
= false;
3407 napi_schedule(&adapter
->napi
);
3410 status
&= ~(ET_INTR_TXDMA_ISR
| ET_INTR_RXDMA_XFR_DONE
);
3415 if (status
& ET_INTR_TXDMA_ERR
) {
3416 /* Following read also clears the register (COR) */
3417 u32 txdma_err
= readl(&iomem
->txdma
.tx_dma_error
);
3419 dev_warn(&adapter
->pdev
->dev
,
3420 "TXDMA_ERR interrupt, error = %d\n",
3424 if (status
& (ET_INTR_RXDMA_FB_R0_LOW
| ET_INTR_RXDMA_FB_R1_LOW
)) {
3425 /* This indicates the number of unused buffers in RXDMA free
3426 * buffer ring 0 is <= the limit you programmed. Free buffer
3427 * resources need to be returned. Free buffers are consumed as
3428 * packets are passed from the network to the host. The host
3429 * becomes aware of the packets from the contents of the packet
3430 * status ring. This ring is queried when the packet done
3431 * interrupt occurs. Packets are then passed to the OS. When
3432 * the OS is done with the packets the resources can be
3433 * returned to the ET1310 for re-use. This interrupt is one
3434 * method of returning resources.
3437 /* If the user has flow control on, then we will
3438 * send a pause packet, otherwise just exit
3440 if (adapter
->flow
== FLOW_TXONLY
|| adapter
->flow
== FLOW_BOTH
) {
3441 /* Tell the device to send a pause packet via the back
3442 * pressure register (bp req and bp xon/xoff)
3444 if (!et1310_in_phy_coma(adapter
))
3445 writel(3, &iomem
->txmac
.bp_ctrl
);
3449 /* Handle Packet Status Ring Low Interrupt */
3450 if (status
& ET_INTR_RXDMA_STAT_LOW
) {
3451 /* Same idea as with the two Free Buffer Rings. Packets going
3452 * from the network to the host each consume a free buffer
3453 * resource and a packet status resource. These resources are
3454 * passed to the OS. When the OS is done with the resources,
3455 * they need to be returned to the ET1310. This is one method
3456 * of returning the resources.
3460 if (status
& ET_INTR_RXDMA_ERR
) {
3461 /* The rxdma_error interrupt is sent when a time-out on a
3462 * request issued by the JAGCore has occurred or a completion is
3463 * returned with an un-successful status. In both cases the
3464 * request is considered complete. The JAGCore will
3465 * automatically re-try the request in question. Normally
3466 * information on events like these are sent to the host using
3467 * the "Advanced Error Reporting" capability. This interrupt is
3468 * another way of getting similar information. The only thing
3469 * required is to clear the interrupt by reading the ISR in the
3470 * global resources. The JAGCore will do a re-try on the
3471 * request. Normally you should never see this interrupt. If
3472 * you start to see this interrupt occurring frequently then
3473 * something bad has occurred. A reset might be the thing to do.
3477 dev_warn(&adapter
->pdev
->dev
, "RxDMA_ERR interrupt, error %x\n",
3478 readl(&iomem
->txmac
.tx_test
));
3481 /* Handle the Wake on LAN Event */
3482 if (status
& ET_INTR_WOL
) {
3483 /* This is a secondary interrupt for wake on LAN. The driver
3484 * should never see this, if it does, something serious is
3487 dev_err(&adapter
->pdev
->dev
, "WAKE_ON_LAN interrupt\n");
3490 if (status
& ET_INTR_TXMAC
) {
3491 u32 err
= readl(&iomem
->txmac
.err
);
3493 /* When any of the errors occur and TXMAC generates an
3494 * interrupt to report these errors, it usually means that
3495 * TXMAC has detected an error in the data stream retrieved
3496 * from the on-chip Tx Q. All of these errors are catastrophic
3497 * and TXMAC won't be able to recover data when these errors
3498 * occur. In a nutshell, the whole Tx path will have to be reset
3499 * and re-configured afterwards.
3501 dev_warn(&adapter
->pdev
->dev
, "TXMAC interrupt, error 0x%08x\n",
3504 /* If we are debugging, we want to see this error, otherwise we
3505 * just want the device to be reset and continue
3509 if (status
& ET_INTR_RXMAC
) {
3510 /* These interrupts are catastrophic to the device, what we need
3511 * to do is disable the interrupts and set the flag to cause us
3512 * to reset so we can solve this issue.
3514 dev_warn(&adapter
->pdev
->dev
,
3515 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
3516 readl(&iomem
->rxmac
.err_reg
));
3518 dev_warn(&adapter
->pdev
->dev
,
3519 "Enable 0x%08x, Diag 0x%08x\n",
3520 readl(&iomem
->rxmac
.ctrl
),
3521 readl(&iomem
->rxmac
.rxq_diag
));
3523 /* If we are debugging, we want to see this error, otherwise we
3524 * just want the device to be reset and continue
3528 if (status
& ET_INTR_MAC_STAT
) {
3529 /* This means at least one of the un-masked counters in the
3530 * MAC_STAT block has rolled over. Use this to maintain the top,
3531 * software managed bits of the counter(s).
3533 et1310_handle_macstat_interrupt(adapter
);
3536 if (status
& ET_INTR_SLV_TIMEOUT
) {
3537 /* This means a timeout has occurred on a read or write request
3538 * to one of the JAGCore registers. The Global Resources block
3539 * has terminated the request and on a read request, returned a
3540 * "fake" value. The most likely reasons are: Bad Address or the
3541 * addressed module is in a power-down state and can't respond.
3546 if (enable_interrupts
)
3547 et131x_enable_interrupts(adapter
);
3549 return IRQ_RETVAL(handled
);
static int et131x_poll(struct napi_struct *napi, int budget)
{
        struct et131x_adapter *adapter =
                container_of(napi, struct et131x_adapter, napi);
        int work_done = et131x_handle_recv_pkts(adapter, budget);

        et131x_handle_send_pkts(adapter);

        if (work_done < budget) {
                napi_complete_done(&adapter->napi, work_done);
                et131x_enable_interrupts(adapter);
        }

        return work_done;
}
/* et131x_stats - Return the current device statistics */
static struct net_device_stats *et131x_stats(struct net_device *netdev)
{
        struct et131x_adapter *adapter = netdev_priv(netdev);
        struct net_device_stats *stats = &adapter->netdev->stats;
        struct ce_stats *devstat = &adapter->stats;

        stats->rx_errors = devstat->rx_length_errs +
                           devstat->rx_align_errs +
                           devstat->rx_crc_errs +
                           devstat->rx_code_violations +
                           devstat->rx_other_errs;
        stats->tx_errors = devstat->tx_max_pkt_errs;
        stats->multicast = devstat->multicast_pkts_rcvd;
        stats->collisions = devstat->tx_collisions;

        stats->rx_length_errors = devstat->rx_length_errs;
        stats->rx_over_errors = devstat->rx_overflows;
        stats->rx_crc_errors = devstat->rx_crc_errs;
        stats->rx_dropped = devstat->rcvd_pkts_dropped;

        /* NOTE: Not used, can't find analogous statistics */
        /* stats->rx_frame_errors     = devstat->; */
        /* stats->rx_fifo_errors      = devstat->; */
        /* stats->rx_missed_errors    = devstat->; */

        /* stats->tx_aborted_errors   = devstat->; */
        /* stats->tx_carrier_errors   = devstat->; */
        /* stats->tx_fifo_errors      = devstat->; */
        /* stats->tx_heartbeat_errors = devstat->; */
        /* stats->tx_window_errors    = devstat->; */

        return stats;
}
static int et131x_open(struct net_device *netdev)
{
        struct et131x_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev = adapter->pdev;
        unsigned int irq = pdev->irq;
        int result;

        /* Start the timer to track NIC errors */
        timer_setup(&adapter->error_timer, et131x_error_timer_handler, 0);
        adapter->error_timer.expires = jiffies +
                msecs_to_jiffies(TX_ERROR_PERIOD);
        add_timer(&adapter->error_timer);

        result = request_irq(irq, et131x_isr,
                             IRQF_SHARED, netdev->name, netdev);
        if (result) {
                dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
                return result;
        }

        adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;

        napi_enable(&adapter->napi);

        et131x_up(netdev);

        return result;
}

static int et131x_close(struct net_device *netdev)
{
        struct et131x_adapter *adapter = netdev_priv(netdev);

        et131x_down(netdev);
        napi_disable(&adapter->napi);

        adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
        free_irq(adapter->pdev->irq, netdev);

        /* Stop the error timer */
        return del_timer_sync(&adapter->error_timer);
}
3645 /* et131x_set_packet_filter - Configures the Rx Packet filtering */
3646 static int et131x_set_packet_filter(struct et131x_adapter
*adapter
)
3648 int filter
= adapter
->packet_filter
;
3652 ctrl
= readl(&adapter
->regs
->rxmac
.ctrl
);
3653 pf_ctrl
= readl(&adapter
->regs
->rxmac
.pf_ctrl
);
3655 /* Default to disabled packet filtering */
3658 /* Set us to be in promiscuous mode so we receive everything, this
3659 * is also true when we get a packet filter of 0
3661 if ((filter
& ET131X_PACKET_TYPE_PROMISCUOUS
) || filter
== 0)
3662 pf_ctrl
&= ~7; /* Clear filter bits */
3664 /* Set us up with Multicast packet filtering. Three cases are
3665 * possible - (1) we have a multi-cast list, (2) we receive ALL
3666 * multicast entries or (3) we receive none.
3668 if (filter
& ET131X_PACKET_TYPE_ALL_MULTICAST
)
3669 pf_ctrl
&= ~2; /* Multicast filter bit */
3671 et1310_setup_device_for_multicast(adapter
);
3676 /* Set us up with Unicast packet filtering */
3677 if (filter
& ET131X_PACKET_TYPE_DIRECTED
) {
3678 et1310_setup_device_for_unicast(adapter
);
3683 /* Set us up with Broadcast packet filtering */
3684 if (filter
& ET131X_PACKET_TYPE_BROADCAST
) {
3685 pf_ctrl
|= 1; /* Broadcast filter bit */
3691 /* Setup the receive mac configuration registers - Packet
3692 * Filter control + the enable / disable for packet filter
3693 * in the control reg.
3695 writel(pf_ctrl
, &adapter
->regs
->rxmac
.pf_ctrl
);
3696 writel(ctrl
, &adapter
->regs
->rxmac
.ctrl
);
3701 static void et131x_multicast(struct net_device
*netdev
)
3703 struct et131x_adapter
*adapter
= netdev_priv(netdev
);
3705 struct netdev_hw_addr
*ha
;
3708 /* Before we modify the platform-independent filter flags, store them
3709 * locally. This allows us to determine if anything's changed and if
3710 * we even need to bother the hardware
3712 packet_filter
= adapter
->packet_filter
;
3714 /* Clear the 'multicast' flag locally; because we only have a single
3715 * flag to check multicast, and multiple multicast addresses can be
3716 * set, this is the easiest way to determine if more than one
3717 * multicast address is being set.
3719 packet_filter
&= ~ET131X_PACKET_TYPE_MULTICAST
;
3721 /* Check the net_device flags and set the device independent flags
3724 if (netdev
->flags
& IFF_PROMISC
)
3725 adapter
->packet_filter
|= ET131X_PACKET_TYPE_PROMISCUOUS
;
3727 adapter
->packet_filter
&= ~ET131X_PACKET_TYPE_PROMISCUOUS
;
3729 if ((netdev
->flags
& IFF_ALLMULTI
) ||
3730 (netdev_mc_count(netdev
) > NIC_MAX_MCAST_LIST
))
3731 adapter
->packet_filter
|= ET131X_PACKET_TYPE_ALL_MULTICAST
;
3733 if (netdev_mc_count(netdev
) < 1) {
3734 adapter
->packet_filter
&= ~ET131X_PACKET_TYPE_ALL_MULTICAST
;
3735 adapter
->packet_filter
&= ~ET131X_PACKET_TYPE_MULTICAST
;
3737 adapter
->packet_filter
|= ET131X_PACKET_TYPE_MULTICAST
;
3740 /* Set values in the private adapter struct */
3742 netdev_for_each_mc_addr(ha
, netdev
) {
3743 if (i
== NIC_MAX_MCAST_LIST
)
3745 ether_addr_copy(adapter
->multicast_list
[i
++], ha
->addr
);
3747 adapter
->multicast_addr_count
= i
;
3749 /* Are the new flags different from the previous ones? If not, then no
3750 * action is required
3752 * NOTE - This block will always update the multicast_list with the
3753 * hardware, even if the addresses aren't the same.
3755 if (packet_filter
!= adapter
->packet_filter
)
3756 et131x_set_packet_filter(adapter
);
static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
{
        struct et131x_adapter *adapter = netdev_priv(netdev);
        struct tx_ring *tx_ring = &adapter->tx_ring;

        /* This driver does not support TSO, it is very unlikely
         * this condition is true.
         */
        if (unlikely(skb_shinfo(skb)->nr_frags > MAX_TX_DESC_PER_PKT - 2)) {
                if (skb_linearize(skb))
                        goto drop_err;
        }
        /* stop the queue if it's getting full */
        if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
                netif_stop_queue(netdev);

        /* Save the timestamp for the TX timeout watchdog */
        netif_trans_update(netdev);

        /* TCB is not available */
        if (tx_ring->used >= NUM_TCB)
                goto drop_err;

        if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
            !netif_carrier_ok(netdev))
                goto drop_err;

        if (send_packet(skb, adapter))
                goto drop_err;

        return NETDEV_TX_OK;

drop_err:
        dev_kfree_skb_any(skb);
        adapter->netdev->stats.tx_dropped++;
        return NETDEV_TX_OK;
}
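
/* Transmit path at a glance (editor's summary of the code above and the
 * helpers earlier in this file): et131x_tx() is the ndo_start_xmit hook.
 * send_packet() pulls a TCB from the ready queue, nic_send_packet() maps
 * the skb fragments into tx_desc entries and kicks the hardware via
 * txdma.service_request. Completed sends are reclaimed by
 * et131x_handle_send_pkts(), which also wakes the queue once the number of
 * in-flight TCBs drops to NUM_TCB / 3 or fewer.
 */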
3797 /* et131x_tx_timeout - Timeout handler
3799 * The handler called when a Tx request times out. The timeout period is
3800 * specified by the 'tx_timeo" element in the net_device structure (see
3801 * et131x_alloc_device() to see how this value is set).
3803 static void et131x_tx_timeout(struct net_device
*netdev
, unsigned int txqueue
)
3805 struct et131x_adapter
*adapter
= netdev_priv(netdev
);
3806 struct tx_ring
*tx_ring
= &adapter
->tx_ring
;
3808 unsigned long flags
;
3810 /* If the device is closed, ignore the timeout */
3811 if (!(adapter
->flags
& FMP_ADAPTER_INTERRUPT_IN_USE
))
3814 /* Any nonrecoverable hardware error?
3815 * Checks adapter->flags for any failure in phy reading
3817 if (adapter
->flags
& FMP_ADAPTER_NON_RECOVER_ERROR
)
3820 /* Hardware failure? */
3821 if (adapter
->flags
& FMP_ADAPTER_HARDWARE_ERROR
) {
3822 dev_err(&adapter
->pdev
->dev
, "hardware error - reset\n");
3826 /* Is send stuck? */
3827 spin_lock_irqsave(&adapter
->tcb_send_qlock
, flags
);
3828 tcb
= tx_ring
->send_head
;
3829 spin_unlock_irqrestore(&adapter
->tcb_send_qlock
, flags
);
3834 if (tcb
->count
> NIC_SEND_HANG_THRESHOLD
) {
3835 dev_warn(&adapter
->pdev
->dev
,
3836 "Send stuck - reset. tcb->WrIndex %x\n",
3839 adapter
->netdev
->stats
.tx_errors
++;
3841 /* perform reset of tx/rx */
3842 et131x_disable_txrx(netdev
);
3843 et131x_enable_txrx(netdev
);
static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
{
        int result = 0;
        struct et131x_adapter *adapter = netdev_priv(netdev);

        et131x_disable_txrx(netdev);

        WRITE_ONCE(netdev->mtu, new_mtu);

        et131x_adapter_memory_free(adapter);

        /* Set the config parameter for Jumbo Packet support */
        adapter->registry_jumbo_packet = new_mtu + 14;
        et131x_soft_reset(adapter);

        result = et131x_adapter_memory_alloc(adapter);
        if (result != 0) {
                dev_warn(&adapter->pdev->dev,
                         "Change MTU failed; couldn't re-alloc DMA memory\n");
                return result;
        }

        et131x_init_send(adapter);
        et131x_hwaddr_init(adapter);
        eth_hw_addr_set(netdev, adapter->addr);

        /* Init the device with the new settings */
        et131x_adapter_setup(adapter);
        et131x_enable_txrx(netdev);

        return result;
}
static const struct net_device_ops et131x_netdev_ops = {
	.ndo_open		= et131x_open,
	.ndo_stop		= et131x_close,
	.ndo_start_xmit		= et131x_tx,
	.ndo_set_rx_mode	= et131x_multicast,
	.ndo_tx_timeout		= et131x_tx_timeout,
	.ndo_change_mtu		= et131x_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_get_stats		= et131x_stats,
	.ndo_eth_ioctl		= phy_do_ioctl,
};
static int et131x_pci_setup(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct et131x_adapter *adapter;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "pci_enable_device() failed\n");
		goto out;
	}

	/* Perform some basic PCI checks */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Can't find PCI device's base address\n");
		rc = -ENODEV;
		goto err_disable;
	}

	rc = pci_request_regions(pdev, DRIVER_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev, "Can't get PCI resources\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	/* Check the DMA addressing support of this device */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_err(&pdev->dev, "No usable DMA addressing method\n");
		goto err_release_res;
	}

	netdev = alloc_etherdev(sizeof(struct et131x_adapter));
	if (!netdev) {
		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
		rc = -ENOMEM;
		goto err_release_res;
	}

	netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
	netdev->netdev_ops = &et131x_netdev_ops;
	netdev->min_mtu = ET131X_MIN_MTU;
	netdev->max_mtu = ET131X_MAX_MTU;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	netdev->ethtool_ops = &et131x_ethtool_ops;

	adapter = et131x_adapter_init(netdev, pdev);

	rc = et131x_pci_init(adapter, pdev);
	if (rc < 0)
		goto err_free_dev;

	/* Map the bus-relative registers to system virtual memory */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		rc = -ENOMEM;
		goto err_free_dev;
	}

	/* If Phy COMA mode was enabled when we went down, disable it here. */
	writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);

	et131x_soft_reset(adapter);
	et131x_disable_interrupts(adapter);

	rc = et131x_adapter_memory_alloc(adapter);
	if (rc < 0) {
		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
		goto err_iounmap;
	}

	et131x_init_send(adapter);

	netif_napi_add(netdev, &adapter->napi, et131x_poll);

	eth_hw_addr_set(netdev, adapter->addr);

	rc = -ENOMEM;

	adapter->mii_bus = mdiobus_alloc();
	if (!adapter->mii_bus) {
		dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
		goto err_mem_free;
	}

	adapter->mii_bus->name = "et131x_eth_mii";
	snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
		 pci_dev_id(adapter->pdev));
	adapter->mii_bus->priv = netdev;
	adapter->mii_bus->read = et131x_mdio_read;
	adapter->mii_bus->write = et131x_mdio_write;

	rc = mdiobus_register(adapter->mii_bus);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_mdio_free;
	}

	rc = et131x_mii_probe(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto err_mdio_unregister;
	}

	et131x_adapter_setup(adapter);

	/* Init variable for counting how long we do not have link status */
	adapter->boot_coma = 0;
	et1310_disable_phy_coma(adapter);

	/* We can enable interrupts now
	 *
	 * NOTE - Because registration of interrupt handler is done in the
	 *        device's open(), defer enabling device interrupts to that
	 *        point
	 */

	rc = register_netdev(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "register_netdev() failed\n");
		goto err_phy_disconnect;
	}

	/* Register the net_device struct with the PCI subsystem. Save a copy
	 * of the PCI config space for this device now that the device has
	 * been initialized, just in case it needs to be quickly restored.
	 */
	pci_set_drvdata(pdev, netdev);
out:
	return rc;

err_phy_disconnect:
	phy_disconnect(netdev->phydev);
err_mdio_unregister:
	mdiobus_unregister(adapter->mii_bus);
err_mdio_free:
	mdiobus_free(adapter->mii_bus);
err_mem_free:
	et131x_adapter_memory_free(adapter);
err_iounmap:
	iounmap(adapter->regs);
err_free_dev:
	pci_dev_put(pdev);
	free_netdev(netdev);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	goto out;
}
static const struct pci_device_id et131x_pci_table[] = {
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
	{ 0,}
};
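
/* Exporting the ID table with MODULE_DEVICE_TABLE() lets userspace
 * (udev/modprobe) autoload this module when a matching device is found.
 */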
MODULE_DEVICE_TABLE(pci, et131x_pci_table);
static struct pci_driver et131x_driver = {
	.name		= DRIVER_NAME,
	.id_table	= et131x_pci_table,
	.probe		= et131x_pci_setup,
	.remove		= et131x_pci_remove,
	.driver.pm	= &et131x_pm_ops,
};
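
/* module_pci_driver() generates the module init/exit boilerplate that
 * registers and unregisters et131x_driver with the PCI core.
 */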
module_pci_driver(et131x_driver);