// SPDX-License-Identifier: GPL-2.0
/* Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
 *
 * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle
 * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
 *
 * References:
 *  o IOC3 ASIC specification 4.51, 1996-04-18
 *  o IEEE 802.3 specification, 2000 edition
 *  o DP38840A Specification, National Semiconductor, March 1997
 *
 * To do:
 *
 *  o Use prefetching for large packets.  What is a good lower limit for
 *    prefetching?
 *  o Use hardware checksums.
 *  o Which PHYs might possibly be attached to the IOC3 in real life,
 *    which workarounds are required for them?  Do we ever have Lucent's?
 *  o For the 2.5 branch kill the mii-tool ioctls.
 */

#define IOC3_NAME	"ioc3-eth"
#define IOC3_VERSION	"2.6.3-4"
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/crc16.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/gfp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/nvmem-consumer.h>

#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>

#define CRC16_INIT	0
#define CRC16_VALID	0xb001
/* Number of RX buffers.  This is tunable in the range of 16 <= x < 512.
 * The value must be a power of two.
 */
#define RX_BUFFS		64
#define RX_RING_ENTRIES		512		/* fixed in hardware */
#define RX_RING_MASK		(RX_RING_ENTRIES - 1)
#define RX_RING_SIZE		(RX_RING_ENTRIES * sizeof(u64))

/* 128 TX buffers (not tunable) */
#define TX_RING_ENTRIES		128
#define TX_RING_MASK		(TX_RING_ENTRIES - 1)
#define TX_RING_SIZE		(TX_RING_ENTRIES * sizeof(struct ioc3_etxd))

/* IOC3 does dma transfers in 128 byte blocks */
#define IOC3_DMA_XFER_LEN	128UL

/* Every RX buffer starts with 8 byte descriptor data */
#define RX_OFFSET		(sizeof(struct ioc3_erxbuf) + NET_IP_ALIGN)
#define RX_BUF_SIZE		(13 * IOC3_DMA_XFER_LEN)
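/* 13 * 128 = 1664 bytes per RX buffer: enough for the 8 byte ioc3_erxbuf
 * header, the NET_IP_ALIGN pad and a maximum sized Ethernet frame, while
 * keeping the mapped length a multiple of the 128 byte DMA transfer size.
 */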
#define ETCSR_FD	((21 << ETCSR_IPGR2_SHIFT) | (21 << ETCSR_IPGR1_SHIFT) | 21)
#define ETCSR_HD	((17 << ETCSR_IPGR2_SHIFT) | (11 << ETCSR_IPGR1_SHIFT) | 21)
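/* ETCSR_FD/ETCSR_HD are the inter-packet gap timings written to the ETCSR
 * register by ioc3_setup_duplex() for full and half duplex operation.
 */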
/* Private per NIC data of the driver.  */
struct ioc3_private {
	struct ioc3_ethregs *regs;
	struct device *dma_dev;
	u32 *ssram;
	unsigned long *rxr;		/* pointer to receiver ring */
	void *tx_ring;
	struct ioc3_etxd *txr;
	dma_addr_t rxr_dma;
	dma_addr_t txr_dma;
	struct sk_buff *rx_skbs[RX_RING_ENTRIES];
	struct sk_buff *tx_skbs[TX_RING_ENTRIES];
	int rx_ci;			/* RX consumer index */
	int rx_pi;			/* RX producer index */
	int tx_ci;			/* TX consumer index */
	int tx_pi;			/* TX producer index */
	int txqlen;
	u32 emcr, ehar_h, ehar_l;
	spinlock_t ioc3_lock;
	struct mii_if_info mii;

	/* Members used by autonegotiation  */
	struct timer_list ioc3_timer;
};
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ioc3_timeout(struct net_device *dev, unsigned int txqueue);
static inline unsigned int ioc3_hash(const unsigned char *addr);
static void ioc3_start(struct ioc3_private *ip);
static inline void ioc3_stop(struct ioc3_private *ip);
static void ioc3_init(struct net_device *dev);
static int ioc3_alloc_rx_bufs(struct net_device *dev);
static void ioc3_free_rx_bufs(struct ioc3_private *ip);
static inline void ioc3_clean_tx_ring(struct ioc3_private *ip);

static const struct ethtool_ops ioc3_ethtool_ops;
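/* aligned_rx_skb_addr() returns the padding needed to round an address up
 * to the next IOC3_DMA_XFER_LEN boundary: (~addr + 1) is -addr, so the
 * masked result is (-addr) mod 128.  E.g. addr == 0x1008 yields 0x78, and
 * 0x1008 + 0x78 == 0x1080 is 128 byte aligned.
 */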
static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
{
	return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL);
}
static inline int ioc3_alloc_skb(struct ioc3_private *ip, struct sk_buff **skb,
				 struct ioc3_erxbuf **rxb, dma_addr_t *rxb_dma)
{
	struct sk_buff *new_skb;
	dma_addr_t d;
	int offset;

	new_skb = alloc_skb(RX_BUF_SIZE + IOC3_DMA_XFER_LEN - 1, GFP_ATOMIC);
	if (!new_skb)
		return -ENOMEM;

	/* ensure buffer is aligned to IOC3_DMA_XFER_LEN */
	offset = aligned_rx_skb_addr((unsigned long)new_skb->data);
	if (offset)
		skb_reserve(new_skb, offset);

	d = dma_map_single(ip->dma_dev, new_skb->data,
			   RX_BUF_SIZE, DMA_FROM_DEVICE);

	if (dma_mapping_error(ip->dma_dev, d)) {
		dev_kfree_skb_any(new_skb);
		return -ENOMEM;
	}
	*rxb_dma = d;
	*rxb = (struct ioc3_erxbuf *)new_skb->data;
	skb_reserve(new_skb, RX_OFFSET);
	*skb = new_skb;

	return 0;
}
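/* On SGI Xtalk/Bridge systems the upper bits of a PCI64 bus address carry
 * attribute flags (barrier, prefetch, ...).  ioc3_map() below merges the
 * requested attribute into a DMA address on such systems and is a plain
 * pass-through elsewhere.
 */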
#ifdef CONFIG_PCI_XTALK_BRIDGE
static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
{
	return (addr & ~PCI64_ATTR_BAR) | attr;
}

#define ERBAR_VAL	(ERBAR_BARRIER_BIT << ERBAR_RXBARR_SHIFT)
#else
static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
{
	return addr;
}

#define ERBAR_VAL	0
#endif
static int ioc3eth_nvmem_match(struct device *dev, const void *data)
{
	const char *name = dev_name(dev);
	const char *prefix = data;
	int prefix_len;

	prefix_len = strlen(prefix);
	if (strlen(name) < (prefix_len + 3))
		return 0;

	if (memcmp(prefix, name, prefix_len) != 0)
		return 0;

	/* found nvmem device which is attached to our ioc3
	 * now check for one wire family code 09, 89 and 91
	 */
	if (memcmp(name + prefix_len, "09-", 3) == 0)
		return 1;
	if (memcmp(name + prefix_len, "89-", 3) == 0)
		return 1;
	if (memcmp(name + prefix_len, "91-", 3) == 0)
		return 1;

	return 0;
}
static int ioc3eth_get_mac_addr(struct resource *res, u8 mac_addr[6])
{
	struct nvmem_device *nvmem;
	char prefix[24];
	u8 prom[16];
	int ret;
	int i;

	snprintf(prefix, sizeof(prefix), "ioc3-%012llx-",
		 res->start & ~0xffff);

	nvmem = nvmem_device_find(prefix, ioc3eth_nvmem_match);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	ret = nvmem_device_read(nvmem, 0, 16, prom);
	nvmem_device_put(nvmem);
	if (ret < 0)
		return ret;

	/* check if content is valid */
	if (prom[0] != 0x0a ||
	    crc16(CRC16_INIT, prom, 13) != CRC16_VALID)
		return -EINVAL;

	for (i = 0; i < 6; i++)
		mac_addr[i] = prom[10 - i];

	return 0;
}
static void __ioc3_set_mac_address(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	writel((dev->dev_addr[5] <<  8) |
	       dev->dev_addr[4],
	       &ip->regs->emar_h);

	writel((dev->dev_addr[3] << 24) |
	       (dev->dev_addr[2] << 16) |
	       (dev->dev_addr[1] <<  8) |
	       dev->dev_addr[0],
	       &ip->regs->emar_l);
}
static int ioc3_set_mac_address(struct net_device *dev, void *addr)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&ip->ioc3_lock);
	__ioc3_set_mac_address(dev);
	spin_unlock_irq(&ip->ioc3_lock);

	return 0;
}
/* Caller must hold the ioc3_lock for MII readers.  This is also
 * used to protect the transmitter side but it's low contention.
 */
static int ioc3_mdio_read(struct net_device *dev, int phy, int reg)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;

	while (readl(&regs->micr) & MICR_BUSY)
		;
	writel((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG,
	       &regs->micr);
	while (readl(&regs->micr) & MICR_BUSY)
		;

	return readl(&regs->midr_r) & MIDR_DATA_MASK;
}
static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;

	while (readl(&regs->micr) & MICR_BUSY)
		;
	writel(data, &regs->midr_w);
	writel((phy << MICR_PHYADDR_SHIFT) | reg, &regs->micr);
	while (readl(&regs->micr) & MICR_BUSY)
		;
}
static int ioc3_mii_init(struct ioc3_private *ip);

static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;

	dev->stats.collisions += readl(&regs->etcdc) & ETCDC_COLLCNT_MASK;
	return &dev->stats;
}
static void ioc3_tcpudp_checksum(struct sk_buff *skb, u32 hwsum, int len)
{
	struct ethhdr *eh = eth_hdr(skb);
	unsigned int proto;
	unsigned char *cp;
	struct iphdr *ih;
	u32 csum, ehsum;
	u16 *ew;

	/* Did hardware handle the checksum at all?  The cases we can handle
	 * are:
	 *
	 * - TCP and UDP checksums of IPv4 only.
	 * - IPv6 would be doable but we keep that for later ...
	 * - Only unfragmented packets.  Did somebody already tell you
	 *   fragmentation is evil?
	 * - don't care about packet size.  Worst case when processing a
	 *   malformed packet we'll try to access the packet at ip header +
	 *   64 bytes which is still inside the skb.  Even in the unlikely
	 *   case where the checksum is right the higher layers will still
	 *   drop the packet as appropriate.
	 */
	if (eh->h_proto != htons(ETH_P_IP))
		return;

	ih = (struct iphdr *)((char *)eh + ETH_HLEN);
	if (ip_is_fragment(ih))
		return;

	proto = ih->protocol;
	if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
		return;

	/* Same as tx - compute csum of pseudo header  */
	csum = hwsum +
	       (ih->tot_len - (ih->ihl << 2)) +
	       htons((u16)ih->protocol) +
	       (ih->saddr >> 16) + (ih->saddr & 0xffff) +
	       (ih->daddr >> 16) + (ih->daddr & 0xffff);

	/* Sum up ethernet dest addr, src addr and protocol  */
	ew = (u16 *)eh;
	ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];

	ehsum = (ehsum & 0xffff) + (ehsum >> 16);
	ehsum = (ehsum & 0xffff) + (ehsum >> 16);

	csum += 0xffff ^ ehsum;

	/* In the next step we also subtract the 1's complement
	 * checksum of the trailing ethernet CRC.
	 */
	cp = (char *)eh + len;	/* points at trailing CRC */
	if (len & 1) {
		csum += 0xffff ^ (u16)((cp[1] << 8) | cp[0]);
		csum += 0xffff ^ (u16)((cp[3] << 8) | cp[2]);
	} else {
		csum += 0xffff ^ (u16)((cp[0] << 8) | cp[1]);
		csum += 0xffff ^ (u16)((cp[2] << 8) | cp[3]);
	}

	csum = (csum & 0xffff) + (csum >> 16);
	csum = (csum & 0xffff) + (csum >> 16);

	if (csum == 0xffff)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
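/* The RX ring uses two indices: rx_ci is where the driver consumes
 * completed buffers, rx_pi is where freshly allocated replacement buffers
 * are handed back to the chip.  Each ring slot holds the bus address of an
 * skb whose first bytes are the ioc3_erxbuf header written by the hardware.
 */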
static inline void ioc3_rx(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct sk_buff *skb, *new_skb;
	int rx_entry, n_entry, len;
	struct ioc3_erxbuf *rxb;
	unsigned long *rxr;
	dma_addr_t d;
	u32 w0, err;

	rxr = ip->rxr;		/* Ring base */
	rx_entry = ip->rx_ci;	/* RX consume index */
	n_entry = ip->rx_pi;

	skb = ip->rx_skbs[rx_entry];
	rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
	w0 = be32_to_cpu(rxb->w0);

	while (w0 & ERXBUF_V) {
		err = be32_to_cpu(rxb->err);	/* It's valid ...  */
		if (err & ERXBUF_GOODPKT) {
			len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);

			if (ioc3_alloc_skb(ip, &new_skb, &rxb, &d)) {
				/* Ouch, drop packet and just recycle packet
				 * to keep the ring filled.
				 */
				dev->stats.rx_dropped++;
				new_skb = skb;
				d = rxr[rx_entry];
				goto next;
			}

			if (likely(dev->features & NETIF_F_RXCSUM))
				ioc3_tcpudp_checksum(skb,
						     w0 & ERXBUF_IPCKSUM_MASK,
						     len);

			dma_unmap_single(ip->dma_dev, rxr[rx_entry],
					 RX_BUF_SIZE, DMA_FROM_DEVICE);

			netif_rx(skb);

			ip->rx_skbs[rx_entry] = NULL;	/* Poison  */

			dev->stats.rx_packets++;	/* Statistics */
			dev->stats.rx_bytes += len;
		} else {
			/* The frame is invalid and the skb never
			 * reached the network layer so we can just
			 * recycle it.
			 */
			new_skb = skb;
			d = rxr[rx_entry];
			dev->stats.rx_errors++;
		}
		if (err & ERXBUF_CRCERR)	/* Statistics */
			dev->stats.rx_crc_errors++;
		if (err & ERXBUF_FRAMERR)
			dev->stats.rx_frame_errors++;

next:
		ip->rx_skbs[n_entry] = new_skb;
		rxr[n_entry] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
		rxb->w0 = 0;				/* Clear valid flag */
		n_entry = (n_entry + 1) & RX_RING_MASK;	/* Update erpir */

		/* Now go on to the next ring entry.  */
		rx_entry = (rx_entry + 1) & RX_RING_MASK;
		skb = ip->rx_skbs[rx_entry];
		rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
		w0 = be32_to_cpu(rxb->w0);
	}
	writel((n_entry << 3) | ERPIR_ARM, &ip->regs->erpir);
	ip->rx_ci = rx_entry;
	ip->rx_pi = n_entry;
}
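/* TX completion: the ETCIR register holds the byte offset of the next
 * descriptor the chip will fetch.  Each ioc3_etxd is 128 bytes, so
 * (etcir >> 7) & TX_RING_MASK converts it into a ring index; everything
 * between tx_ci and that index has been transmitted and can be freed.
 */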
static inline void ioc3_tx(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;
	unsigned long packets, bytes;
	int tx_entry, o_entry;
	struct sk_buff *skb;
	u32 etcir;

	spin_lock(&ip->ioc3_lock);
	etcir = readl(&regs->etcir);

	tx_entry = (etcir >> 7) & TX_RING_MASK;
	o_entry = ip->tx_ci;
	packets = 0;
	bytes = 0;

	while (o_entry != tx_entry) {
		packets++;
		skb = ip->tx_skbs[o_entry];
		bytes += skb->len;
		dev_consume_skb_irq(skb);
		ip->tx_skbs[o_entry] = NULL;

		o_entry = (o_entry + 1) & TX_RING_MASK;	/* Next */

		etcir = readl(&regs->etcir);		/* More pkts sent?  */
		tx_entry = (etcir >> 7) & TX_RING_MASK;
	}

	dev->stats.tx_packets += packets;
	dev->stats.tx_bytes += bytes;
	ip->txqlen -= packets;

	if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES)
		netif_wake_queue(dev);

	ip->tx_ci = o_entry;
	spin_unlock(&ip->ioc3_lock);
}
/* Deal with fatal IOC3 errors.  This condition might be caused by hardware
 * or software problems, so we should try to recover
 * more gracefully if this ever happens.  In theory we might be flooded
 * with such error interrupts if something really goes wrong, so we might
 * also consider to take the interface down.
 */
static void ioc3_error(struct net_device *dev, u32 eisr)
{
	struct ioc3_private *ip = netdev_priv(dev);

	spin_lock(&ip->ioc3_lock);

	if (eisr & EISR_RXOFLO)
		net_err_ratelimited("%s: RX overflow.\n", dev->name);
	if (eisr & EISR_RXBUFOFLO)
		net_err_ratelimited("%s: RX buffer overflow.\n", dev->name);
	if (eisr & EISR_RXMEMERR)
		net_err_ratelimited("%s: RX PCI error.\n", dev->name);
	if (eisr & EISR_RXPARERR)
		net_err_ratelimited("%s: RX SSRAM parity error.\n", dev->name);
	if (eisr & EISR_TXBUFUFLO)
		net_err_ratelimited("%s: TX buffer underflow.\n", dev->name);
	if (eisr & EISR_TXMEMERR)
		net_err_ratelimited("%s: TX PCI error.\n", dev->name);

	ioc3_stop(ip);
	ioc3_free_rx_bufs(ip);
	ioc3_clean_tx_ring(ip);

	ioc3_init(dev);
	if (ioc3_alloc_rx_bufs(dev)) {
		netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
		spin_unlock(&ip->ioc3_lock);
		return;
	}
	ioc3_start(ip);
	ioc3_mii_init(ip);

	netif_wake_queue(dev);

	spin_unlock(&ip->ioc3_lock);
}
/* The interrupt handler does all of the Rx thread work and cleans up
 * after the Tx thread.
 */
static irqreturn_t ioc3_interrupt(int irq, void *dev_id)
{
	struct ioc3_private *ip = netdev_priv(dev_id);
	struct ioc3_ethregs *regs = ip->regs;
	u32 eisr;

	eisr = readl(&regs->eisr);
	writel(eisr, &regs->eisr);
	readl(&regs->eisr);				/* Flush */

	if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
		    EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
		ioc3_error(dev_id, eisr);
	if (eisr & EISR_RXTIMERINT)
		ioc3_rx(dev_id);
	if (eisr & EISR_TXEXPLICIT)
		ioc3_tx(dev_id);

	return IRQ_HANDLED;
}
static inline void ioc3_setup_duplex(struct ioc3_private *ip)
{
	struct ioc3_ethregs *regs = ip->regs;

	spin_lock_irq(&ip->ioc3_lock);

	if (ip->mii.full_duplex) {
		writel(ETCSR_FD, &regs->etcsr);
		ip->emcr |= EMCR_DUPLEX;
	} else {
		writel(ETCSR_HD, &regs->etcsr);
		ip->emcr &= ~EMCR_DUPLEX;
	}
	writel(ip->emcr, &regs->emcr);

	spin_unlock_irq(&ip->ioc3_lock);
}
static void ioc3_timer(struct timer_list *t)
{
	struct ioc3_private *ip = from_timer(ip, t, ioc3_timer);

	/* Print the link status if it has changed */
	mii_check_media(&ip->mii, 1, 0);
	ioc3_setup_duplex(ip);

	ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10); /* 1.2s */
	add_timer(&ip->ioc3_timer);
}
/* Try to find a PHY.  There is no apparent relation between the MII addresses
 * in the SGI documentation and what we find in reality, so we simply probe
 * for the PHY.
 */
static int ioc3_mii_init(struct ioc3_private *ip)
{
	int found = 0;
	u16 word;
	int i;

	for (i = 0; i < 32; i++) {
		word = ioc3_mdio_read(ip->mii.dev, i, MII_PHYSID1);

		if (word != 0xffff && word != 0x0000) {
			found = 1;
			break;			/* Found a PHY */
		}
	}

	if (!found) {
		ip->mii.phy_id = -1;
		return -ENODEV;
	}

	ip->mii.phy_id = i;

	return 0;
}
static void ioc3_mii_start(struct ioc3_private *ip)
{
	ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10;  /* 1.2 sec. */
	add_timer(&ip->ioc3_timer);
}
static inline void ioc3_tx_unmap(struct ioc3_private *ip, int entry)
{
	struct ioc3_etxd *desc;
	u32 cmd, bufcnt, len;

	desc = &ip->txr[entry];
	cmd = be32_to_cpu(desc->cmd);
	bufcnt = be32_to_cpu(desc->bufcnt);
	if (cmd & ETXD_B1V) {
		len = (bufcnt & ETXD_B1CNT_MASK) >> ETXD_B1CNT_SHIFT;
		dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1),
				 len, DMA_TO_DEVICE);
	}
	if (cmd & ETXD_B2V) {
		len = (bufcnt & ETXD_B2CNT_MASK) >> ETXD_B2CNT_SHIFT;
		dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2),
				 len, DMA_TO_DEVICE);
	}
}
static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < TX_RING_ENTRIES; i++) {
		skb = ip->tx_skbs[i];
		if (skb) {
			ioc3_tx_unmap(ip, i);
			ip->tx_skbs[i] = NULL;
			dev_kfree_skb_any(skb);
		}
	}
	ip->tx_pi = 0;
	ip->tx_ci = 0;
}
static void ioc3_free_rx_bufs(struct ioc3_private *ip)
{
	int rx_entry, n_entry;
	struct sk_buff *skb;

	n_entry = ip->rx_ci;
	rx_entry = ip->rx_pi;

	while (n_entry != rx_entry) {
		skb = ip->rx_skbs[n_entry];
		if (skb) {
			dma_unmap_single(ip->dma_dev,
					 be64_to_cpu(ip->rxr[n_entry]),
					 RX_BUF_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
		n_entry = (n_entry + 1) & RX_RING_MASK;
	}
}
static int ioc3_alloc_rx_bufs(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_erxbuf *rxb;
	dma_addr_t d;
	int i;

	/* Now the rx buffers.  The RX ring may be larger but
	 * we only allocate RX_BUFFS buffers for now.  Need to tune
	 * this for performance and memory later.
	 */
	for (i = 0; i < RX_BUFFS; i++) {
		if (ioc3_alloc_skb(ip, &ip->rx_skbs[i], &rxb, &d))
			return -ENOMEM;

		rxb->w0 = 0;			/* Clear valid flag */
		ip->rxr[i] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
	}
	ip->rx_ci = 0;
	ip->rx_pi = RX_BUFFS;

	return 0;
}
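/* Probe the size of the external SSRAM packet buffer: write distinct
 * patterns to two locations 64 KB apart.  On a chip with only 64 KB the
 * second write aliases the first and the read back pattern is wrong, so
 * fall back to the small buffer size.
 */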
static inline void ioc3_ssram_disc(struct ioc3_private *ip)
{
	struct ioc3_ethregs *regs = ip->regs;
	u32 *ssram0 = &ip->ssram[0x0000];
	u32 *ssram1 = &ip->ssram[0x4000];
	u32 pattern = 0x5555;

	/* Assume the larger size SSRAM and enable parity checking */
	writel(readl(&regs->emcr) | (EMCR_BUFSIZ | EMCR_RAMPAR), &regs->emcr);
	readl(&regs->emcr); /* Flush */

	writel(pattern, ssram0);
	writel(~pattern & IOC3_SSRAM_DM, ssram1);

	if ((readl(ssram0) & IOC3_SSRAM_DM) != pattern ||
	    (readl(ssram1) & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
		/* set ssram size to 64 KB */
		ip->emcr |= EMCR_RAMPAR;
		writel(readl(&regs->emcr) & ~EMCR_BUFSIZ, &regs->emcr);
	} else {
		ip->emcr |= EMCR_BUFSIZ | EMCR_RAMPAR;
	}
}
static void ioc3_init(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;

	del_timer_sync(&ip->ioc3_timer);	/* Kill if running	*/

	writel(EMCR_RST, &regs->emcr);		/* Reset		*/
	readl(&regs->emcr);			/* Flush WB		*/
	udelay(4);				/* Give it time ...	*/
	writel(0, &regs->emcr);

	writel(ERBAR_VAL, &regs->erbar);
	readl(&regs->etcdc);			/* Clear on read	*/
	writel(15, &regs->ercsr);		/* RX low watermark	*/
	writel(0, &regs->ertr);			/* Interrupt immediately */
	__ioc3_set_mac_address(dev);
	writel(ip->ehar_h, &regs->ehar_h);
	writel(ip->ehar_l, &regs->ehar_l);
	writel(42, &regs->ersr);		/* XXX should be random	*/
}
static void ioc3_start(struct ioc3_private *ip)
{
	struct ioc3_ethregs *regs = ip->regs;
	unsigned long ring;

	/* Now the rx ring base, consume & produce registers.  */
	ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC);
	writel(ring >> 32, &regs->erbr_h);
	writel(ring & 0xffffffff, &regs->erbr_l);
	writel(ip->rx_ci << 3, &regs->ercir);
	writel((ip->rx_pi << 3) | ERPIR_ARM, &regs->erpir);

	ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC);

	ip->txqlen = 0;				/* nothing queued  */

	/* Now the tx ring base, consume & produce registers.  */
	writel(ring >> 32, &regs->etbr_h);
	writel(ring & 0xffffffff, &regs->etbr_l);
	writel(ip->tx_pi << 7, &regs->etpir);
	writel(ip->tx_ci << 7, &regs->etcir);
	readl(&regs->etcir);			/* Flush */

	ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
		    EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
	writel(ip->emcr, &regs->emcr);
	writel(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
	       EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
	       EISR_TXEXPLICIT | EISR_TXMEMERR, &regs->eier);
}
static inline void ioc3_stop(struct ioc3_private *ip)
{
	struct ioc3_ethregs *regs = ip->regs;

	writel(0, &regs->emcr);			/* Shutup */
	writel(0, &regs->eier);			/* Disable interrupts */
	readl(&regs->eier);			/* Flush */
}
static int ioc3_open(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	ioc3_init(dev);
	if (ioc3_alloc_rx_bufs(dev)) {
		netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
		return -ENOMEM;
	}
	ioc3_start(ip);
	ioc3_mii_start(ip);

	netif_start_queue(dev);
	return 0;
}
static int ioc3_close(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);

	del_timer_sync(&ip->ioc3_timer);

	netif_stop_queue(dev);

	ioc3_stop(ip);

	ioc3_free_rx_bufs(ip);
	ioc3_clean_tx_ring(ip);

	return 0;
}
static const struct net_device_ops ioc3_netdev_ops = {
	.ndo_open		= ioc3_open,
	.ndo_stop		= ioc3_close,
	.ndo_start_xmit		= ioc3_start_xmit,
	.ndo_tx_timeout		= ioc3_timeout,
	.ndo_get_stats		= ioc3_get_stats,
	.ndo_set_rx_mode	= ioc3_set_multicast_list,
	.ndo_do_ioctl		= ioc3_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ioc3_set_mac_address,
};
static int ioc3eth_probe(struct platform_device *pdev)
{
	u32 sw_physid1, sw_physid2, vendor, model, rev;
	struct ioc3_private *ip;
	struct net_device *dev;
	struct resource *regs;
	u8 mac_addr[6];
	int err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* get mac addr from one wire prom */
	if (ioc3eth_get_mac_addr(regs, mac_addr))
		return -EPROBE_DEFER;	/* not available yet */

	dev = alloc_etherdev(sizeof(struct ioc3_private));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ip = netdev_priv(dev);
	ip->dma_dev = pdev->dev.parent;
	ip->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ip->regs)) {
		err = PTR_ERR(ip->regs);
		goto out_free;
	}

	ip->ssram = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(ip->ssram)) {
		err = PTR_ERR(ip->ssram);
		goto out_free;
	}

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto out_free;
	}

	if (devm_request_irq(&pdev->dev, dev->irq, ioc3_interrupt,
			     IRQF_SHARED, "ioc3-eth", dev)) {
		dev_err(&pdev->dev, "Can't get irq %d\n", dev->irq);
		err = -ENODEV;
		goto out_free;
	}

	spin_lock_init(&ip->ioc3_lock);
	timer_setup(&ip->ioc3_timer, ioc3_timer, 0);

	ioc3_stop(ip);

	/* Allocate rx ring.  4kb = 512 entries, must be 4kb aligned */
	ip->rxr = dma_alloc_coherent(ip->dma_dev, RX_RING_SIZE, &ip->rxr_dma,
				     GFP_KERNEL);
	if (!ip->rxr) {
		pr_err("ioc3-eth: rx ring allocation failed\n");
		err = -ENOMEM;
		goto out_stop;
	}

	/* Allocate tx rings.  16kb = 128 bufs, must be 16kb aligned  */
	ip->tx_ring = dma_alloc_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1,
					 &ip->txr_dma, GFP_KERNEL);
	if (!ip->tx_ring) {
		pr_err("ioc3-eth: tx ring allocation failed\n");
		err = -ENOMEM;
		goto out_stop;
	}
	/* Align TX ring */
	ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K);
	ip->txr_dma = ALIGN(ip->txr_dma, SZ_16K);

	ioc3_init(dev);

	ip->mii.phy_id_mask = 0x1f;
	ip->mii.reg_num_mask = 0x1f;
	ip->mii.dev = dev;
	ip->mii.mdio_read = ioc3_mdio_read;
	ip->mii.mdio_write = ioc3_mdio_write;

	ioc3_mii_init(ip);

	if (ip->mii.phy_id == -1) {
		netdev_err(dev, "Didn't find a PHY, goodbye.\n");
		err = -ENODEV;
		goto out_stop;
	}

	ioc3_mii_start(ip);
	ioc3_ssram_disc(ip);
	memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
	platform_set_drvdata(pdev, dev);

	/* The IOC3-specific entries in the device structure.  */
	dev->watchdog_timeo	= 5 * HZ;
	dev->netdev_ops		= &ioc3_netdev_ops;
	dev->ethtool_ops	= &ioc3_ethtool_ops;
	dev->hw_features	= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
	dev->features		= NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;

	sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
	sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2);

	err = register_netdev(dev);
	if (err)
		goto out_stop;

	mii_check_media(&ip->mii, 1, 1);
	ioc3_setup_duplex(ip);

	vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
	model  = (sw_physid2 >> 4) & 0x3f;
	rev    = sw_physid2 & 0xf;
	netdev_info(dev, "Using PHY %d, vendor 0x%x, model %d, rev %d.\n",
		    ip->mii.phy_id, vendor, model, rev);
	netdev_info(dev, "IOC3 SSRAM has %d kbyte.\n",
		    ip->emcr & EMCR_BUFSIZ ? 128 : 64);

	return 0;

out_stop:
	del_timer_sync(&ip->ioc3_timer);
	if (ip->rxr)
		dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr,
				  ip->rxr_dma);
	if (ip->tx_ring)
		dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring,
				  ip->txr_dma);
out_free:
	free_netdev(dev);
	return err;
}
static int ioc3eth_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct ioc3_private *ip = netdev_priv(dev);

	dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma);
	dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, ip->txr_dma);

	unregister_netdev(dev);
	del_timer_sync(&ip->ioc3_timer);

	free_netdev(dev);

	return 0;
}
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_etxd *desc;
	unsigned long data;
	unsigned int len;
	int produce;
	u32 w0 = 0;

	/* IOC3 has a fairly simple minded checksumming hardware which simply
	 * adds up the 1's complement checksum for the entire packet and
	 * inserts it at an offset which can be specified in the descriptor
	 * into the transmit packet.  This means we have to compensate for the
	 * MAC header which should not be summed and the TCP/UDP pseudo headers
	 * manually.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ih = ip_hdr(skb);
		const int proto = ntohs(ih->protocol);
		unsigned int csoff;
		u32 csum, ehsum;
		u16 *eh;

		/* The MAC header.  skb->mac seems the logical approach
		 * to find the MAC header - except it's a NULL pointer ...
		 */
		eh = (u16 *)skb->data;

		/* Sum up dest addr, src addr and protocol  */
		ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];

		/* Skip IP header; it's sum is always zero and was
		 * already filled in by ip_output.c
		 */
		csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
					  ih->tot_len - (ih->ihl << 2),
					  proto, csum_fold(ehsum));

		csum = (csum & 0xffff) + (csum >> 16);	/* Fold again */
		csum = (csum & 0xffff) + (csum >> 16);

		csoff = ETH_HLEN + (ih->ihl << 2);
		if (proto == IPPROTO_UDP) {
			csoff += offsetof(struct udphdr, check);
			udp_hdr(skb)->check = csum;
		}
		if (proto == IPPROTO_TCP) {
			csoff += offsetof(struct tcphdr, check);
			tcp_hdr(skb)->check = csum;
		}

		w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);
	}

	spin_lock_irq(&ip->ioc3_lock);

	data = (unsigned long)skb->data;
	len = skb->len;

	produce = ip->tx_pi;
	desc = &ip->txr[produce];
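	/* Three transmit strategies follow: very short frames are copied
	 * straight into the descriptor's inline data area, frames that would
	 * cross a 16 KB boundary are handed to the chip as two buffer
	 * pointers (B1/B2), and everything else goes out as a single mapped
	 * buffer.
	 */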
	if (len <= 104) {
		/* Short packet, let's copy it directly into the ring.  */
		skb_copy_from_linear_data(skb, desc->data, skb->len);
		if (len < ETH_ZLEN) {
			/* Very short packet, pad with zeros at the end. */
			memset(desc->data + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0);
		desc->bufcnt = cpu_to_be32(len);
	} else if ((data ^ (data + len - 1)) & 0x4000) {
		unsigned long b2 = (data | 0x3fffUL) + 1UL;
		unsigned long s1 = b2 - data;
		unsigned long s2 = data + len - b2;
		dma_addr_t d1, d2;

		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
					ETXD_B1V | ETXD_B2V | w0);
		desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
					   (s2 << ETXD_B2CNT_SHIFT));
		d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE);
		if (dma_mapping_error(ip->dma_dev, d1))
			goto drop_packet;
		d2 = dma_map_single(ip->dma_dev, (void *)b2, s1, DMA_TO_DEVICE);
		if (dma_mapping_error(ip->dma_dev, d2)) {
			dma_unmap_single(ip->dma_dev, d1, len, DMA_TO_DEVICE);
			goto drop_packet;
		}
		desc->p1 = cpu_to_be64(ioc3_map(d1, PCI64_ATTR_PREF));
		desc->p2 = cpu_to_be64(ioc3_map(d2, PCI64_ATTR_PREF));
	} else {
		dma_addr_t d;

		/* Normal sized packet that doesn't cross a page boundary. */
		desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
		desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
		d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(ip->dma_dev, d))
			goto drop_packet;
		desc->p1 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF));
	}

	mb(); /* make sure all descriptor changes are visible */

	ip->tx_skbs[produce] = skb;			/* Remember skb */
	produce = (produce + 1) & TX_RING_MASK;
	ip->tx_pi = produce;
	writel(produce << 7, &ip->regs->etpir);		/* Fire ... */

	ip->txqlen++;

	if (ip->txqlen >= (TX_RING_ENTRIES - 1))
		netif_stop_queue(dev);

	spin_unlock_irq(&ip->ioc3_lock);

	return NETDEV_TX_OK;

drop_packet:
	dev_kfree_skb_any(skb);
	dev->stats.tx_dropped++;

	spin_unlock_irq(&ip->ioc3_lock);

	return NETDEV_TX_OK;
}
static void ioc3_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ioc3_private *ip = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&ip->ioc3_lock);

	ioc3_stop(ip);
	ioc3_free_rx_bufs(ip);
	ioc3_clean_tx_ring(ip);

	ioc3_init(dev);
	if (ioc3_alloc_rx_bufs(dev)) {
		netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
		spin_unlock_irq(&ip->ioc3_lock);
		return;
	}
	ioc3_start(ip);
	ioc3_mii_init(ip);
	ioc3_mii_start(ip);

	spin_unlock_irq(&ip->ioc3_lock);

	netif_wake_queue(dev);
}
/* Given a multicast ethernet address, this routine calculates the
 * address's bit index in the logical address filter mask
 */
static inline unsigned int ioc3_hash(const unsigned char *addr)
{
	unsigned int temp = 0;
	int bits;
	u32 crc;

	crc = ether_crc_le(ETH_ALEN, addr);

	crc &= 0x3f;	/* bit reverse lowest 6 bits for hash index */
	for (bits = 6; --bits >= 0; ) {
		temp <<= 1;
		temp |= (crc & 0x1);
		crc >>= 1;
	}

	return temp;
}
static void ioc3_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
	strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
		sizeof(info->bus_info));
}
static int ioc3_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);

	spin_lock_irq(&ip->ioc3_lock);
	mii_ethtool_get_link_ksettings(&ip->mii, cmd);
	spin_unlock_irq(&ip->ioc3_lock);

	return 0;
}
static int ioc3_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}
static int ioc3_nway_reset(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_nway_restart(&ip->mii);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}
static u32 ioc3_get_link(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = mii_link_ok(&ip->mii);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}
static const struct ethtool_ops ioc3_ethtool_ops = {
	.get_drvinfo		= ioc3_get_drvinfo,
	.nway_reset		= ioc3_nway_reset,
	.get_link		= ioc3_get_link,
	.get_link_ksettings	= ioc3_get_link_ksettings,
	.set_link_ksettings	= ioc3_set_link_ksettings,
};
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct ioc3_private *ip = netdev_priv(dev);
	int rc;

	spin_lock_irq(&ip->ioc3_lock);
	rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&ip->ioc3_lock);

	return rc;
}
static void ioc3_set_multicast_list(struct net_device *dev)
{
	struct ioc3_private *ip = netdev_priv(dev);
	struct ioc3_ethregs *regs = ip->regs;
	struct netdev_hw_addr *ha;
	u64 ehar = 0;

	spin_lock_irq(&ip->ioc3_lock);

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous.  */
		ip->emcr |= EMCR_PROMISC;
		writel(ip->emcr, &regs->emcr);
		readl(&regs->emcr);
	} else {
		ip->emcr &= ~EMCR_PROMISC;
		writel(ip->emcr, &regs->emcr);		/* Clear promiscuous. */
		readl(&regs->emcr);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 64)) {
			/* Too many for hashing to make sense or we want all
			 * multicast packets anyway, so skip computing all the
			 * hashes and just accept all packets.
			 */
			ip->ehar_h = 0xffffffff;
			ip->ehar_l = 0xffffffff;
		} else {
			netdev_for_each_mc_addr(ha, dev) {
				ehar |= (1UL << ioc3_hash(ha->addr));
			}
			ip->ehar_h = ehar >> 32;
			ip->ehar_l = ehar & 0xffffffff;
		}
		writel(ip->ehar_h, &regs->ehar_h);
		writel(ip->ehar_l, &regs->ehar_l);
	}

	spin_unlock_irq(&ip->ioc3_lock);
}
static struct platform_driver ioc3eth_driver = {
	.probe  = ioc3eth_probe,
	.remove = ioc3eth_remove,
	.driver = {
		.name = "ioc3-eth",
	}
};

module_platform_driver(ioc3eth_driver);
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
MODULE_LICENSE("GPL");