/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/in.h>
#include <linux/skbuff.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <linux/inet_lro.h>

#include <asm/firmware.h>
#include <asm/pasemi_dma.h>

#include "pasemi_mac.h"
/* We have our own align, since ppc64 in general has it at 0 because
 * of design flaws in some of the server bridge chips. However, for
 * PWRficient doing the unaligned copies is more expensive than doing
 * unaligned DMA, so make sure the data is aligned instead.
 */
#define LOCAL_SKB_ALIGN	2
/* Must be a power of two */
#define RX_RING_SIZE 2048
#define TX_RING_SIZE 4096

#define LRO_MAX_AGGR 64

#define DEFAULT_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
#define TX_DESC(tx, num)	((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
#define TX_DESC_INFO(tx, num)	((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
#define RX_DESC(rx, num)	((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)])
#define RX_DESC_INFO(rx, num)	((rx)->ring_info[(num) & (RX_RING_SIZE-1)])
#define RX_BUFF(rx, num)	((rx)->buffers[(num) & (RX_RING_SIZE-1)])

#define RING_USED(ring)		(((ring)->next_to_fill - (ring)->next_to_clean) \
				 & ((ring)->size - 1))
#define RING_AVAIL(ring)	((ring)->size - RING_USED(ring))

#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");

static int debug = -1;	/* -1 == use DEFAULT_MSG_ENABLE as value */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");
static int translation_enabled(void)
{
#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
	return 1;
#else
	return firmware_has_feature(FW_FEATURE_LPAR);
#endif
}
static void write_iob_reg(unsigned int reg, unsigned int val)
{
	pasemi_write_iob_reg(reg, val);
}

static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg)
{
	return pasemi_read_mac_reg(mac->dma_if, reg);
}

static void write_mac_reg(const struct pasemi_mac *mac, unsigned int reg,
			  unsigned int val)
{
	pasemi_write_mac_reg(mac->dma_if, reg, val);
}

static unsigned int read_dma_reg(unsigned int reg)
{
	return pasemi_read_dma_reg(reg);
}

static void write_dma_reg(unsigned int reg, unsigned int val)
{
	pasemi_write_dma_reg(reg, val);
}
static struct pasemi_mac_rxring *rx_ring(const struct pasemi_mac *mac)
{
	return mac->rx;
}

static struct pasemi_mac_txring *tx_ring(const struct pasemi_mac *mac)
{
	return mac->tx;
}

static inline void prefetch_skb(const struct sk_buff *skb)
{
	const void *d = skb;

	prefetch(d);
	prefetch(d+64);
	prefetch(d+128);
	prefetch(d+192);
}
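
/* Map this MAC's PCI devfn to its DMA interface number by scanning the
 * interface mapping registers advertised in PAS_DMA_CAP_IFI.
 */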
static int mac_to_intf(struct pasemi_mac *mac)
{
	struct pci_dev *pdev = mac->pdev;
	u32 tmp;
	int nintf, off, i, j;
	int devfn = pdev->devfn;

	tmp = read_dma_reg(PAS_DMA_CAP_IFI);
	nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S;
	off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S;

	/* IOFF contains the offset to the registers containing the
	 * DMA interface-to-MAC-pci-id mappings, and NIN contains number
	 * of total interfaces. Each register contains 4 devfns.
	 * Just do a linear search until we find the devfn of the MAC
	 * we're trying to look up.
	 */

	for (i = 0; i < (nintf+3)/4; i++) {
		tmp = read_dma_reg(off+4*i);
		for (j = 0; j < 4; j++) {
			if (((tmp >> (8*j)) & 0xff) == devfn)
				return i*4 + j;
		}
	}
	return -1;
}
static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
	struct pci_dev *pdev = mac->pdev;
	struct device_node *dn = pci_device_to_OF_node(pdev);
	int len;
	const u8 *maddr;
	u8 addr[6];

	if (!dn) {
		dev_dbg(&pdev->dev,
			"No device node for mac, not configuring\n");
		return -ENOENT;
	}

	maddr = of_get_property(dn, "local-mac-address", &len);

	if (maddr && len == 6) {
		memcpy(mac->mac_addr, maddr, 6);
		return 0;
	}

	/* Some old versions of firmware mistakenly use mac-address
	 * (and as a string) instead of a byte array in local-mac-address.
	 */

	maddr = of_get_property(dn, "mac-address", NULL);
	if (!maddr) {
		dev_warn(&pdev->dev,
			 "no mac address in device tree, not configuring\n");
		return -ENOENT;
	}

	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
		dev_warn(&pdev->dev,
			 "can't parse mac address, not configuring\n");
		return -EINVAL;
	}

	memcpy(mac->mac_addr, addr, 6);

	return 0;
}
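
/* Header extraction callback for inet_lro: locate the IP and TCP headers
 * and reject anything that isn't a clean IPv4 TCP packet.
 */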
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *data)
{
	u64 macrx = (u64) data;
	unsigned int ip_len;
	struct iphdr *iph;

	/* IPv4 header checksum failed */
	if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK)
		return -1;

	/* non tcp packet */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}
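
/* Unmap and free a transmitted skb, returning the number of descriptor
 * slots freed (rounded up to an even count).
 */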
static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
				   const int nfrags,
				   struct sk_buff *skb,
				   const dma_addr_t *dmas)
{
	int f;
	struct pci_dev *pdev = mac->dma_pdev;

	pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE);

	for (f = 0; f < nfrags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		pci_unmap_page(pdev, dmas[f+1], frag->size, PCI_DMA_TODEVICE);
	}
	dev_kfree_skb_irq(skb);

	/* Freed descriptor slot + main SKB ptr + nfrags additional ptrs,
	 * aligned up to a power of 2
	 */
	return (nfrags + 3) & ~1;
}
static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
{
	struct pasemi_mac_rxring *ring;
	struct pasemi_mac *mac = netdev_priv(dev);
	int chno;
	unsigned int cfg;

	ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring),
				     offsetof(struct pasemi_mac_rxring, chan));

	if (!ring) {
		dev_err(&mac->pdev->dev, "Can't allocate RX channel\n");
		goto out_chan;
	}
	chno = ring->chan.chno;

	spin_lock_init(&ring->lock);

	ring->size = RX_RING_SIZE;
	ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  RX_RING_SIZE, GFP_KERNEL);

	if (!ring->ring_info)
		goto out_ring_info;

	/* Allocate descriptors */
	if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
		goto out_ring_desc;

	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
					   RX_RING_SIZE * sizeof(u64),
					   &ring->buf_dma, GFP_KERNEL);
	if (!ring->buffers)
		goto out_ring_desc;

	memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));

	write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
		      PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));

	write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno),
		      PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) |
		      PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));

	cfg = PAS_DMA_RXCHAN_CFG_HBU(2);

	if (translation_enabled())
		cfg |= PAS_DMA_RXCHAN_CFG_CTR;

	write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg);

	write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if),
		      PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma));

	write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if),
		      PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) |
		      PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

	cfg = PAS_DMA_RXINT_CFG_DHL(2) | PAS_DMA_RXINT_CFG_L2 |
	      PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
	      PAS_DMA_RXINT_CFG_HEN;

	if (translation_enabled())
		cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR;

	write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg);

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;
	ring->mac = mac;
	mac->rx = ring;

	return 0;

out_ring_desc:
	kfree(ring->ring_info);
out_ring_info:
	pasemi_dma_free_chan(&ring->chan);
out_chan:
	return -ENOMEM;
}
static struct pasemi_mac_txring *
pasemi_mac_setup_tx_resources(const struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	u32 val;
	struct pasemi_mac_txring *ring;
	unsigned int cfg;
	int chno;

	ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring),
				     offsetof(struct pasemi_mac_txring, chan));

	if (!ring) {
		dev_err(&mac->pdev->dev, "Can't allocate TX channel\n");
		goto out_chan;
	}

	chno = ring->chan.chno;

	spin_lock_init(&ring->lock);

	ring->size = TX_RING_SIZE;
	ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  TX_RING_SIZE, GFP_KERNEL);
	if (!ring->ring_info)
		goto out_ring_info;

	/* Allocate descriptors */
	if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE))
		goto out_ring_desc;

	write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
		      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);

	write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);

	cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
	      PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
	      PAS_DMA_TXCHAN_CFG_UP |
	      PAS_DMA_TXCHAN_CFG_WT(2);

	if (translation_enabled())
		cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

	write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;
	ring->mac = mac;

	return ring;

out_ring_desc:
	kfree(ring->ring_info);
out_ring_info:
	pasemi_dma_free_chan(&ring->chan);
out_chan:
	return NULL;
}
static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)
{
	struct pasemi_mac_txring *txring = tx_ring(mac);
	unsigned int i, j;
	struct pasemi_mac_buffer *info;
	dma_addr_t dmas[MAX_SKB_FRAGS+1];
	int freed, nfrags;
	int start, limit;

	start = txring->next_to_clean;
	limit = txring->next_to_fill;

	/* Compensate for when fill has wrapped and clean has not */
	if (start > limit)
		limit += TX_RING_SIZE;

	for (i = start; i < limit; i += freed) {
		info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)];
		if (info->dma && info->skb) {
			nfrags = skb_shinfo(info->skb)->nr_frags;
			for (j = 0; j <= nfrags; j++)
				dmas[j] = txring->ring_info[(i+1+j) &
						(TX_RING_SIZE-1)].dma;
			freed = pasemi_mac_unmap_tx_skb(mac, nfrags,
							info->skb, dmas);
		} else
			freed = 2;
	}

	kfree(txring->ring_info);
	pasemi_dma_free_chan(&txring->chan);
}
static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
{
	struct pasemi_mac_rxring *rx = rx_ring(mac);
	unsigned int i;
	struct pasemi_mac_buffer *info;

	for (i = 0; i < RX_RING_SIZE; i++) {
		info = &RX_DESC_INFO(rx, i);
		if (info->skb && info->dma) {
			pci_unmap_single(mac->dma_pdev,
					 info->dma,
					 info->skb->len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(info->skb);
		}
		info->dma = 0;
		info->skb = NULL;
	}

	for (i = 0; i < RX_RING_SIZE; i++)
		RX_DESC(rx, i) = 0;

	dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
			  rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);

	kfree(rx_ring(mac)->ring_info);
	pasemi_dma_free_chan(&rx_ring(mac)->chan);
	mac->rx = NULL;
}
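
/* Refill up to 'limit' RX ring slots with freshly allocated, DMA-mapped
 * skbs, then tell the interface how many new buffers are available.
 */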
static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
					 const int limit)
{
	const struct pasemi_mac *mac = netdev_priv(dev);
	struct pasemi_mac_rxring *rx = rx_ring(mac);
	int fill, count;

	if (limit <= 0)
		return;

	fill = rx_ring(mac)->next_to_fill;
	for (count = 0; count < limit; count++) {
		struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill);
		u64 *buff = &RX_BUFF(rx, fill);
		struct sk_buff *skb;
		dma_addr_t dma;

		/* Entry in use? */
		WARN_ON(*buff);

		skb = dev_alloc_skb(BUF_SIZE);
		if (unlikely(!skb))
			break;

		skb_reserve(skb, LOCAL_SKB_ALIGN);

		dma = pci_map_single(mac->dma_pdev, skb->data,
				     BUF_SIZE - LOCAL_SKB_ALIGN,
				     PCI_DMA_FROMDEVICE);

		if (unlikely(dma_mapping_error(dma))) {
			dev_kfree_skb_irq(skb);
			break;
		}

		info->skb = skb;
		info->dma = dma;
		*buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
		fill++;
	}

	wmb();

	write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count);

	rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) &
				(RX_RING_SIZE - 1);
}
static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac)
{
	struct pasemi_mac_rxring *rx = rx_ring(mac);
	unsigned int reg, pcnt;
	/* Re-enable packet count interrupts: finally
	 * ack the packet count interrupt we got in rx_intr.
	 */

	pcnt = *rx->chan.status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;

	if (*rx->chan.status & PAS_STATUS_TIMER)
		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

	write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg);
}
static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac)
{
	unsigned int reg, pcnt;

	/* Re-enable packet count interrupts */
	pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

	write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg);
}
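
/* Dump the MAC/DMA status registers on RX errors, gated by the netif
 * message level.
 */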
static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac,
				       const u64 macrx)
{
	unsigned int rcmdsta, ccmdsta;
	struct pasemi_dmachan *chan = &rx_ring(mac)->chan;

	if (!netif_msg_rx_err(mac))
		return;

	rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno));

	printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n",
	       macrx, *chan->status);

	printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
	       rcmdsta, ccmdsta);
}
static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac,
				       const u64 mactx)
{
	unsigned int cmdsta;
	struct pasemi_dmachan *chan = &tx_ring(mac)->chan;

	if (!netif_msg_tx_err(mac))
		return;

	cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno));

	printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "
	       "tx status 0x%016lx\n", mactx, *chan->status);

	printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
}
static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
			       const int limit)
{
	const struct pasemi_dmachan *chan = &rx->chan;
	struct pasemi_mac *mac = rx->mac;
	struct pci_dev *pdev = mac->dma_pdev;
	unsigned int n;
	int count, buf_index, tot_bytes, packets;
	struct pasemi_mac_buffer *info;
	struct sk_buff *skb;
	unsigned int len;
	u64 macrx, eval;
	dma_addr_t dma;

	tot_bytes = 0;
	packets = 0;

	spin_lock(&rx->lock);

	n = rx->next_to_clean;

	prefetch(&RX_DESC(rx, n));

	for (count = 0; count < limit; count++) {
		macrx = RX_DESC(rx, n);
		prefetch(&RX_DESC(rx, n+4));

		if ((macrx & XCT_MACRX_E) ||
		    (*chan->status & PAS_STATUS_ERROR))
			pasemi_mac_rx_error(mac, macrx);

		if (!(macrx & XCT_MACRX_O))
			break;

		BUG_ON(!(macrx & XCT_MACRX_RR_8BRES));

		eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >>
			XCT_RXRES_8B_EVAL_S;
		buf_index = eval-1;

		dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M);
		info = &RX_DESC_INFO(rx, buf_index);

		skb = info->skb;

		prefetch_skb(skb);

		len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

		pci_unmap_single(pdev, dma, BUF_SIZE-LOCAL_SKB_ALIGN,
				 PCI_DMA_FROMDEVICE);

		if (macrx & XCT_MACRX_CRC) {
			/* CRC error flagged */
			mac->netdev->stats.rx_errors++;
			mac->netdev->stats.rx_crc_errors++;
			/* No need to free skb, it'll be reused */
			goto next;
		}

		info->skb = NULL;
		info->dma = 0;

		if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
				XCT_MACRX_CSUM_S;
		} else
			skb->ip_summed = CHECKSUM_NONE;

		packets++;
		tot_bytes += len;

		/* Don't include CRC */
		skb_put(skb, len-4);

		skb->protocol = eth_type_trans(skb, mac->netdev);
		lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx);

next:
		RX_DESC(rx, n) = 0;
		RX_DESC(rx, n+1) = 0;

		/* Need to zero it out since hardware doesn't, since the
		 * replenish loop uses it to tell when it's done.
		 */
		RX_BUFF(rx, buf_index) = 0;

		n += 4;
	}

	if (n > RX_RING_SIZE) {
		/* Errata 5971 workaround: L2 target of headers */
		write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0);
		n &= (RX_RING_SIZE-1);
	}

	rx_ring(mac)->next_to_clean = n;

	lro_flush_all(&mac->lro_mgr);

	/* Increase is in number of 16-byte entries, and since each descriptor
	 * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
	 * count*2.
	 */
	write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1);

	pasemi_mac_replenish_rx_ring(mac->netdev, count);

	mac->netdev->stats.rx_bytes += tot_bytes;
	mac->netdev->stats.rx_packets += packets;

	spin_unlock(&rx_ring(mac)->lock);

	return count;
}
/* Can't make this too large or we blow the kernel stack limits */
#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)
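
/* Reclaim completed TX descriptors in batches of TX_CLEAN_BATCHSIZE,
 * unmapping and freeing the skbs outside of the ring lock.
 */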
static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring)
{
	struct pasemi_dmachan *chan = &txring->chan;
	struct pasemi_mac *mac = txring->mac;
	int i, j;
	unsigned int start, descr_count, buf_count, batch_limit;
	unsigned int ring_limit;
	unsigned int total_count;
	unsigned long flags;
	struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
	dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];
	int nf[TX_CLEAN_BATCHSIZE];
	int nr_frags;

	total_count = 0;
	batch_limit = TX_CLEAN_BATCHSIZE;
restart:
	spin_lock_irqsave(&txring->lock, flags);

	start = txring->next_to_clean;
	ring_limit = txring->next_to_fill;

	prefetch(&TX_DESC_INFO(txring, start+1).skb);

	/* Compensate for when fill has wrapped but clean has not */
	if (start > ring_limit)
		ring_limit += TX_RING_SIZE;

	buf_count = 0;
	descr_count = 0;

	for (i = start;
	     descr_count < batch_limit && i < ring_limit;
	     i += buf_count) {
		u64 mactx = TX_DESC(txring, i);
		struct sk_buff *skb;

		skb = TX_DESC_INFO(txring, i+1).skb;
		nr_frags = TX_DESC_INFO(txring, i).dma;

		if ((mactx & XCT_MACTX_E) ||
		    (*chan->status & PAS_STATUS_ERROR))
			pasemi_mac_tx_error(mac, mactx);

		if (unlikely(mactx & XCT_MACTX_O))
			/* Not yet transmitted */
			break;

		buf_count = 2 + nr_frags;
		/* Since we always fill with an even number of entries, make
		 * sure we skip any unused one at the end as well.
		 */
		if (buf_count & 1)
			buf_count++;

		for (j = 0; j <= nr_frags; j++)
			dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma;

		skbs[descr_count] = skb;
		nf[descr_count] = nr_frags;

		TX_DESC(txring, i) = 0;
		TX_DESC(txring, i+1) = 0;

		descr_count++;
	}
	txring->next_to_clean = i & (TX_RING_SIZE-1);

	spin_unlock_irqrestore(&txring->lock, flags);
	netif_wake_queue(mac->netdev);

	for (i = 0; i < descr_count; i++)
		pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]);

	total_count += descr_count;

	/* If the batch was full, try to clean more */
	if (descr_count == batch_limit)
		goto restart;

	return total_count;
}
pasemi_mac_rx_intr(int irq
, void *data
)
810 const struct pasemi_mac_rxring
*rxring
= data
;
811 struct pasemi_mac
*mac
= rxring
->mac
;
812 struct net_device
*dev
= mac
->netdev
;
813 const struct pasemi_dmachan
*chan
= &rxring
->chan
;
816 if (!(*chan
->status
& PAS_STATUS_CAUSE_M
))
819 /* Don't reset packet count so it won't fire again but clear
824 if (*chan
->status
& PAS_STATUS_SOFT
)
825 reg
|= PAS_IOB_DMA_RXCH_RESET_SINTC
;
826 if (*chan
->status
& PAS_STATUS_ERROR
)
827 reg
|= PAS_IOB_DMA_RXCH_RESET_DINTC
;
829 netif_rx_schedule(dev
, &mac
->napi
);
831 write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan
->chno
), reg
);
#define TX_CLEAN_INTERVAL HZ
static void pasemi_mac_tx_timer(unsigned long data)
{
	struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data;
	struct pasemi_mac *mac = txring->mac;

	pasemi_mac_clean_tx(txring);

	mod_timer(&txring->clean_timer, jiffies + TX_CLEAN_INTERVAL);

	pasemi_mac_restart_tx_intr(mac);
}
static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
{
	struct pasemi_mac_txring *txring = data;
	const struct pasemi_dmachan *chan = &txring->chan;
	struct pasemi_mac *mac = txring->mac;
	unsigned int reg;

	if (!(*chan->status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	reg = 0;

	if (*chan->status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
	if (*chan->status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;

	mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);

	netif_rx_schedule(mac->netdev, &mac->napi);

	write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);

	return IRQ_HANDLED;
}
static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
{
	unsigned int flags;

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	flags &= ~PAS_MAC_CFG_PCFG_PE;
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}

static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
{
	unsigned int flags;

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	flags |= PAS_MAC_CFG_PCFG_PE;
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}
static void pasemi_adjust_link(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int msg;
	unsigned int flags;
	unsigned int new_flags;

	if (!mac->phydev->link) {
		/* If no link, MAC speed settings don't matter. Just report
		 * link down and return.
		 */
		if (mac->link && netif_msg_link(mac))
			printk(KERN_INFO "%s: Link is down.\n", dev->name);

		netif_carrier_off(dev);
		pasemi_mac_intf_disable(mac);
		mac->link = 0;

		return;
	} else {
		pasemi_mac_intf_enable(mac);
		netif_carrier_on(dev);
	}

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
			      PAS_MAC_CFG_PCFG_TSR_M);

	if (!mac->phydev->duplex)
		new_flags |= PAS_MAC_CFG_PCFG_HD;

	switch (mac->phydev->speed) {
	case 1000:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
			     PAS_MAC_CFG_PCFG_TSR_1G;
		break;
	case 100:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
			     PAS_MAC_CFG_PCFG_TSR_100M;
		break;
	case 10:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
			     PAS_MAC_CFG_PCFG_TSR_10M;
		break;
	default:
		printk(KERN_WARNING "Unsupported speed %d\n", mac->phydev->speed);
	}

	/* Print on link or speed/duplex change */
	msg = mac->link != mac->phydev->link || flags != new_flags;

	mac->duplex = mac->phydev->duplex;
	mac->speed = mac->phydev->speed;
	mac->link = mac->phydev->link;

	if (new_flags != flags)
		write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);

	if (msg && netif_msg_link(mac))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
		       dev->name, mac->speed, mac->duplex ? "full" : "half");
}
static int pasemi_mac_phy_init(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct device_node *dn, *phy_dn;
	struct phy_device *phydev;
	unsigned int phy_id;
	const phandle *ph;
	const unsigned int *prop;
	struct resource r;
	int ret;

	dn = pci_device_to_OF_node(mac->pdev);
	ph = of_get_property(dn, "phy-handle", NULL);
	if (!ph)
		return -ENODEV;
	phy_dn = of_find_node_by_phandle(*ph);

	prop = of_get_property(phy_dn, "reg", NULL);
	ret = of_address_to_resource(phy_dn->parent, 0, &r);
	if (ret)
		goto err;

	phy_id = *prop;
	snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id);

	of_node_put(phy_dn);

	mac->link = 0;
	mac->speed = 0;
	mac->duplex = -1;

	phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0,
			     PHY_INTERFACE_MODE_SGMII);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
		return PTR_ERR(phydev);
	}

	mac->phydev = phydev;

	return 0;

err:
	of_node_put(phy_dn);
	return -ENODEV;
}
static int pasemi_mac_open(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int flags;
	int ret;

	/* enable rx section */
	write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

	/* enable tx section */
	write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

	write_mac_reg(mac, PAS_MAC_CFG_TXP, flags);

	ret = pasemi_mac_setup_rx_resources(dev);
	if (ret)
		goto out_rx_resources;

	mac->tx = pasemi_mac_setup_tx_resources(dev);

	if (!mac->tx)
		goto out_tx_ring;

	/* 0x3ff with 33MHz clock is about 31us */
	write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG,
		      PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff));

	write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno),
		      PAS_IOB_DMA_RXCH_CFG_CNTTH(256));

	write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno),
		      PAS_IOB_DMA_TXCH_CFG_CNTTH(32));

	write_mac_reg(mac, PAS_MAC_IPC_CHNL,
		      PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) |
		      PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno));

	/* enable rx if */
	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      PAS_DMA_RXINT_RCMDSTA_EN |
		      PAS_DMA_RXINT_RCMDSTA_DROPS_M |
		      PAS_DMA_RXINT_RCMDSTA_BP |
		      PAS_DMA_RXINT_RCMDSTA_OO |
		      PAS_DMA_RXINT_RCMDSTA_BT);

	/* enable rx channel */
	pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU |
						   PAS_DMA_RXCHAN_CCMDSTA_OD |
						   PAS_DMA_RXCHAN_CCMDSTA_FD |
						   PAS_DMA_RXCHAN_CCMDSTA_DT);

	/* enable tx channel */
	pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
						   PAS_DMA_TXCHAN_TCMDSTA_DB |
						   PAS_DMA_TXCHAN_TCMDSTA_DE |
						   PAS_DMA_TXCHAN_TCMDSTA_DA);

	pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);

	write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno),
		      RX_RING_SIZE>>1);

	/* Clear out any residual packet count state from firmware */
	pasemi_mac_restart_rx_intr(mac);
	pasemi_mac_restart_tx_intr(mac);

	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

	if (mac->type == MAC_TYPE_GMAC)
		flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
	else
		flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G;

	/* Enable interface in MAC */
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

	ret = pasemi_mac_phy_init(dev);
	if (ret) {
		/* Since we won't get link notification, just enable RX */
		pasemi_mac_intf_enable(mac);
		if (mac->type == MAC_TYPE_GMAC) {
			/* Warn for missing PHY on SGMII (1Gig) ports */
			dev_warn(&mac->pdev->dev,
				 "PHY init failed: %d.\n", ret);
			dev_warn(&mac->pdev->dev,
				 "Defaulting to 1Gbit full duplex\n");
		}
	}

	netif_start_queue(dev);
	napi_enable(&mac->napi);

	snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
		 dev->name);

	ret = request_irq(mac->tx->chan.irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
			  mac->tx_irq_name, mac->tx);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			mac->tx->chan.irq, ret);
		goto out_tx_int;
	}

	snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
		 dev->name);

	ret = request_irq(mac->rx->chan.irq, &pasemi_mac_rx_intr, IRQF_DISABLED,
			  mac->rx_irq_name, mac->rx);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			mac->rx->chan.irq, ret);
		goto out_rx_int;
	}

	if (mac->phydev)
		phy_start(mac->phydev);

	init_timer(&mac->tx->clean_timer);
	mac->tx->clean_timer.function = pasemi_mac_tx_timer;
	mac->tx->clean_timer.data = (unsigned long)mac->tx;
	mac->tx->clean_timer.expires = jiffies+HZ;
	add_timer(&mac->tx->clean_timer);

	return 0;

out_rx_int:
	free_irq(mac->tx->chan.irq, mac->tx);
out_tx_int:
	napi_disable(&mac->napi);
	netif_stop_queue(dev);
out_tx_ring:
	if (mac->tx)
		pasemi_mac_free_tx_resources(mac);
	pasemi_mac_free_rx_resources(mac);
out_rx_resources:

	return ret;
}
#define MAX_RETRIES 5000
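
/* Stop traffic, wait for the DMA channels to drain, then disable them
 * and free all ring resources.
 */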
static int pasemi_mac_close(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int sta;
	int retries;
	int rxch, txch;

	rxch = rx_ring(mac)->chan.chno;
	txch = tx_ring(mac)->chan.chno;

	if (mac->phydev) {
		phy_stop(mac->phydev);
		phy_disconnect(mac->phydev);
	}

	del_timer_sync(&mac->tx->clean_timer);

	netif_stop_queue(dev);
	napi_disable(&mac->napi);

	sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	if (sta & (PAS_DMA_RXINT_RCMDSTA_BP |
		   PAS_DMA_RXINT_RCMDSTA_OO |
		   PAS_DMA_RXINT_RCMDSTA_BT))
		printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta);

	sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
	if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU |
		   PAS_DMA_RXCHAN_CCMDSTA_OD |
		   PAS_DMA_RXCHAN_CCMDSTA_FD |
		   PAS_DMA_RXCHAN_CCMDSTA_DT))
		printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta);

	sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
	if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB |
		   PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA))
		printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta);

	/* Clean out any pending buffers */
	pasemi_mac_clean_tx(tx_ring(mac));
	pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);

	/* Disable interface */
	write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
		      PAS_DMA_TXCHAN_TCMDSTA_ST);
	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      PAS_DMA_RXINT_RCMDSTA_ST);
	write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
		      PAS_DMA_RXCHAN_CCMDSTA_ST);

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
		if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
		if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
		if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");

	/* Then, disable the channel. This must be done separately from
	 * stopping, since you can't disable when active.
	 */

	write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
	write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);

	free_irq(mac->tx->chan.irq, mac->tx);
	free_irq(mac->rx->chan.irq, mac->rx);

	/* Free resources */
	pasemi_mac_free_rx_resources(mac);
	pasemi_mac_free_tx_resources(mac);

	return 0;
}
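
/* Transmit path: map the skb and its fragments, then post one mactx
 * descriptor followed by an even number of pointer entries.
 */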
static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct pasemi_mac_txring *txring;
	u64 dflags, mactx;
	dma_addr_t map[MAX_SKB_FRAGS+1];
	unsigned int map_size[MAX_SKB_FRAGS+1];
	unsigned long flags;
	int i, nfrags;
	int fill;

	dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const unsigned char *nh = skb_network_header(skb);

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			dflags |= XCT_MACTX_CSUM_TCP;
			dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
			dflags |= XCT_MACTX_IPO(nh - skb->data);
			break;
		case IPPROTO_UDP:
			dflags |= XCT_MACTX_CSUM_UDP;
			dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
			dflags |= XCT_MACTX_IPO(nh - skb->data);
			break;
		}
	}

	nfrags = skb_shinfo(skb)->nr_frags;

	map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
				PCI_DMA_TODEVICE);
	map_size[0] = skb_headlen(skb);
	if (dma_mapping_error(map[0]))
		goto out_err_nolock;

	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		map[i+1] = pci_map_page(mac->dma_pdev, frag->page,
					frag->page_offset, frag->size,
					PCI_DMA_TODEVICE);
		map_size[i+1] = frag->size;
		if (dma_mapping_error(map[i+1])) {
			nfrags = i;
			goto out_err_nolock;
		}
	}

	mactx = dflags | XCT_MACTX_LLEN(skb->len);

	txring = tx_ring(mac);

	spin_lock_irqsave(&txring->lock, flags);

	fill = txring->next_to_fill;

	/* Avoid stepping on the same cache line that the DMA controller
	 * is currently about to send, so leave at least 8 words available.
	 * Total free space needed is mactx + fragments + 8
	 */
	if (RING_AVAIL(txring) < nfrags + 10) {
		/* no room -- stop the queue and wait for tx intr */
		netif_stop_queue(dev);
		goto out_err;
	}

	TX_DESC(txring, fill) = mactx;
	TX_DESC_INFO(txring, fill).dma = nfrags;
	fill++;
	TX_DESC_INFO(txring, fill).skb = skb;
	for (i = 0; i <= nfrags; i++) {
		TX_DESC(txring, fill+i) =
			XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
		TX_DESC_INFO(txring, fill+i).dma = map[i];
	}

	/* We have to add an even number of 8-byte entries to the ring
	 * even if the last one is unused. That means always an odd number
	 * of pointers + one mactx descriptor.
	 */
	if (nfrags & 1)
		nfrags++;

	txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	spin_unlock_irqrestore(&txring->lock, flags);

	write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1);

	return NETDEV_TX_OK;

out_err:
	spin_unlock_irqrestore(&txring->lock, flags);
out_err_nolock:
	while (nfrags--)
		pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
				 PCI_DMA_TODEVICE);

	return NETDEV_TX_BUSY;
}
static void pasemi_mac_set_rx_mode(struct net_device *dev)
{
	const struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int flags;

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);

	/* Set promiscuous */
	if (dev->flags & IFF_PROMISC)
		flags |= PAS_MAC_CFG_PCFG_PR;
	else
		flags &= ~PAS_MAC_CFG_PCFG_PR;

	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}
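
/* NAPI poll: clean TX, then RX up to 'budget' packets; re-enable
 * interrupts once the ring is drained.
 */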
static int pasemi_mac_poll(struct napi_struct *napi, int budget)
{
	struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
	struct net_device *dev = mac->netdev;
	int pkts;

	pasemi_mac_clean_tx(tx_ring(mac));
	pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
	if (pkts < budget) {
		/* all done, no more packets present */
		netif_rx_complete(dev, napi);

		pasemi_mac_restart_rx_intr(mac);
		pasemi_mac_restart_tx_intr(mac);
	}
	return pkts;
}
static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct pasemi_mac *mac;
	int err;
	DECLARE_MAC_BUF(mac_buf);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	dev = alloc_etherdev(sizeof(struct pasemi_mac));
	if (dev == NULL) {
		dev_err(&pdev->dev,
			"pasemi_mac: Could not allocate ethernet device.\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	mac = netdev_priv(dev);

	mac->pdev = pdev;
	mac->netdev = dev;

	netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);

	dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
			NETIF_F_HIGHDMA;

	mac->lro_mgr.max_aggr = LRO_MAX_AGGR;
	mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	mac->lro_mgr.lro_arr = mac->lro_desc;
	mac->lro_mgr.get_skb_header = get_skb_hdr;
	mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	mac->lro_mgr.dev = mac->netdev;
	mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
	if (!mac->dma_pdev) {
		dev_err(&mac->pdev->dev, "Can't find DMA Controller\n");
		err = -ENODEV;
		goto out;
	}

	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!mac->iob_pdev) {
		dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n");
		err = -ENODEV;
		goto out;
	}

	/* get mac addr from device tree */
	if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
		err = -ENODEV;
		goto out;
	}
	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));

	mac->dma_if = mac_to_intf(mac);
	if (mac->dma_if < 0) {
		dev_err(&mac->pdev->dev, "Can't map DMA interface\n");
		err = -ENODEV;
		goto out;
	}

	switch (pdev->device) {
	case 0xa005:
		mac->type = MAC_TYPE_GMAC;
		break;
	case 0xa006:
		mac->type = MAC_TYPE_XAUI;
		break;
	default:
		err = -ENODEV;
		goto out;
	}

	dev->open = pasemi_mac_open;
	dev->stop = pasemi_mac_close;
	dev->hard_start_xmit = pasemi_mac_start_tx;
	dev->set_multicast_list = pasemi_mac_set_rx_mode;

	mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* Enable most messages by default */
	mac->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	err = register_netdev(dev);

	if (err) {
		dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
			err);
		goto out;
	} else if (netif_msg_probe(mac))
		printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %s\n",
		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
		       mac->dma_if, print_mac(mac_buf, dev->dev_addr));

	return err;

out:
	if (mac->iob_pdev)
		pci_dev_put(mac->iob_pdev);
	if (mac->dma_pdev)
		pci_dev_put(mac->dma_pdev);

	free_netdev(dev);
out_disable_device:
	pci_disable_device(pdev);
	return err;
}
static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pasemi_mac *mac;

	if (!netdev)
		return;

	mac = netdev_priv(netdev);

	unregister_netdev(netdev);

	pci_disable_device(pdev);
	pci_dev_put(mac->dma_pdev);
	pci_dev_put(mac->iob_pdev);

	pasemi_dma_free_chan(&mac->tx->chan);
	pasemi_dma_free_chan(&mac->rx->chan);

	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
}
static struct pci_device_id pasemi_mac_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
	{ },
};

MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
static struct pci_driver pasemi_mac_driver = {
	.name		= "pasemi_mac",
	.id_table	= pasemi_mac_pci_tbl,
	.probe		= pasemi_mac_probe,
	.remove		= __devexit_p(pasemi_mac_remove),
};
static void __exit pasemi_mac_cleanup_module(void)
{
	pci_unregister_driver(&pasemi_mac_driver);
}

static int __init pasemi_mac_init_module(void)
{
	int err;

	err = pasemi_dma_init();
	if (err)
		return err;

	return pci_register_driver(&pasemi_mac_driver);
}

module_init(pasemi_mac_init_module);
module_exit(pasemi_mac_cleanup_module);