/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/in.h>
#include <linux/skbuff.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>

#include <asm/irq.h>
#include <asm/firmware.h>

#include "pasemi_mac.h"

/* We have our own align, since ppc64 in general has it at 0 because
 * of design flaws in some of the server bridge chips. However, for
 * PWRficient doing the unaligned copies is more expensive than doing
 * unaligned DMA, so make sure the data is aligned instead.
 */
#define LOCAL_SKB_ALIGN	2

/* Must be a power of two */
#define RX_RING_SIZE 4096
#define TX_RING_SIZE 4096

#define DEFAULT_MSG_ENABLE	  \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |	\
	 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |	\
	 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define TX_RING(mac, num)	((mac)->tx->ring[(num) & (TX_RING_SIZE-1)])
#define TX_RING_INFO(mac, num)	((mac)->tx->ring_info[(num) & (TX_RING_SIZE-1)])
#define RX_RING(mac, num)	((mac)->rx->ring[(num) & (RX_RING_SIZE-1)])
#define RX_RING_INFO(mac, num)	((mac)->rx->ring_info[(num) & (RX_RING_SIZE-1)])
#define RX_BUFF(mac, num)	((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)])

#define RING_USED(ring)		(((ring)->next_to_fill - (ring)->next_to_clean) \
				 & ((ring)->size - 1))
#define RING_AVAIL(ring)	((ring)->size - RING_USED(ring))

#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */

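/* A note on the ring macros: since the ring sizes are powers of two,
 * the index masking makes the rings circular, and the subtraction in
 * RING_USED() stays correct even after next_to_fill wraps. E.g. with
 * a 4096-entry ring, fill == 2 and clean == 4094 gives
 * (2 - 4094) & 4095 == 4 entries in use.
 */
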
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");

static int debug = -1;	/* -1 == use DEFAULT_MSG_ENABLE as value */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");

static struct pasdma_status *dma_status;

static int translation_enabled(void)
{
#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
	return 1;
#else
	return firmware_has_feature(FW_FEATURE_LPAR);
#endif
}

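/* This appears to select whether the DMA engine's ring and buffer
 * accesses are translated through the IOMMU: when it returns true,
 * the channel setup code below turns on the CTR/ITR/ITRR and TRD/TRR
 * translation bits instead of running in bypass.
 */
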
static void write_iob_reg(struct pasemi_mac *mac, unsigned int reg,
			  unsigned int val)
{
	out_le32(mac->iob_regs+reg, val);
}

static unsigned int read_mac_reg(struct pasemi_mac *mac, unsigned int reg)
{
	return in_le32(mac->regs+reg);
}

static void write_mac_reg(struct pasemi_mac *mac, unsigned int reg,
			  unsigned int val)
{
	out_le32(mac->regs+reg, val);
}

static unsigned int read_dma_reg(struct pasemi_mac *mac, unsigned int reg)
{
	return in_le32(mac->dma_regs+reg);
}

static void write_dma_reg(struct pasemi_mac *mac, unsigned int reg,
			  unsigned int val)
{
	out_le32(mac->dma_regs+reg, val);
}

static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
	struct pci_dev *pdev = mac->pdev;
	struct device_node *dn = pci_device_to_OF_node(pdev);
	int len;
	const u8 *maddr;
	u8 addr[6];

	if (!dn) {
		dev_dbg(&pdev->dev,
			"No device node for mac, not configuring\n");
		return -ENOENT;
	}

	maddr = of_get_property(dn, "local-mac-address", &len);

	if (maddr && len == 6) {
		memcpy(mac->mac_addr, maddr, 6);
		return 0;
	}

	/* Some old versions of firmware mistakenly use mac-address
	 * (and as a string) instead of a byte array in local-mac-address.
	 */
	if (maddr == NULL)
		maddr = of_get_property(dn, "mac-address", NULL);

	if (maddr == NULL) {
		dev_warn(&pdev->dev,
			 "no mac address in device tree, not configuring\n");
		return -ENOENT;
	}

	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
		dev_warn(&pdev->dev,
			 "can't parse mac address, not configuring\n");
		return -EINVAL;
	}

	memcpy(mac->mac_addr, addr, 6);

	return 0;
}

static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
				   struct sk_buff *skb, dma_addr_t *dmas)
{
	int f;
	int nfrags = skb_shinfo(skb)->nr_frags;

	pci_unmap_single(mac->dma_pdev, dmas[0], skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	for (f = 0; f < nfrags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		pci_unmap_page(mac->dma_pdev, dmas[f+1], frag->size,
			       PCI_DMA_TODEVICE);
	}
	dev_kfree_skb_irq(skb);

	/* Freed descriptor slot + main SKB ptr + nfrags additional ptrs,
	 * aligned up to a power of 2
	 */
	return (nfrags + 3) & ~1;
}

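/* Worked example for the return value above: a linear skb (nfrags == 0)
 * used the mactx descriptor plus one pointer, padded to 2 slots, and
 * (0 + 3) & ~1 == 2. With 2 frags it is 1 + 3 pointers padded to 4, and
 * (2 + 3) & ~1 == 4. This mirrors the even-entry padding done at fill
 * time in pasemi_mac_start_tx().
 */
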
static int pasemi_mac_setup_rx_resources(struct net_device *dev)
{
	struct pasemi_mac_rxring *ring;
	struct pasemi_mac *mac = netdev_priv(dev);
	int chan_id = mac->dma_rxch;
	unsigned int cfg;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);

	if (!ring)
		goto out_ring;

	spin_lock_init(&ring->lock);

	ring->size = RX_RING_SIZE;
	ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  RX_RING_SIZE, GFP_KERNEL);

	if (!ring->ring_info)
		goto out_ring_info;

	/* Allocate descriptors */
	ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev,
					RX_RING_SIZE * sizeof(u64),
					&ring->dma, GFP_KERNEL);

	if (!ring->ring)
		goto out_ring_desc;

	memset(ring->ring, 0, RX_RING_SIZE * sizeof(u64));

	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
					   RX_RING_SIZE * sizeof(u64),
					   &ring->buf_dma, GFP_KERNEL);
	if (!ring->buffers)
		goto out_buffers;

	memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));

	write_dma_reg(mac, PAS_DMA_RXCHAN_BASEL(chan_id),
		      PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));

	write_dma_reg(mac, PAS_DMA_RXCHAN_BASEU(chan_id),
		      PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
		      PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));

	cfg = PAS_DMA_RXCHAN_CFG_HBU(2);

	if (translation_enabled())
		cfg |= PAS_DMA_RXCHAN_CFG_CTR;

	write_dma_reg(mac, PAS_DMA_RXCHAN_CFG(chan_id), cfg);

	write_dma_reg(mac, PAS_DMA_RXINT_BASEL(mac->dma_if),
		      PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma));

	write_dma_reg(mac, PAS_DMA_RXINT_BASEU(mac->dma_if),
		      PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) |
		      PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

	cfg = PAS_DMA_RXINT_CFG_DHL(3) | PAS_DMA_RXINT_CFG_L2 |
	      PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
	      PAS_DMA_RXINT_CFG_HEN;

	if (translation_enabled())
		cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR;

	write_dma_reg(mac, PAS_DMA_RXINT_CFG(mac->dma_if), cfg);

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;

	snprintf(ring->irq_name, sizeof(ring->irq_name),
		 "%s rx", dev->name);

	mac->rx = ring;

	return 0;

out_buffers:
	dma_free_coherent(&mac->dma_pdev->dev,
			  RX_RING_SIZE * sizeof(u64),
			  ring->ring, ring->dma);
out_ring_desc:
	kfree(ring->ring_info);
out_ring_info:
	kfree(ring);
out_ring:
	return -ENOMEM;
}

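/* Judging by the BASEL/BASEU register pairs above, the receive side
 * uses two separate rings: a per-channel descriptor ring
 * (PAS_DMA_RXCHAN_*, based at ring->dma) that hardware fills with
 * completions, and a per-interface buffer ring (PAS_DMA_RXINT_*,
 * based at ring->buf_dma) that software fills with free buffers in
 * pasemi_mac_replenish_rx_ring().
 */
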
static int pasemi_mac_setup_tx_resources(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	u64 val;
	int chan_id = mac->dma_txch;
	struct pasemi_mac_txring *ring;
	unsigned int cfg;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out_ring;

	spin_lock_init(&ring->lock);

	ring->size = TX_RING_SIZE;
	ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  TX_RING_SIZE, GFP_KERNEL);
	if (!ring->ring_info)
		goto out_ring_info;

	/* Allocate descriptors */
	ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev,
					TX_RING_SIZE * sizeof(u64),
					&ring->dma, GFP_KERNEL);
	if (!ring->ring)
		goto out_ring_desc;

	memset(ring->ring, 0, TX_RING_SIZE * sizeof(u64));

	write_dma_reg(mac, PAS_DMA_TXCHAN_BASEL(chan_id),
		      PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);

	write_dma_reg(mac, PAS_DMA_TXCHAN_BASEU(chan_id), val);

	cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
	      PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
	      PAS_DMA_TXCHAN_CFG_UP |
	      PAS_DMA_TXCHAN_CFG_WT(2);

	if (translation_enabled())
		cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

	write_dma_reg(mac, PAS_DMA_TXCHAN_CFG(chan_id), cfg);

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;

	snprintf(ring->irq_name, sizeof(ring->irq_name),
		 "%s tx", dev->name);

	mac->tx = ring;

	return 0;

out_ring_desc:
	kfree(ring->ring_info);
out_ring_info:
	kfree(ring);
out_ring:
	return -ENOMEM;
}

static void pasemi_mac_free_tx_resources(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int i, j;
	struct pasemi_mac_buffer *info;
	dma_addr_t dmas[MAX_SKB_FRAGS+1];
	int freed;
	int start, limit;

	start = mac->tx->next_to_clean;
	limit = mac->tx->next_to_fill;

	/* Compensate for when fill has wrapped and clean has not */
	if (start > limit)
		limit += TX_RING_SIZE;

	for (i = start; i < limit; i += freed) {
		info = &TX_RING_INFO(mac, i+1);
		if (info->dma && info->skb) {
			for (j = 0; j <= skb_shinfo(info->skb)->nr_frags; j++)
				dmas[j] = TX_RING_INFO(mac, i+1+j).dma;
			freed = pasemi_mac_unmap_tx_skb(mac, info->skb, dmas);
		} else
			freed = 2;
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		TX_RING(mac, i) = 0;

	dma_free_coherent(&mac->dma_pdev->dev,
			  TX_RING_SIZE * sizeof(u64),
			  mac->tx->ring, mac->tx->dma);

	kfree(mac->tx->ring_info);
	kfree(mac->tx);
	mac->tx = NULL;
}

static void pasemi_mac_free_rx_resources(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int i;
	struct pasemi_mac_buffer *info;

	for (i = 0; i < RX_RING_SIZE; i++) {
		info = &RX_RING_INFO(mac, i);
		if (info->skb && info->dma) {
			pci_unmap_single(mac->dma_pdev,
					 info->dma,
					 info->skb->len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(info->skb);
		}
		info->dma = 0;
		info->skb = NULL;
	}

	for (i = 0; i < RX_RING_SIZE; i++)
		RX_RING(mac, i) = 0;

	dma_free_coherent(&mac->dma_pdev->dev,
			  RX_RING_SIZE * sizeof(u64),
			  mac->rx->ring, mac->rx->dma);

	dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
			  mac->rx->buffers, mac->rx->buf_dma);

	kfree(mac->rx->ring_info);
	kfree(mac->rx);
	mac->rx = NULL;
}

static void pasemi_mac_replenish_rx_ring(struct net_device *dev, int limit)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int fill, count;

	if (limit <= 0)
		return;

	fill = mac->rx->next_to_fill;
	for (count = 0; count < limit; count++) {
		struct pasemi_mac_buffer *info = &RX_RING_INFO(mac, fill);
		u64 *buff = &RX_BUFF(mac, fill);
		struct sk_buff *skb;
		dma_addr_t dma;

		/* skb might still be in there for recycle on short receives */
		if (info->skb)
			skb = info->skb;
		else {
			skb = dev_alloc_skb(BUF_SIZE);
			skb_reserve(skb, LOCAL_SKB_ALIGN);
		}

		if (unlikely(!skb))
			break;

		dma = pci_map_single(mac->dma_pdev, skb->data,
				     BUF_SIZE - LOCAL_SKB_ALIGN,
				     PCI_DMA_FROMDEVICE);

		if (unlikely(dma_mapping_error(dma))) {
			dev_kfree_skb_irq(info->skb);
			break;
		}

		info->skb = skb;
		info->dma = dma;
		*buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
		fill++;
	}

	wmb();

	write_dma_reg(mac, PAS_DMA_RXINT_INCR(mac->dma_if), count);

	mac->rx->next_to_fill = (mac->rx->next_to_fill + count) &
				(RX_RING_SIZE - 1);
}

static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac)
{
	unsigned int reg, pcnt;
	/* Re-enable packet count interrupts: finally
	 * ack the packet count interrupt we got in rx_intr.
	 */

	pcnt = *mac->rx_status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;

	write_iob_reg(mac, PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);
}

static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac)
{
	unsigned int reg, pcnt;

	/* Re-enable packet count interrupts */
	pcnt = *mac->tx_status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

	write_iob_reg(mac, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg);
}

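/* In both restart helpers the current packet count is read from the
 * status word and written back together with the PINTC bit. This
 * looks like a read-and-acknowledge handshake: only packets counted
 * up to this point are acked, so a count that moves in the meantime
 * will raise the interrupt again rather than be lost.
 */
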
static inline void pasemi_mac_rx_error(struct pasemi_mac *mac, u64 macrx)
{
	unsigned int rcmdsta, ccmdsta;

	if (!netif_msg_rx_err(mac))
		return;

	rcmdsta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	ccmdsta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch));

	printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n",
	       macrx, *mac->rx_status);

	printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
	       rcmdsta, ccmdsta);
}

static inline void pasemi_mac_tx_error(struct pasemi_mac *mac, u64 mactx)
{
	unsigned int cmdsta;

	if (!netif_msg_tx_err(mac))
		return;

	cmdsta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch));

	printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "
	       "tx status 0x%016lx\n", mactx, *mac->tx_status);

	printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
}

static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
{
	unsigned int n;
	int count;
	struct pasemi_mac_buffer *info;
	struct sk_buff *skb;
	unsigned int len;
	u64 macrx;
	dma_addr_t dma;
	int buf_index;
	u64 eval;

	spin_lock(&mac->rx->lock);

	n = mac->rx->next_to_clean;

	prefetch(RX_RING(mac, n));

	for (count = 0; count < limit; count++) {
		macrx = RX_RING(mac, n);

		if ((macrx & XCT_MACRX_E) ||
		    (*mac->rx_status & PAS_STATUS_ERROR))
			pasemi_mac_rx_error(mac, macrx);

		if (!(macrx & XCT_MACRX_O))
			break;

		info = NULL;

		BUG_ON(!(macrx & XCT_MACRX_RR_8BRES));

		eval = (RX_RING(mac, n+1) & XCT_RXRES_8B_EVAL_M) >>
			XCT_RXRES_8B_EVAL_S;
		buf_index = eval-1;

		dma = (RX_RING(mac, n+2) & XCT_PTR_ADDR_M);
		info = &RX_RING_INFO(mac, buf_index);

		skb = info->skb;

		prefetch(skb);
		prefetch(&skb->data_len);

		len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

		if (len < 256) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(mac->netdev,
						   len + LOCAL_SKB_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, LOCAL_SKB_ALIGN);
				memcpy(new_skb->data, skb->data, len);
				/* save the skb in buffer_info as good */
				info->skb = new_skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		} else
			info->skb = NULL;

		pci_unmap_single(mac->dma_pdev, dma, len, PCI_DMA_FROMDEVICE);

		info->dma = 0;

		skb_put(skb, len);

		if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
				XCT_MACRX_CSUM_S;
		} else
			skb->ip_summed = CHECKSUM_NONE;

		mac->netdev->stats.rx_bytes += len;
		mac->netdev->stats.rx_packets++;

		skb->protocol = eth_type_trans(skb, mac->netdev);
		netif_receive_skb(skb);

		RX_RING(mac, n) = 0;
		RX_RING(mac, n+1) = 0;

		/* Need to zero it out since hardware doesn't, since the
		 * replenish loop uses it to tell when it's done.
		 */
		RX_BUFF(mac, buf_index) = 0;

		n += 4;
	}

	if (n > RX_RING_SIZE) {
		/* Errata 5971 workaround: L2 target of headers */
		write_iob_reg(mac, PAS_IOB_COM_PKTHDRCNT, 0);
		n &= (RX_RING_SIZE-1);
	}

	mac->rx->next_to_clean = n;

	/* Increase is in number of 16-byte entries, and since each descriptor
	 * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
	 * count*2.
	 */
	write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), count << 1);

	pasemi_mac_replenish_rx_ring(mac->netdev, count);

	spin_unlock(&mac->rx->lock);

	return count;
}

/* Can't make this too large or we blow the kernel stack limits */
#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)

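/* Rough stack math for the limit above, assuming 4 KB pages where
 * MAX_SKB_FRAGS is 18: TX_CLEAN_BATCHSIZE is 128/18 == 7, so the
 * on-stack dmas[][] array in pasemi_mac_clean_tx() is
 * 7 * 19 * sizeof(dma_addr_t), i.e. about 1 KB.
 */
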
static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
{
	int i, j;
	unsigned int start, descr_count, buf_count, batch_limit;
	unsigned int ring_limit;
	unsigned int total_count;
	unsigned long flags;
	struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
	dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];

	total_count = 0;
	batch_limit = TX_CLEAN_BATCHSIZE;
restart:
	spin_lock_irqsave(&mac->tx->lock, flags);

	start = mac->tx->next_to_clean;
	ring_limit = mac->tx->next_to_fill;

	/* Compensate for when fill has wrapped but clean has not */
	if (start > ring_limit)
		ring_limit += TX_RING_SIZE;

	buf_count = 0;
	descr_count = 0;

	for (i = start;
	     descr_count < batch_limit && i < ring_limit;
	     i += buf_count) {
		u64 mactx = TX_RING(mac, i);
		struct sk_buff *skb;

		if ((mactx & XCT_MACTX_E) ||
		    (*mac->tx_status & PAS_STATUS_ERROR))
			pasemi_mac_tx_error(mac, mactx);

		if (unlikely(mactx & XCT_MACTX_O))
			/* Not yet transmitted */
			break;

		skb = TX_RING_INFO(mac, i+1).skb;
		skbs[descr_count] = skb;

		buf_count = 2 + skb_shinfo(skb)->nr_frags;
		for (j = 0; j <= skb_shinfo(skb)->nr_frags; j++)
			dmas[descr_count][j] = TX_RING_INFO(mac, i+1+j).dma;

		TX_RING(mac, i) = 0;
		TX_RING(mac, i+1) = 0;

		/* Since we always fill with an even number of entries, make
		 * sure we skip any unused one at the end as well.
		 */
		if (buf_count & 1)
			buf_count++;

		descr_count++;
	}
	mac->tx->next_to_clean = i & (TX_RING_SIZE-1);

	spin_unlock_irqrestore(&mac->tx->lock, flags);
	netif_wake_queue(mac->netdev);

	for (i = 0; i < descr_count; i++)
		pasemi_mac_unmap_tx_skb(mac, skbs[i], dmas[i]);

	total_count += descr_count;

	/* If the batch was full, try to clean more */
	if (descr_count == batch_limit)
		goto restart;

	return total_count;
}

static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
{
	struct net_device *dev = data;
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int reg;

	if (!(*mac->rx_status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	/* Don't reset packet count so it won't fire again but clear
	 * all others.
	 */

	reg = 0;
	if (*mac->rx_status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
	if (*mac->rx_status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
	if (*mac->rx_status & PAS_STATUS_TIMER)
		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

	netif_rx_schedule(dev, &mac->napi);

	write_iob_reg(mac, PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);

	return IRQ_HANDLED;
}

static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
{
	struct net_device *dev = data;
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int reg, pcnt;

	if (!(*mac->tx_status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	pasemi_mac_clean_tx(mac);

	pcnt = *mac->tx_status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

	if (*mac->tx_status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
	if (*mac->tx_status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;

	write_iob_reg(mac, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg);

	return IRQ_HANDLED;
}

static void pasemi_adjust_link(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int msg;
	unsigned int flags;
	unsigned int new_flags;

	if (!mac->phydev->link) {
		/* If no link, MAC speed settings don't matter. Just report
		 * link down and return.
		 */
		if (mac->link && netif_msg_link(mac))
			printk(KERN_INFO "%s: Link is down.\n", dev->name);

		netif_carrier_off(dev);
		mac->link = 0;

		return;
	} else
		netif_carrier_on(dev);

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
			      PAS_MAC_CFG_PCFG_TSR_M);

	if (!mac->phydev->duplex)
		new_flags |= PAS_MAC_CFG_PCFG_HD;

	switch (mac->phydev->speed) {
	case 1000:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
			     PAS_MAC_CFG_PCFG_TSR_1G;
		break;
	case 100:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
			     PAS_MAC_CFG_PCFG_TSR_100M;
		break;
	case 10:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
			     PAS_MAC_CFG_PCFG_TSR_10M;
		break;
	default:
		printk(KERN_WARNING "Unsupported speed %d\n", mac->phydev->speed);
	}

	/* Print on link or speed/duplex change */
	msg = mac->link != mac->phydev->link || flags != new_flags;

	mac->duplex = mac->phydev->duplex;
	mac->speed = mac->phydev->speed;
	mac->link = mac->phydev->link;

	if (new_flags != flags)
		write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);

	if (msg && netif_msg_link(mac))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
		       dev->name, mac->speed, mac->duplex ? "full" : "half");
}

static int pasemi_mac_phy_init(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct device_node *dn, *phy_dn;
	struct phy_device *phydev;
	unsigned int phy_id;
	const phandle *ph;
	const unsigned int *prop;
	struct resource r;
	int ret;

	dn = pci_device_to_OF_node(mac->pdev);
	ph = of_get_property(dn, "phy-handle", NULL);
	if (!ph)
		return -ENODEV;
	phy_dn = of_find_node_by_phandle(*ph);

	prop = of_get_property(phy_dn, "reg", NULL);
	ret = of_address_to_resource(phy_dn->parent, 0, &r);
	if (ret)
		goto err;

	phy_id = *prop;

	snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id);

	of_node_put(phy_dn);

	mac->link = 0;
	mac->speed = 0;
	mac->duplex = -1;

	phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0, PHY_INTERFACE_MODE_SGMII);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
		return PTR_ERR(phydev);
	}

	mac->phydev = phydev;

	return 0;

err:
	of_node_put(phy_dn);
	return -ENODEV;
}

static int pasemi_mac_open(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int base_irq;
	unsigned int flags;
	int ret;

	/* enable rx section */
	write_dma_reg(mac, PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

	/* enable tx section */
	write_dma_reg(mac, PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

	write_mac_reg(mac, PAS_MAC_CFG_TXP, flags);

	write_iob_reg(mac, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
		      PAS_IOB_DMA_RXCH_CFG_CNTTH(0));

	write_iob_reg(mac, PAS_IOB_DMA_TXCH_CFG(mac->dma_txch),
		      PAS_IOB_DMA_TXCH_CFG_CNTTH(128));

	/* Clear out any residual packet count state from firmware */
	pasemi_mac_restart_rx_intr(mac);
	pasemi_mac_restart_tx_intr(mac);

	/* 0xffffff is max value, about 16ms */
	write_iob_reg(mac, PAS_IOB_DMA_COM_TIMEOUTCFG,
		      PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0xffffff));

	ret = pasemi_mac_setup_rx_resources(dev);
	if (ret)
		goto out_rx_resources;

	ret = pasemi_mac_setup_tx_resources(dev);
	if (ret)
		goto out_tx_resources;

	write_mac_reg(mac, PAS_MAC_IPC_CHNL,
		      PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
		      PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));

	/* enable rx if */
	write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      PAS_DMA_RXINT_RCMDSTA_EN |
		      PAS_DMA_RXINT_RCMDSTA_DROPS_M |
		      PAS_DMA_RXINT_RCMDSTA_BP |
		      PAS_DMA_RXINT_RCMDSTA_OO |
		      PAS_DMA_RXINT_RCMDSTA_BT);

	/* enable rx channel */
	write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
		      PAS_DMA_RXCHAN_CCMDSTA_EN |
		      PAS_DMA_RXCHAN_CCMDSTA_DU |
		      PAS_DMA_RXCHAN_CCMDSTA_OD |
		      PAS_DMA_RXCHAN_CCMDSTA_FD |
		      PAS_DMA_RXCHAN_CCMDSTA_DT);

	/* enable tx channel */
	write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
		      PAS_DMA_TXCHAN_TCMDSTA_EN |
		      PAS_DMA_TXCHAN_TCMDSTA_SZ |
		      PAS_DMA_TXCHAN_TCMDSTA_DB |
		      PAS_DMA_TXCHAN_TCMDSTA_DE |
		      PAS_DMA_TXCHAN_TCMDSTA_DA);

	pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);

	write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), RX_RING_SIZE>>1);

	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
		PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

	if (mac->type == MAC_TYPE_GMAC)
		flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
	else
		flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G;

	/* Enable interface in MAC */
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

	ret = pasemi_mac_phy_init(dev);
	/* Some configs don't have PHYs (XAUI etc), so don't complain about
	 * failed init due to -ENODEV.
	 */
	if (ret && ret != -ENODEV)
		dev_warn(&mac->pdev->dev, "phy init failed: %d\n", ret);

	netif_start_queue(dev);
	napi_enable(&mac->napi);

	/* Interrupts are a bit different for our DMA controller: While
	 * it has a regular PCI device header, the interrupt there is
	 * really the base of the interrupt range it's using. Each tx
	 * and rx channel has its own interrupt source.
	 */

	base_irq = virq_to_hw(mac->dma_pdev->irq);

	mac->tx_irq = irq_create_mapping(NULL, base_irq + mac->dma_txch);
	mac->rx_irq = irq_create_mapping(NULL, base_irq + 20 + mac->dma_rxch);

	ret = request_irq(mac->tx_irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
			  mac->tx->irq_name, dev);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			base_irq + mac->dma_txch, ret);
		goto out_tx_int;
	}

	ret = request_irq(mac->rx_irq, &pasemi_mac_rx_intr, IRQF_DISABLED,
			  mac->rx->irq_name, dev);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			base_irq + 20 + mac->dma_rxch, ret);
		goto out_rx_int;
	}

	if (mac->phydev)
		phy_start(mac->phydev);

	return 0;

out_rx_int:
	free_irq(mac->tx_irq, dev);
out_tx_int:
	napi_disable(&mac->napi);
	netif_stop_queue(dev);
	pasemi_mac_free_tx_resources(dev);
out_tx_resources:
	pasemi_mac_free_rx_resources(dev);
out_rx_resources:

	return ret;
}

#define MAX_RETRIES 5000

static int pasemi_mac_close(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int sta;
	int retries;

	if (mac->phydev) {
		phy_stop(mac->phydev);
		phy_disconnect(mac->phydev);
	}

	netif_stop_queue(dev);
	napi_disable(&mac->napi);

	sta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	if (sta & (PAS_DMA_RXINT_RCMDSTA_BP |
		   PAS_DMA_RXINT_RCMDSTA_OO |
		   PAS_DMA_RXINT_RCMDSTA_BT))
		printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta);

	sta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch));
	if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU |
		   PAS_DMA_RXCHAN_CCMDSTA_OD |
		   PAS_DMA_RXCHAN_CCMDSTA_FD |
		   PAS_DMA_RXCHAN_CCMDSTA_DT))
		printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta);

	sta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch));
	if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ |
		   PAS_DMA_TXCHAN_TCMDSTA_DB |
		   PAS_DMA_TXCHAN_TCMDSTA_DE |
		   PAS_DMA_TXCHAN_TCMDSTA_DA))
		printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta);

	/* Clean out any pending buffers */
	pasemi_mac_clean_tx(mac);
	pasemi_mac_clean_rx(mac, RX_RING_SIZE);

	/* Disable interface */
	write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
		      PAS_DMA_TXCHAN_TCMDSTA_ST);
	write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      PAS_DMA_RXINT_RCMDSTA_ST);
	write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
		      PAS_DMA_RXCHAN_CCMDSTA_ST);

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch));
		if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch));
		if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
		if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");

	/* Then, disable the channel. This must be done separately from
	 * stopping, since you can't disable when active.
	 */
	write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
	write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
	write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);

	free_irq(mac->tx_irq, dev);
	free_irq(mac->rx_irq, dev);

	/* Free resources */
	pasemi_mac_free_rx_resources(dev);
	pasemi_mac_free_tx_resources(dev);

	return 0;
}

static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct pasemi_mac_txring *txring;
	u64 dflags, mactx;
	dma_addr_t map[MAX_SKB_FRAGS+1];
	unsigned int map_size[MAX_SKB_FRAGS+1];
	unsigned long flags;
	int i, nfrags;

	dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const unsigned char *nh = skb_network_header(skb);

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			dflags |= XCT_MACTX_CSUM_TCP;
			dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
			dflags |= XCT_MACTX_IPO(nh - skb->data);
			break;
		case IPPROTO_UDP:
			dflags |= XCT_MACTX_CSUM_UDP;
			dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
			dflags |= XCT_MACTX_IPO(nh - skb->data);
			break;
		}
	}

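	/* Judging by how they are filled in, XCT_MACTX_IPH carries the
	 * IP header length in 32-bit words and XCT_MACTX_IPO the offset
	 * of the IP header within the frame, which the checksum engine
	 * presumably needs to find the TCP/UDP payload.
	 */
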
	nfrags = skb_shinfo(skb)->nr_frags;

	map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
				PCI_DMA_TODEVICE);
	map_size[0] = skb_headlen(skb);
	if (dma_mapping_error(map[0]))
		goto out_err_nolock;

	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		map[i+1] = pci_map_page(mac->dma_pdev, frag->page,
					frag->page_offset, frag->size,
					PCI_DMA_TODEVICE);
		map_size[i+1] = frag->size;
		if (dma_mapping_error(map[i+1])) {
			nfrags = i;
			goto out_err_nolock;
		}
	}

	mactx = dflags | XCT_MACTX_LLEN(skb->len);

	txring = mac->tx;

	spin_lock_irqsave(&txring->lock, flags);

	/* Avoid stepping on the same cache line that the DMA controller
	 * is currently about to send, so leave at least 8 words available.
	 * Total free space needed is mactx + fragments + 8
	 */
	if (RING_AVAIL(txring) < nfrags + 10) {
		/* no room -- stop the queue and wait for tx intr */
		netif_stop_queue(dev);
		goto out_err;
	}

	TX_RING(mac, txring->next_to_fill) = mactx;
	txring->next_to_fill++;
	TX_RING_INFO(mac, txring->next_to_fill).skb = skb;
	for (i = 0; i <= nfrags; i++) {
		TX_RING(mac, txring->next_to_fill+i) =
			XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
		TX_RING_INFO(mac, txring->next_to_fill+i).dma = map[i];
	}

	/* We have to add an even number of 8-byte entries to the ring
	 * even if the last one is unused. That means always an odd number
	 * of pointers + one mactx descriptor.
	 */
	if (nfrags & 1)
		nfrags++;

	txring->next_to_fill = (txring->next_to_fill + nfrags + 1) &
			       (TX_RING_SIZE-1);

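	/* Worked example of the padding: a 1-frag skb writes 1 mactx + 2
	 * pointer entries == 3 slots, so nfrags is bumped to 2 and
	 * next_to_fill advances by 4 in total; the INCR write below then
	 * reports (nfrags+2) >> 1 == 2 two-entry units. This matches the
	 * (nfrags + 3) & ~1 slots later freed in pasemi_mac_unmap_tx_skb().
	 */
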
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	spin_unlock_irqrestore(&txring->lock, flags);

	write_dma_reg(mac, PAS_DMA_TXCHAN_INCR(mac->dma_txch), (nfrags+2) >> 1);

	return NETDEV_TX_OK;

out_err:
	spin_unlock_irqrestore(&txring->lock, flags);
out_err_nolock:
	while (nfrags--)
		pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
				 PCI_DMA_TODEVICE);

	return NETDEV_TX_BUSY;
}

static void pasemi_mac_set_rx_mode(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int flags;

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);

	/* Set promiscuous */
	if (dev->flags & IFF_PROMISC)
		flags |= PAS_MAC_CFG_PCFG_PR;
	else
		flags &= ~PAS_MAC_CFG_PCFG_PR;

	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}

static int pasemi_mac_poll(struct napi_struct *napi, int budget)
{
	struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
	struct net_device *dev = mac->netdev;
	int pkts;

	pasemi_mac_clean_tx(mac);
	pkts = pasemi_mac_clean_rx(mac, budget);
	if (pkts < budget) {
		/* all done, no more packets present */
		netif_rx_complete(dev, napi);

		pasemi_mac_restart_rx_intr(mac);
	}
	return pkts;
}

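/* This follows the standard NAPI contract: return the number of
 * packets processed, and only when that is below the budget complete
 * the poll and re-enable the packet count interrupt.
 */
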
static void __iomem * __devinit map_onedev(struct pci_dev *p, int index)
{
	struct device_node *dn;
	void __iomem *ret;

	dn = pci_device_to_OF_node(p);
	if (!dn)
		goto fallback;

	ret = of_iomap(dn, index);
	if (!ret)
		goto fallback;

	return ret;

fallback:
	/* This is hardcoded and ugly, but we have some firmware versions
	 * that don't provide the register space in the device tree. Luckily
	 * they are at well-known locations so we can just do the math here.
	 */
	return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
}

static int __devinit pasemi_mac_map_regs(struct pasemi_mac *mac)
{
	struct resource res;
	struct device_node *dn;
	int err;

	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
	if (!mac->dma_pdev) {
		dev_err(&mac->pdev->dev, "Can't find DMA Controller\n");
		return -ENODEV;
	}

	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!mac->iob_pdev) {
		dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n");
		return -ENODEV;
	}

	mac->regs = map_onedev(mac->pdev, 0);
	mac->dma_regs = map_onedev(mac->dma_pdev, 0);
	mac->iob_regs = map_onedev(mac->iob_pdev, 0);

	if (!mac->regs || !mac->dma_regs || !mac->iob_regs) {
		dev_err(&mac->pdev->dev, "Can't map registers\n");
		return -ENODEV;
	}

	/* The dma status structure is located in the I/O bridge, and
	 * is cache coherent.
	 */
	if (!dma_status) {
		dn = pci_device_to_OF_node(mac->iob_pdev);
		if (dn)
			err = of_address_to_resource(dn, 1, &res);
		if (!dn || err) {
			/* Fallback for old firmware */
			res.start = 0xfd800000;
			res.end = res.start + 0x1000;
		}
		dma_status = __ioremap(res.start, res.end-res.start, 0);
	}

	return 0;
}

static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int index = 0;
	struct net_device *dev;
	struct pasemi_mac *mac;
	int err;
	DECLARE_MAC_BUF(mac_buf);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	dev = alloc_etherdev(sizeof(struct pasemi_mac));
	if (dev == NULL) {
		dev_err(&pdev->dev,
			"pasemi_mac: Could not allocate ethernet device.\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	mac = netdev_priv(dev);

	mac->pdev = pdev;
	mac->netdev = dev;

	netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);

	dev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX | NETIF_F_SG;

	/* These should come out of the device tree eventually */
	mac->dma_txch = index;
	mac->dma_rxch = index;

	/* We probe GMAC before XAUI, but the DMA interfaces are
	 * in XAUI, GMAC order.
	 */
	if (index < 4)
		mac->dma_if = index + 2;
	else
		mac->dma_if = index - 4;
	index++;

	switch (pdev->device) {
	case 0xa005:
		mac->type = MAC_TYPE_GMAC;
		break;
	case 0xa006:
		mac->type = MAC_TYPE_XAUI;
		break;
	default:
		err = -ENODEV;
		goto out;
	}

	/* get mac addr from device tree */
	if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
		err = -ENODEV;
		goto out;
	}

	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));

	dev->open = pasemi_mac_open;
	dev->stop = pasemi_mac_close;
	dev->hard_start_xmit = pasemi_mac_start_tx;
	dev->set_multicast_list = pasemi_mac_set_rx_mode;

	err = pasemi_mac_map_regs(mac);
	if (err)
		goto out;

	mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
	mac->tx_status = &dma_status->tx_sta[mac->dma_txch];

	mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* Enable most messages by default */
	mac->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

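	/* (NETIF_MSG_IFUP << 1) - 1 sets every message bit up to and
	 * including NETIF_MSG_IFUP, overriding the netif_msg_init()
	 * result just above.
	 */
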
	err = register_netdev(dev);

	if (err) {
		dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
			err);
		goto out;
	} else if (netif_msg_probe(mac))
		printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
		       "hw addr %s\n",
		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
		       mac->dma_if, mac->dma_txch, mac->dma_rxch,
		       print_mac(mac_buf, dev->dev_addr));

	return err;

out:
	if (mac->iob_pdev)
		pci_dev_put(mac->iob_pdev);
	if (mac->dma_pdev)
		pci_dev_put(mac->dma_pdev);
	if (mac->dma_regs)
		iounmap(mac->dma_regs);
	if (mac->iob_regs)
		iounmap(mac->iob_regs);
	if (mac->regs)
		iounmap(mac->regs);

	free_netdev(dev);
out_disable_device:
	pci_disable_device(pdev);
	return err;
}

static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pasemi_mac *mac;

	if (!netdev)
		return;

	mac = netdev_priv(netdev);

	unregister_netdev(netdev);

	pci_disable_device(pdev);
	pci_dev_put(mac->dma_pdev);
	pci_dev_put(mac->iob_pdev);

	iounmap(mac->regs);
	iounmap(mac->dma_regs);
	iounmap(mac->iob_regs);

	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
}

static struct pci_device_id pasemi_mac_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
	{ },
};

MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);

static struct pci_driver pasemi_mac_driver = {
	.name		= "pasemi_mac",
	.id_table	= pasemi_mac_pci_tbl,
	.probe		= pasemi_mac_probe,
	.remove		= __devexit_p(pasemi_mac_remove),
};

static void __exit pasemi_mac_cleanup_module(void)
{
	pci_unregister_driver(&pasemi_mac_driver);
	__iounmap(dma_status);
	dma_status = NULL;
}

int pasemi_mac_init_module(void)
{
	return pci_register_driver(&pasemi_mac_driver);
}

module_init(pasemi_mac_init_module);
module_exit(pasemi_mac_cleanup_module);