// SPDX-License-Identifier: GPL-2.0-only
/*
 * sonic.c
 *
 * Converted to DMA API, added zero-copy buffer handling, and
 * (from the mac68k project) introduced dhd's support for 16-bit cards.
 *
 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * This driver is based on work from Andreas Busse, but most of
 * the code is rewritten.
 *
 * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
 *
 *    Core code included by system sonic drivers
 *
 * And... partially rewritten again by David Huggins-Daines in order
 * to cope with screwed up Macintosh NICs that may or may not use
 * 16-bit DMA.
 *
 * (C) 1999 David Huggins-Daines <dhd@debian.org>
 *
 * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
 * National Semiconductors data sheet for the DP83932B Sonic Ethernet
 * controller, and the files "8390.c" and "skeleton.c" in this directory.
 *
 * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi
 * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also
 * the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
 */

static unsigned int version_printed;

static int sonic_debug = -1;
module_param(sonic_debug, int, 0);
MODULE_PARM_DESC(sonic_debug, "debug message level");

static void sonic_msg_init(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);

        lp->msg_enable = netif_msg_init(sonic_debug, 0);

        if (version_printed++ == 0)
                netif_dbg(lp, drv, dev, "%s", version);
}

static int sonic_alloc_descriptors(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);

        /* Allocate a chunk of memory for the descriptors. Note that this
         * must not cross a 64K boundary. It is smaller than one page which
         * means that page alignment is a sufficient condition.
         */
        lp->descriptors =
                dma_alloc_coherent(lp->device,
                                   SIZEOF_SONIC_DESC *
                                   SONIC_BUS_SCALE(lp->dma_bitmode),
                                   &lp->descriptors_laddr, GFP_KERNEL);

        if (!lp->descriptors)
                return -ENOMEM;

        lp->cda = lp->descriptors;
        lp->tda = lp->cda + SIZEOF_SONIC_CDA *
                            SONIC_BUS_SCALE(lp->dma_bitmode);
        lp->rda = lp->tda + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
                            SONIC_BUS_SCALE(lp->dma_bitmode);
        lp->rra = lp->rda + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
                            SONIC_BUS_SCALE(lp->dma_bitmode);

        lp->cda_laddr = lp->descriptors_laddr;
        lp->tda_laddr = lp->cda_laddr + SIZEOF_SONIC_CDA *
                                        SONIC_BUS_SCALE(lp->dma_bitmode);
        lp->rda_laddr = lp->tda_laddr + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
                                        SONIC_BUS_SCALE(lp->dma_bitmode);
        lp->rra_laddr = lp->rda_laddr + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
                                        SONIC_BUS_SCALE(lp->dma_bitmode);

        return 0;
}
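
/* Illustrative sketch (not driver code) of the layout computed above: the
 * CPU-visible pointers and the DMA ("logical") addresses advance by the
 * same offsets within the single coherent allocation, so both views always
 * name the same bytes.
 */
#if 0
        unsigned int scale   = SONIC_BUS_SCALE(lp->dma_bitmode);
        unsigned int tda_off = SIZEOF_SONIC_CDA * scale;
        unsigned int rda_off = tda_off + SIZEOF_SONIC_TD * SONIC_NUM_TDS * scale;
        unsigned int rra_off = rda_off + SIZEOF_SONIC_RD * SONIC_NUM_RDS * scale;

        /* offset 0:       CDA (CAM descriptors)     lp->cda, lp->cda_laddr
         * offset tda_off: TDA (transmit descs)      lp->tda, lp->tda_laddr
         * offset rda_off: RDA (receive descs)       lp->rda, lp->rda_laddr
         * offset rra_off: RRA (receive resources)   lp->rra, lp->rra_laddr
         */
#endif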

/*
 * Open/initialize the SONIC controller.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
 */
static int sonic_open(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);
        int i;

        netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);

        spin_lock_init(&lp->lock);

        for (i = 0; i < SONIC_NUM_RRS; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);

                if (skb == NULL) {
                        while (i > 0) { /* free any that were allocated successfully */
                                i--;
                                dev_kfree_skb(lp->rx_skb[i]);
                                lp->rx_skb[i] = NULL;
                        }
                        printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
                               dev->name);
                        return -ENOMEM;
                }
                /* align IP header unless DMA requires otherwise */
                if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
                        skb_reserve(skb, 2);
                lp->rx_skb[i] = skb;
        }

        for (i = 0; i < SONIC_NUM_RRS; i++) {
                dma_addr_t laddr = dma_map_single(lp->device,
                                                  skb_put(lp->rx_skb[i], SONIC_RBSIZE),
                                                  SONIC_RBSIZE, DMA_FROM_DEVICE);

                if (dma_mapping_error(lp->device, laddr)) {
                        while (i > 0) { /* free any that were mapped successfully */
                                i--;
                                dma_unmap_single(lp->device, lp->rx_laddr[i],
                                                 SONIC_RBSIZE, DMA_FROM_DEVICE);
                                lp->rx_laddr[i] = (dma_addr_t)0;
                        }
                        for (i = 0; i < SONIC_NUM_RRS; i++) {
                                dev_kfree_skb(lp->rx_skb[i]);
                                lp->rx_skb[i] = NULL;
                        }
                        printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
                               dev->name);
                        return -ENOMEM;
                }
                lp->rx_laddr[i] = laddr;
        }

        /*
         * Initialize the SONIC
         */
        sonic_init(dev);

        netif_start_queue(dev);

        netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__);

        return 0;
}
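
/* Illustrative sketch (not driver code) of the two-pass unwind pattern used
 * by sonic_open() above, using a hypothetical resource array so it stands
 * alone: only the work of the failing pass has to be undone in place.
 */
#if 0
static int grab_all(void *res[], int n)
{
        int i;

        for (i = 0; i < n; i++) {
                res[i] = kmalloc(32, GFP_KERNEL);
                if (!res[i]) {
                        while (i > 0) { /* undo only what succeeded */
                                i--;
                                kfree(res[i]);
                                res[i] = NULL;
                        }
                        return -ENOMEM;
                }
        }
        return 0;
}
#endif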

/* Wait for the SONIC to become idle. */
static void sonic_quiesce(struct net_device *dev, u16 mask)
{
        struct sonic_local * __maybe_unused lp = netdev_priv(dev);
        int i;
        u16 bits;

        for (i = 0; i < 1000; ++i) {
                bits = SONIC_READ(SONIC_CMD) & mask;
                if (!bits)
                        return;
                if (irqs_disabled() || in_interrupt())
                        udelay(20);
                else
                        usleep_range(100, 200);
        }
        WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
}
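
/* Usage sketch (illustrative only): callers in this file pair a command
 * write with a quiesce on the same command register bit(s) before touching
 * the chip again, as sonic_init() does when loading the CAM:
 */
#if 0
        SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
        sonic_quiesce(dev, SONIC_CR_LCAM);
#endif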

/*
 * Close the SONIC device
 */
static int sonic_close(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);
        int i;

        netif_dbg(lp, ifdown, dev, "%s\n", __func__);

        netif_stop_queue(dev);

        /*
         * stop the SONIC, disable interrupts
         */
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
        sonic_quiesce(dev, SONIC_CR_ALL);

        SONIC_WRITE(SONIC_IMR, 0);
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

        /* unmap and free skbs that haven't been transmitted */
        for (i = 0; i < SONIC_NUM_TDS; i++) {
                if (lp->tx_laddr[i]) {
                        dma_unmap_single(lp->device, lp->tx_laddr[i],
                                         lp->tx_len[i], DMA_TO_DEVICE);
                        lp->tx_laddr[i] = (dma_addr_t)0;
                }
                if (lp->tx_skb[i]) {
                        dev_kfree_skb(lp->tx_skb[i]);
                        lp->tx_skb[i] = NULL;
                }
        }

        /* unmap and free the receive buffers */
        for (i = 0; i < SONIC_NUM_RRS; i++) {
                if (lp->rx_laddr[i]) {
                        dma_unmap_single(lp->device, lp->rx_laddr[i],
                                         SONIC_RBSIZE, DMA_FROM_DEVICE);
                        lp->rx_laddr[i] = (dma_addr_t)0;
                }
                if (lp->rx_skb[i]) {
                        dev_kfree_skb(lp->rx_skb[i]);
                        lp->rx_skb[i] = NULL;
                }
        }

        return 0;
}

static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct sonic_local *lp = netdev_priv(dev);
        int i;

        /*
         * put the Sonic into software-reset mode and
         * disable all interrupts before releasing DMA buffers
         */
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
        sonic_quiesce(dev, SONIC_CR_ALL);

        SONIC_WRITE(SONIC_IMR, 0);
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

        /* We could resend the original skbs. Easier to re-initialise. */
        for (i = 0; i < SONIC_NUM_TDS; i++) {
                if (lp->tx_laddr[i]) {
                        dma_unmap_single(lp->device, lp->tx_laddr[i],
                                         lp->tx_len[i], DMA_TO_DEVICE);
                        lp->tx_laddr[i] = (dma_addr_t)0;
                }
                if (lp->tx_skb[i]) {
                        dev_kfree_skb(lp->tx_skb[i]);
                        lp->tx_skb[i] = NULL;
                }
        }

        /* Try to restart the adaptor. */
        sonic_init(dev);

        lp->stats.tx_errors++;
        netif_trans_update(dev); /* prevent tx timeout */
        netif_wake_queue(dev);
}

/*
 * transmit packet
 *
 * Appends new TD during transmission thus avoiding any TX interrupts
 * until we run out of TDs.
 * This routine interacts closely with the ISR in that it may,
 *   reset the status flags of the new TD
 *   set and reset EOL flags
 *   stop the tx queue
 * The ISR interacts with this routine in various ways. It may,
 *   reset the status flags of the TDs
 *   test the EOL and status flags of the TDs
 *   wake the tx queue
 * Concurrently with all of this, the SONIC is potentially writing to
 * the status flags of the TDs.
 */
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);
        dma_addr_t laddr;
        int length;
        int entry;
        unsigned long flags;

        netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);

        length = skb->len;
        if (length < ETH_ZLEN) {
                if (skb_padto(skb, ETH_ZLEN))
                        return NETDEV_TX_OK;
                length = ETH_ZLEN;
        }

        /*
         * Map the packet data into the logical DMA address space
         */
        laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
        if (!laddr) {
                pr_err_ratelimited("%s: failed to map tx DMA buffer.\n",
                                   dev->name);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        spin_lock_irqsave(&lp->lock, flags);

        entry = (lp->eol_tx + 1) & SONIC_TDS_MASK;

        sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
        sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
        sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
        sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
        sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
        sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
        sonic_tda_put(dev, entry, SONIC_TD_LINK,
                      sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);

        sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK, ~SONIC_EOL &
                      sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK));

        netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);

        SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);

        lp->tx_len[entry] = length;
        lp->tx_laddr[entry] = laddr;
        lp->tx_skb[entry] = skb;

        lp->eol_tx = entry;

        entry = (entry + 1) & SONIC_TDS_MASK;
        if (lp->tx_skb[entry]) {
                /* The ring is full, the ISR has yet to process the next TD. */
                netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
                netif_stop_queue(dev);
                /* after this packet, wait for ISR to free up some TDAs */
        }

        spin_unlock_irqrestore(&lp->lock, flags);

        return NETDEV_TX_OK;
}
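
/* Illustrative walk-through of the EOL handover performed above, for a
 * hypothetical ring state where eol_tx == 2 and the new packet lands in
 * entry 3:
 *
 *   before:  TD2.link carries EOL (the SONIC stops after TD2)
 *   step 1:  fill TD3 and set EOL in TD3.link
 *   step 2:  clear EOL in TD2.link
 *   step 3:  write SONIC_CR_TXP
 *
 * Setting EOL on the new tail before clearing it on the old one means the
 * chip never observes a chain without an end marker.
 */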

/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
static irqreturn_t sonic_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct sonic_local *lp = netdev_priv(dev);
        int status;
        unsigned long flags;

        /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
         * with sonic_send_packet() so that the two functions can share state.
         * Secondly, it makes sonic_interrupt() re-entrant, as that is required
         * by macsonic which must use two IRQs with different priority levels.
         */
        spin_lock_irqsave(&lp->lock, flags);

        status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
        if (!status) {
                spin_unlock_irqrestore(&lp->lock, flags);
                return IRQ_NONE;
        }

        do {
                SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */

                if (status & SONIC_INT_PKTRX) {
                        netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
                        sonic_rx(dev);  /* got packet(s) */
                }

                if (status & SONIC_INT_TXDN) {
                        int entry = lp->cur_tx;
                        int td_status;
                        int freed_some = 0;

                        /* The state of a Transmit Descriptor may be inferred
                         * from { tx_skb[entry], td_status } as follows.
                         * { clear, clear } => the TD has never been used
                         * { set,   clear } => the TD was handed to SONIC
                         * { set,   set   } => the TD was handed back
                         * { clear, set   } => the TD is available for re-use
                         */

                        netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);

                        while (lp->tx_skb[entry] != NULL) {
                                td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS);
                                if (td_status == 0)
                                        break;

                                if (td_status & SONIC_TCR_PTX) {
                                        lp->stats.tx_packets++;
                                        lp->stats.tx_bytes +=
                                                sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
                                } else {
                                        if (td_status & (SONIC_TCR_EXD |
                                                         SONIC_TCR_EXC | SONIC_TCR_BCM))
                                                lp->stats.tx_aborted_errors++;
                                        if (td_status &
                                            (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
                                                lp->stats.tx_carrier_errors++;
                                        if (td_status & SONIC_TCR_OWC)
                                                lp->stats.tx_window_errors++;
                                        if (td_status & SONIC_TCR_FU)
                                                lp->stats.tx_fifo_errors++;
                                }

                                /* We must free the original skb */
                                dev_consume_skb_irq(lp->tx_skb[entry]);
                                lp->tx_skb[entry] = NULL;
                                /* and unmap DMA buffer */
                                dma_unmap_single(lp->device, lp->tx_laddr[entry],
                                                 lp->tx_len[entry], DMA_TO_DEVICE);
                                lp->tx_laddr[entry] = (dma_addr_t)0;
                                freed_some = 1;

                                if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
                                        entry = (entry + 1) & SONIC_TDS_MASK;
                                        break;
                                }
                                entry = (entry + 1) & SONIC_TDS_MASK;
                        }

                        if (freed_some || lp->tx_skb[entry] == NULL)
                                netif_wake_queue(dev); /* The ring is no longer full */
                        lp->cur_tx = entry;
                }

                /*
                 * check error conditions
                 */
                if (status & SONIC_INT_RFO) {
                        netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
                                  __func__);
                }
                if (status & SONIC_INT_RDE) {
                        netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
                                  __func__);
                }
                if (status & SONIC_INT_RBAE) {
                        netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
                                  __func__);
                }

                /* counter overruns; all counters are 16bit wide */
                if (status & SONIC_INT_FAE)
                        lp->stats.rx_frame_errors += 65536;
                if (status & SONIC_INT_CRC)
                        lp->stats.rx_crc_errors += 65536;
                if (status & SONIC_INT_MP)
                        lp->stats.rx_missed_errors += 65536;

                /* transmit error */
                if (status & SONIC_INT_TXER) {
                        u16 tcr = SONIC_READ(SONIC_TCR);

                        netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
                                  __func__, tcr);

                        if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
                                   SONIC_TCR_FU | SONIC_TCR_BCM)) {
                                /* Aborted transmission. Try again. */
                                netif_stop_queue(dev);
                                SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
                        }
                }

                /* bus retry */
                if (status & SONIC_INT_BR) {
                        printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
                               dev->name);
                        /* ... to help debug DMA problems causing endless interrupts. */
                        /* Bounce the eth interface to turn on the interrupt again. */
                        SONIC_WRITE(SONIC_IMR, 0);
                }

                status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
        } while (status);

        spin_unlock_irqrestore(&lp->lock, flags);

        return IRQ_HANDLED;
}

/* Return the array index corresponding to a given Receive Buffer pointer. */
static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
                           unsigned int last)
{
        unsigned int i = last;

        do {
                i = (i + 1) & SONIC_RRS_MASK;
                if (addr == lp->rx_laddr[i])
                        return i;
        } while (i != last);

        return -ENOENT;
}
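
/* Illustrative note: the scan starts just after @last and wraps, e.g. with
 * a hypothetical 16-entry ring and last == 13 the probe order is
 * 14, 15, 0, 1, ... 13.  sonic_rx() passes the current receive descriptor
 * index as the hint, a good starting guess when buffers complete in ring
 * order.
 */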

/* Allocate and map a new skb to be used as a receive buffer. */
static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
                           struct sk_buff **new_skb, dma_addr_t *new_addr)
{
        *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
        if (!*new_skb)
                return false;

        if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
                skb_reserve(*new_skb, 2);

        *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
                                   SONIC_RBSIZE, DMA_FROM_DEVICE);
        if (!*new_addr) {
                dev_kfree_skb(*new_skb);
                *new_skb = NULL;
                return false;
        }

        return true;
}

/* Place a new receive resource in the Receive Resource Area and update RWP. */
static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
                             dma_addr_t old_addr, dma_addr_t new_addr)
{
        unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
        unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
        u32 buf;

        /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
         * scans the other resources in the RRA, those in the range [RWP, RRP).
         */
        do {
                buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
                      sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);

                if (buf == old_addr)
                        break;

                entry = (entry + 1) & SONIC_RRS_MASK;
        } while (entry != end);

        WARN_ONCE(buf != old_addr, "failed to find resource!\n");

        sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
        sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);

        entry = (entry + 1) & SONIC_RRS_MASK;

        SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
}
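
/* Illustrative example of the ownership split described above, assuming a
 * hypothetical 16-entry RRA: with RRP == 5 and RWP == 12, entries 5..11
 * belong to the SONIC, so the loop only walks 12, 13, 14, 15, 0, ... 4
 * looking for the resource that carried the just-processed buffer.
 */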

/*
 * We have a good packet(s), pass it/them up the network stack.
 */
static void sonic_rx(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);
        int entry = lp->cur_rx;
        int prev_entry = lp->eol_rx;
        bool rbe = false;

        while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
                u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);

                /* If the RD has LPKT set, the chip has finished with the RB */
                if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
                        struct sk_buff *new_skb;
                        dma_addr_t new_laddr;
                        u32 addr = (sonic_rda_get(dev, entry,
                                                  SONIC_RD_PKTPTR_H) << 16) |
                                   sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
                        int i = index_from_addr(lp, addr, entry);

                        if (i < 0) {
                                WARN_ONCE(1, "failed to find buffer!\n");
                                break;
                        }

                        if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
                                struct sk_buff *used_skb = lp->rx_skb[i];
                                int pkt_len;

                                /* Pass the used buffer up the stack */
                                dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
                                                 DMA_FROM_DEVICE);

                                pkt_len = sonic_rda_get(dev, entry,
                                                        SONIC_RD_PKTLEN);
                                skb_trim(used_skb, pkt_len);
                                used_skb->protocol = eth_type_trans(used_skb,
                                                                    dev);
                                netif_rx(used_skb);
                                lp->stats.rx_packets++;
                                lp->stats.rx_bytes += pkt_len;

                                lp->rx_skb[i] = new_skb;
                                lp->rx_laddr[i] = new_laddr;
                        } else {
                                /* Failed to obtain a new buffer so re-use it */
                                new_laddr = addr;
                                lp->stats.rx_dropped++;
                        }
                        /* If RBE is already asserted when RWP advances then
                         * it's safe to clear RBE after processing this packet.
                         */
                        rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
                        sonic_update_rra(dev, lp, addr, new_laddr);
                }
                /*
                 * give back the descriptor
                 */
                sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
                sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);

                prev_entry = entry;
                entry = (entry + 1) & SONIC_RDS_MASK;
        }

        lp->cur_rx = entry;

        if (prev_entry != lp->eol_rx) {
                /* Advance the EOL flag to put descriptors back into service */
                sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
                              sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
                sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
                              sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
                lp->eol_rx = prev_entry;
        }

        if (rbe)
                SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
}
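
/* Note on the receive strategy above: rather than copying each frame out of
 * a fixed DMA buffer, the filled skb itself is handed to the stack and a
 * freshly allocated skb takes its place in the RRA (the "zero-copy buffer
 * handling" mentioned in the header comment).  Only if that allocation
 * fails is the frame dropped and the old buffer recycled.
 */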

/*
 * Get the current statistics.
 * This may be called with the device open or closed.
 */
static struct net_device_stats *sonic_get_stats(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);

        /* read the tally counters from the SONIC and reset them */
        lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
        SONIC_WRITE(SONIC_CRCT, 0xffff);
        lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
        SONIC_WRITE(SONIC_FAET, 0xffff);
        lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
        SONIC_WRITE(SONIC_MPT, 0xffff);

        return &lp->stats;
}
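
/* Accounting sketch (inferred from this file, illustrative only): each
 * 16-bit tally is widened in two places -- the interrupt handler adds 65536
 * whenever a counter rolls over, and this routine folds in whatever has
 * accumulated since the last read before rearming the counter.  E.g. two
 * CRC rollovers plus a current CRCT reading of 0x0123 give
 * 2 * 65536 + 0x123 = 131363 CRC errors in total.
 */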

/*
 * Set or clear the multicast filter for this adaptor.
 */
static void sonic_multicast_list(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);
        unsigned int rcr;
        struct netdev_hw_addr *ha;
        unsigned char *addr;
        int i;

        rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
        rcr |= SONIC_RCR_BRD;   /* accept broadcast packets */

        if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
                rcr |= SONIC_RCR_PRO;
        } else {
                if ((dev->flags & IFF_ALLMULTI) ||
                    (netdev_mc_count(dev) > 15)) {
                        rcr |= SONIC_RCR_AMC;   /* accept all multicasts */
                } else {
                        unsigned long flags;

                        netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
                                  netdev_mc_count(dev));
                        sonic_set_cam_enable(dev, 1); /* always enable our own address */
                        i = 1;
                        netdev_for_each_mc_addr(ha, dev) {
                                addr = ha->addr;
                                sonic_cda_put(dev, i, SONIC_CD_CAP0,
                                              addr[1] << 8 | addr[0]);
                                sonic_cda_put(dev, i, SONIC_CD_CAP1,
                                              addr[3] << 8 | addr[2]);
                                sonic_cda_put(dev, i, SONIC_CD_CAP2,
                                              addr[5] << 8 | addr[4]);
                                sonic_set_cam_enable(dev,
                                                     sonic_get_cam_enable(dev) | (1 << i));
                                i++;
                        }
                        SONIC_WRITE(SONIC_CDC, 16);
                        SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);

                        /* LCAM and TXP commands can't be used simultaneously */
                        spin_lock_irqsave(&lp->lock, flags);
                        sonic_quiesce(dev, SONIC_CR_TXP);
                        SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
                        sonic_quiesce(dev, SONIC_CR_LCAM);
                        spin_unlock_irqrestore(&lp->lock, flags);
                }
        }

        netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr);

        SONIC_WRITE(SONIC_RCR, rcr);
}
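
/* Worked example (illustrative only) of the CAM packing above, for the
 * hypothetical address 00:40:10:aa:bb:cc (addr[0]..addr[5]):
 *
 *   CAP0 = addr[1] << 8 | addr[0] = 0x4000
 *   CAP1 = addr[3] << 8 | addr[2] = 0xaa10
 *   CAP2 = addr[5] << 8 | addr[4] = 0xccbb
 *
 * i.e. each CAM Address Port word holds two octets, low octet first.
 */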

/*
 * Initialize the SONIC ethernet controller.
 */
static int sonic_init(struct net_device *dev)
{
        struct sonic_local *lp = netdev_priv(dev);
        int i;

        /*
         * put the Sonic into software-reset mode and
         * disable all interrupts
         */
        SONIC_WRITE(SONIC_IMR, 0);
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

        /* While in reset mode, clear CAM Enable register */
        SONIC_WRITE(SONIC_CE, 0);

        /*
         * clear software reset flag, disable receiver, clear and
         * enable interrupts, then completely initialize the SONIC
         */
        SONIC_WRITE(SONIC_CMD, 0);
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
        sonic_quiesce(dev, SONIC_CR_ALL);

        /*
         * initialize the receive resource area
         */
        netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n",
                  __func__);

        for (i = 0; i < SONIC_NUM_RRS; i++) {
                u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
                u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;

                sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
                sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
                sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
                sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
        }

        /* initialize all RRA registers */
        SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
        SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
        SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
        SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
        SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
        SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));

        /* load the resource pointers */
        netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);

        SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
        sonic_quiesce(dev, SONIC_CR_RRRA);

        /*
         * Initialize the receive descriptors so that they
         * become a circular linked list, ie. let the last
         * descriptor point to the first again.
         */
        netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n",
                  __func__);

        for (i = 0; i < SONIC_NUM_RDS; i++) {
                sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
                sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
                sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
                sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
                sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
                sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
                sonic_rda_put(dev, i, SONIC_RD_LINK,
                              lp->rda_laddr +
                              ((i + 1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
        }
        /* fix last descriptor */
        sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
                      (lp->rda_laddr & 0xffff) | SONIC_EOL);
        lp->eol_rx = SONIC_NUM_RDS - 1;
        lp->cur_rx = 0;
        SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
        SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);

        /*
         * initialize transmit descriptors
         */
        netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n",
                  __func__);

        for (i = 0; i < SONIC_NUM_TDS; i++) {
                sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
                sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
                sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
                sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
                sonic_tda_put(dev, i, SONIC_TD_LINK,
                              (lp->tda_laddr & 0xffff) +
                              (i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
                lp->tx_skb[i] = NULL;
        }
        /* fix last descriptor */
        sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
                      (lp->tda_laddr & 0xffff));

        SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
        SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
        lp->cur_tx = 0;
        lp->eol_tx = SONIC_NUM_TDS - 1;

        /*
         * put our own address to CAM desc[0]
         */
        sonic_cda_put(dev, 0, SONIC_CD_CAP0,
                      dev->dev_addr[1] << 8 | dev->dev_addr[0]);
        sonic_cda_put(dev, 0, SONIC_CD_CAP1,
                      dev->dev_addr[3] << 8 | dev->dev_addr[2]);
        sonic_cda_put(dev, 0, SONIC_CD_CAP2,
                      dev->dev_addr[5] << 8 | dev->dev_addr[4]);
        sonic_set_cam_enable(dev, 1);

        for (i = 0; i < 16; i++)
                sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);

        /*
         * initialize CAM registers
         */
        SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
        SONIC_WRITE(SONIC_CDC, 16);

        /*
         * load the CAM
         */
        SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
        sonic_quiesce(dev, SONIC_CR_LCAM);

        /*
         * enable receiver, disable loopback
         * and enable all interrupts
         */
        SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
        SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);

        netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
                  SONIC_READ(SONIC_CMD));

        return 0;
}
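
/* Illustrative sketch of the descriptor chaining set up above, assuming a
 * hypothetical 4-entry receive ring: RD0.link -> RD1, RD1.link -> RD2,
 * RD2.link -> RD3, and RD3.link -> RD0 with SONIC_EOL set, so the chip
 * follows a circular list and parks at the EOL descriptor until sonic_rx()
 * advances the EOL flag.
 */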

MODULE_LICENSE("GPL");