/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 *
 * This driver uses the sungem driver (c) David Miller
 * (davem@redhat.com) as its basis.
 *
 * The cassini chip has a number of features that distinguish it from
 * the GEM chip:
 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
 *      load balancing (non-VLAN mode)
 *  batching of multiple packets
 *  multiple CPU dispatching
 *  page-based RX descriptor engine with separate completion rings
 *  Gigabit support (GMII and PCS interface)
 *  MIF link up/down detection works
 *
 * RX is handled by page sized buffers that are attached as fragments to
 * the skb. here's what's done:
 *  -- driver allocates pages at a time and keeps reference counts
 *     on them.
 *  -- the upper protocol layers assume that the header is in the skb
 *     itself. as a result, cassini will copy a small amount (64 bytes)
 *     to make them happy.
 *  -- driver appends the rest of the data pages as frags to skbuffs
 *     and increments the reference count
 *  -- on page reclamation, the driver swaps the page with a spare page.
 *     if that page is still in use, it frees its reference to that page,
 *     and allocates a new page for use. otherwise, it just recycles the
 *     page.
 *
 * NOTE: cassini can parse the header. however, it's not worth it
 *       as long as the network stack requires a header copy.
 *
 * TX has 4 queues. currently these queues are used in a round-robin
 * fashion for load balancing. They can also be used for QoS. for that
 * to work, however, QoS information needs to be exposed down to the driver
 * level so that subqueues get targeted to particular transmit rings.
 * alternatively, the queues can be configured via use of the all-purpose
 * ioctl.
 *
 * RX DATA: the rx completion ring has all the info, but the rx desc
 * ring has all of the data. RX can conceivably come in under multiple
 * interrupts, but the INT# assignment needs to be set up properly by
 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
 * that. also, the two descriptor rings are designed to distinguish between
 * encrypted and non-encrypted packets, but we use them for buffering
 * instead.
 *
 * by default, the selective clear mask is set up to process rx packets.
 */
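/* Rough sketch of the RX fast path described above (not the exact code,
 * just the shape of it): the completion ring entry names a page and an
 * offset, the first RX_COPY_MIN bytes are memcpy'd into the skb head so
 * the stack finds its header there, and the remainder is attached as a
 * page fragment:
 *
 *	skb = dev_alloc_skb(RX_COPY_MIN + swivel);
 *	memcpy(skb->data, page_addr + off, RX_COPY_MIN);
 *	frag->page = page->buffer;	(page refcount incremented)
 *	frag->page_offset = off + RX_COPY_MIN;
 *	skb->data_len and skb->len grow by the fragment size
 *
 * see cas_rx_process_pkt() below for the real thing.
 */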
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>

#include <net/checksum.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS            num_online_cpus()

#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
#define USE_NAPI
#define cas_skb_release(x)  netif_receive_skb(x)
#else
#define cas_skb_release(x)  netif_rx(x)
#endif

/* select which firmware to use */
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */

#include "cassini.h"

#define USE_TX_COMPWB      /* use completion writeback registers */
#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
#define USE_RX_BLANK       /* hw interrupt mitigation */
#undef USE_ENTROPY_DEV     /* don't test for entropy device */

/* NOTE: these aren't usable unless PCI interrupts can be assigned.
 * also, we need to make cp->lock finer-grained.
 */
#undef  USE_PCI_INTB
#undef  USE_PCI_INTC
#undef  USE_PCI_INTD
#undef  USE_QOS

#undef  USE_VPD_DEBUG       /* debug vpd information if defined */

/* rx processing options */
#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
#define RX_COPY_ALWAYS 0    /* if 0, use frags */
#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */
#define DRV_MODULE_NAME		"cassini"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.4"
#define DRV_MODULE_RELDATE	"1 July 2004"

#define CAS_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define CAS_TX_TIMEOUT			(HZ)
#define CAS_LINK_TIMEOUT		(22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT		(1)

/* timeout values for state changing. these specify the number
 * of 10us delays to be used before giving up.
 */
#define STOP_TRIES_PHY			1000
#define STOP_TRIES			5000

/* specify a minimum frame size to deal with some fifo issues
 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 *            2 * page_size - 0x50
 */
#define CAS_MIN_FRAME			97
#define CAS_1000MB_MIN_FRAME		255
#define CAS_MIN_MTU			60
#define CAS_MAX_MTU			min(((cp->page_size << 1) - 0x50), 9000)
/*
 * Eliminate these and use separate atomic counters for each, to
 * avoid a race condition.
 */
#define CAS_RESET_MTU			1
#define CAS_RESET_ALL			2
#define CAS_RESET_SPARE			3
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

static int cassini_debug = -1;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
static int link_mode;

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");
/*
 * Work around for a PCS bug in which the link goes down due to the chip
 * being confused and never showing a link status of "up."
 */
#define DEFAULT_LINKDOWN_TIMEOUT 5
/*
 * Value in seconds, for user input.
 */
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");

/*
 * value in 'ticks' (units used by jiffies). Set when we init the
 * module because 'HZ' is actually a function call on some flavors of
 * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 */
static int link_transition_timeout;
static u16 link_modes[] __devinitdata = {
	BMCR_ANENABLE,			 /* 0 : autoneg */
	0,				 /* 1 : 10bt half duplex */
	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
static struct pci_device_id cas_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
static void cas_set_link_modes(struct cas *cp);

static inline void cas_lock_tx(struct cas *cp)
{
	int i;

	for (i = 0; i < N_TX_RINGS; i++)
		spin_lock(&cp->tx_lock[i]);
}

static inline void cas_lock_all(struct cas *cp)
{
	spin_lock_irq(&cp->lock);
	cas_lock_tx(cp);
}

/* WTZ: QA was finding deadlock problems with the previous
 * versions after long test runs with multiple cards per machine.
 * See if replacing cas_lock_all with safer versions helps. The
 * symptoms QA is reporting match those we'd expect if interrupts
 * aren't being properly restored, and we fixed a previous deadlock
 * with similar symptoms by using save/restore versions in other
 * places.
 */
#define cas_lock_all_save(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	spin_lock_irqsave(&xxxcp->lock, flags); \
	cas_lock_tx(xxxcp); \
} while (0)

static inline void cas_unlock_tx(struct cas *cp)
{
	int i;

	for (i = N_TX_RINGS; i > 0; i--)
		spin_unlock(&cp->tx_lock[i - 1]);
}

static inline void cas_unlock_all(struct cas *cp)
{
	cas_unlock_tx(cp);
	spin_unlock_irq(&cp->lock);
}

#define cas_unlock_all_restore(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	cas_unlock_tx(xxxcp); \
	spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)
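/* Note on the save/restore variants above: they must be macros rather
 * than inline functions because, like spin_lock_irqsave() itself, they
 * store into the caller's 'flags' variable - something a function
 * taking 'flags' by value could not do.
 */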
static void cas_disable_irq(struct cas *cp, const int ring)
{
	/* Make sure we won't get any more interrupts */
	if (ring == 0) {
		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
		return;
	}

	/* disable completion interrupts and selectively mask */
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
			       cp->regs + REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
		}
	}
}

static inline void cas_mask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_disable_irq(cp, i);
}
static inline void cas_buffer_init(cas_page_t *cp)
{
	struct page *page = cp->buffer;
	atomic_set((atomic_t *)&page->lru.next, 1);
}

static inline int cas_buffer_count(cas_page_t *cp)
{
	struct page *page = cp->buffer;
	return atomic_read((atomic_t *)&page->lru.next);
}

static inline void cas_buffer_inc(cas_page_t *cp)
{
	struct page *page = cp->buffer;
	atomic_inc((atomic_t *)&page->lru.next);
}

static inline void cas_buffer_dec(cas_page_t *cp)
{
	struct page *page = cp->buffer;
	atomic_dec((atomic_t *)&page->lru.next);
}
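/* The helpers above keep the driver's own reference count for an rx
 * page in the page's otherwise-unused lru.next field: while the driver
 * owns the page it sits on no LRU list, so the field is free to be
 * reused as an atomic_t. A count of 1 means only the driver holds the
 * page; anything higher means an skb fragment still points into it.
 */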
static void cas_enable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) { /* all but TX_DONE */
		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_RX_EN, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			break;
		}
	}
}

static inline void cas_unmask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_enable_irq(cp, i);
}
static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
			    readl(cp->regs + REG_ENTROPY_IV),
			    sizeof(uint64_t)*8);
#endif
}

static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);
	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

	/* if we read back 0x0, we don't have an entropy device */
	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}
/* access to the phy. the following assumes that we've initialized the MIF to
 * be in frame rather than bit-bang mode
 */
static u16 cas_phy_read(struct cas *cp, int reg)
{
	u32 cmd;
	int limit = STOP_TRIES_PHY;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return (cmd & MIF_FRAME_DATA_MASK);
	}
	return 0xFFFF; /* -1 */
}
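/* The turnaround handshake above mirrors MDIO frame semantics: we set
 * the MSB of the two turnaround bits when launching the frame, and the
 * LSB only reads back as 1 once the PHY has driven the bus and the
 * frame has completed. Returning 0xFFFF on timeout matches what a
 * bit-banged read of an absent PHY would see (pulled-up data line).
 */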
static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
	int limit = STOP_TRIES_PHY;
	u32 cmd;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	cmd |= val & MIF_FRAME_DATA_MASK;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return 0;
	}
	return -1;
}
static void cas_phy_powerup(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if ((ctl & BMCR_PDOWN) == 0)
		return;
	ctl &= ~BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

static void cas_phy_powerdown(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if (ctl & BMCR_PDOWN)
		return;
	ctl |= BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}
/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
		       PCI_DMA_FROMDEVICE);
	cas_buffer_dec(page);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0;
}

#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)       ((x)->used += (y))
#define RX_USED_SET(x, y)       ((x)->used  = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif
/* local page allocation routines for the receive buffers. jumbo pages
 * require at least 8K contiguous and 8K aligned buffers.
 */
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
	cas_page_t *page;

	page = kmalloc(sizeof(cas_page_t), flags);
	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	RX_USED_SET(page, 0);
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto page_err;
	cas_buffer_init(page);
	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
				      cp->page_size, PCI_DMA_FROMDEVICE);
	return page;

page_err:
	kfree(page);
	return NULL;
}
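/* Worked example of the sizing here: alloc_pages(flags, order) hands
 * back 2^order contiguous pages, naturally aligned to their own size.
 * So with 4K system pages, page_order == 1 gives the 8K contiguous,
 * 8K aligned buffer the jumbo comment above asks for, and
 * cp->page_size would be PAGE_SIZE << page_order == 0x2000.
 */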
/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}
/* used on close. free all the spare buffers. */
static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	/* free spare buffers */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice(&cp->rx_spare_list, &list);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	INIT_LIST_HEAD(&list);
#if 1
	/*
	 * Looks like Adrian had protected this with a different
	 * lock than used everywhere else to manipulate this list.
	 */
	spin_lock(&cp->rx_inuse_lock);
	list_splice(&cp->rx_inuse_list, &list);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice(&cp->rx_inuse_list, &list);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_spare_lock);
#endif
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}
/* replenish spares if needed */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	/* check inuse list. if we don't need any more free buffers,
	 * just free it
	 */

	/* make a local copy of the list */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice(&cp->rx_inuse_list, &list);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		if (cas_buffer_count(page) > 1)
			continue;

		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	/* put any inuse buffers back on the list */
	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	/* we still need spares, so try to allocate some */
	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}
/* pull a page from the list. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		/* try to do a quick recovery */
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			if (netif_msg_rx_err(cp))
				printk(KERN_ERR "%s: no spare buffers "
				       "available.\n", cp->dev->name);
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	/* trigger the timer to do the recovery */
	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}
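/* The recovery trigger above relies on RX_SPARE_RECOVER_VAL being a
 * power of two: (recover & (RX_SPARE_RECOVER_VAL - 1)) == 0 is a cheap
 * "recover % RX_SPARE_RECOVER_VAL == 0", so the reset task gets kicked
 * once every RX_SPARE_RECOVER_VAL-th dequeue from a depleting pool
 * rather than on every single one.
 */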
static void cas_mif_poll(struct cas *cp, const int enable)
{
	u32 cfg;

	cfg  = readl(cp->regs + REG_MIF_CFG);
	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

	if (cp->phy_type & CAS_PHY_MII_MDIO1)
		cfg |= MIF_CFG_PHY_SELECT;

	/* poll and interrupt on link status change. */
	if (enable) {
		cfg |= MIF_CFG_POLL_EN;
		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
	}
	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
	       cp->regs + REG_MIF_MASK);
	writel(cfg, cp->regs + REG_MIF_CFG);
}
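/* Note the mask register polarity implied by the code above: bits set
 * in REG_MIF_MASK appear to be masked *off* (0xFFFF when polling is
 * disabled masks everything), so writing ~(BMSR_LSTATUS |
 * BMSR_ANEGCOMPLETE) leaves exactly the link-status and
 * autoneg-complete bits of the polled BMSR able to raise an interrupt.
 */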
/* Must be invoked under cp->lock */
static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
{
	u16 ctl;
#if 1
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);
#endif
	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	lcntl = cp->link_cntl;
	if (ep->autoneg == AUTONEG_ENABLE)
		cp->link_cntl = BMCR_ANENABLE;
	else {
		cp->link_cntl = 0;
		if (ep->speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (ep->speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
#if 1
	changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
	if (cp->lstate == link_up) {
		printk(KERN_INFO "%s: PCS link down.\n",
		       cp->dev->name);
	} else {
		if (changed) {
			printk(KERN_INFO "%s: link configuration changed\n",
			       cp->dev->name);
		}
	}
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->hw_running)
		return;
#if 1
	/*
	 * WTZ: If the old state was link_up, we turn off the carrier
	 * to replicate everything we do elsewhere on a link-down
	 * event when we were already in a link-up state..
	 */
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed && link_was_not_down) {
		/*
		 * WTZ: This branch will simply schedule a full reset after
		 * we explicitly changed link modes in an ioctl. See if this
		 * fixes the link-problems we were having for forced mode.
		 */
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}
#endif
	if (cp->phy_type & CAS_PHY_SERDES) {
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

	} else {
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}
/* Must be invoked under cp->lock. */
static int cas_reset_mii_phy(struct cas *cp)
{
	int limit = STOP_TRIES_PHY;
	u16 val;

	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
	udelay(100);
	while (limit--) {
		val = cas_phy_read(cp, MII_BMCR);
		if ((val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	return (limit <= 0);
}
static void cas_saturn_firmware_load(struct cas *cp)
{
	cas_saturn_patch_t *patch = cas_saturn_patch;

	cas_phy_powerdown(cp);

	/* expanded memory access mode */
	cas_phy_write(cp, DP83065_MII_MEM, 0x0);

	/* pointer configuration for new firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
	cas_phy_write(cp, DP83065_MII_REGD, 0x39);

	/* download new firmware */
	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
	cas_phy_write(cp, DP83065_MII_REGE, patch->addr);
	while (patch->addr) {
		cas_phy_write(cp, DP83065_MII_REGD, patch->val);
		patch++;
	}

	/* enable firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}
/* phy initialization */
static void cas_phy_init(struct cas *cp)
{
	u16 val;

	/* if we're in MII/GMII mode, set up phy */
	if (CAS_PHY_MII(cp->phy_type)) {
		writel(PCS_DATAPATH_MODE_MII,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp); /* take out of isolate mode */

		if (PHY_LUCENT_B0 == cp->phy_id) {
			/* workaround link up/down issue with lucent */
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			/* workarounds for broadcom phy */
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				/* link workaround */
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			/* load firmware to address 10Mbps auto-negotiation
			 * issue. NOTE: this will need to be changed if the
			 * default firmware gets fixed.
			 */
			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
			cas_phy_powerup(cp);
		}

		/* advertise capabilities */
		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);
		udelay(10);

		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			/* make sure that we don't advertise half
			 * duplex to avoid a chip issue
			 */
			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}

	} else {
		/* reset pcs for serdes */
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		/* enable serdes pins on saturn */
		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		/* Reset PCS unit. */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		limit = STOP_TRIES;
		while (limit-- > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			printk(KERN_WARNING "%s: PCS reset bit would not "
			       "clear [%08x].\n", cp->dev->name,
			       readl(cp->regs + REG_PCS_STATE_MACHINE));

		/* Make sure PCS is disabled while changing advertisement
		 * configuration.
		 */
		writel(0x0, cp->regs + REG_PCS_CFG);

		/* Advertise all capabilities except half-duplex. */
		val  = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		/* enable PCS */
		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		/* pcs workaround: enable sync detect */
		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}
static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	/* The remote-fault indication is only valid
	 * when autoneg has completed.
	 */
	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
		if (netif_msg_link(cp))
			printk(KERN_INFO "%s: PCS RemoteFault\n",
			       cp->dev->name);
	}

	/* work around link detection issue by querying the PCS state
	 * machine directly.
	 */
	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		if (cp->lstate != link_up) {
			if (cp->opened) {
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;

				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/*
			 * force a reset, as a workaround for the
			 * link-failure problem. May want to move this to a
			 * point a bit earlier in the sequence. If we had
			 * generated a reset a short time ago, we'll wait for
			 * the link timer to check the status until a
			 * timer expires (link_transition_jiffies_valid is
			 * true when the timer is running.) Instead of using
			 * a system timer, we just do a check whenever the
			 * link timer is running - this clears the flag after
			 * a suitable delay.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened && netif_msg_link(cp)) {
			printk(KERN_INFO "%s: PCS link down.\n",
			       cp->dev->name);
		}

		/* Cassini only: if you force a mode, there can be
		 * sync problems on link down. to fix that, the following
		 * things need to be checked:
		 * 1) read serialink state register
		 * 2) read pcs status register to verify link down.
		 * 3) if link down and serial link == 0x03, then you need
		 *    to global reset the chip.
		 */
		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
			/* should check to see if we're in a forced mode */
			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/* force a reset, as a workaround for the
			 * link-failure problem. May want to move
			 * this to a point a bit earlier in the
			 * sequence.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}
static int cas_pcs_interrupt(struct net_device *dev,
			     struct cas *cp, u32 status)
{
	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
		return 0;
	return cas_pcs_link_check(cp);
}
static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (!txmac_stat)
		return 0;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			cp->dev->name, txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (txmac_stat & MAC_TX_UNDERRUN) {
		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
		       dev->name);
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
		       dev->name);
		cp->net_stats[0].tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (txmac_stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (txmac_stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	/* We do not keep track of MAC_TX_COLL_FIRST and
	 * MAC_TX_PEAK_ATTEMPTS events.
	 */
	return 0;
}
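/* The 0x10000 increments above aren't arbitrary: the hardware keeps
 * these statistics in 16-bit counters and raises the corresponding
 * status bit when a counter wraps, so each event represents exactly
 * 2^16 = 0x10000 counted occurrences.
 */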
static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
	cas_hp_inst_t *inst;
	u32 val;
	int i;

	i = 0;
	while ((inst = firmware) && inst->note) {
		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);

		++firmware;
		++i;
	}
}
static void cas_init_rx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	u32 val;
	int i, size;

	/* rx free descriptors */
	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
	if ((N_RX_DESC_RINGS > 1) &&
	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
	writel(val, cp->regs + REG_RX_CFG);

	val = (unsigned long) cp->init_rxds[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx desc 2 is for IPSEC packets. however,
		 * we don't use it for that purpose.
		 */
		val = (unsigned long) cp->init_rxds[1] -
			(unsigned long) cp->init_block;
		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
		writel((desc_dma + val) & 0xffffffff, cp->regs +
		       REG_PLUS_RX_DB1_LOW);
		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
		       REG_PLUS_RX_KICK1);
	}

	/* rx completion registers */
	val = (unsigned long) cp->init_rxcs[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx comp 2-4 */
		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
			val = (unsigned long) cp->init_rxcs[i] -
				(unsigned long) cp->init_block;
			writel((desc_dma + val) >> 32, cp->regs +
			       REG_PLUS_RX_CBN_HI(i));
			writel((desc_dma + val) & 0xffffffff, cp->regs +
			       REG_PLUS_RX_CBN_LOW(i));
		}
	}

	/* read selective clear regs to prevent spurious interrupts
	 * on reset because complete == kick.
	 * selective clear set up to prevent interrupts on resets
	 */
	readl(cp->regs + REG_INTR_STATUS_ALIAS);
	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		for (i = 1; i < N_RX_COMP_RINGS; i++)
			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));

		/* 2 is different from 3 and 4 */
		if (N_RX_COMP_RINGS > 1)
			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));

		for (i = 2; i < N_RX_COMP_RINGS; i++)
			writel(INTR_RX_DONE_ALT,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
	}

	/* set up pause thresholds */
	val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
			cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
	writel(val, cp->regs + REG_RX_PAUSE_THRESH);

	/* zero out dma reassembly buffers */
	for (i = 0; i < 64; i++) {
		writel(i, cp->regs + REG_RX_TABLE_ADDR);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
	}

	/* make sure address register is 0 for normal operation */
	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

	/* interrupt mitigation */
#ifdef USE_RX_BLANK
	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
	writel(val, cp->regs + REG_RX_BLANK);
#else
	writel(0x0, cp->regs + REG_RX_BLANK);
#endif

	/* interrupt generation as a function of low water marks for
	 * free desc and completion entries. these are used to trigger
	 * housekeeping for rx descs. we don't use the free interrupt
	 * as it's not very useful
	 */
	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
	writel(val, cp->regs + REG_RX_AE_THRESH);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
	}

	/* Random early detect registers. useful for congestion avoidance.
	 * this should be tunable.
	 */
	writel(0x0, cp->regs + REG_RX_RED);

	/* receive page sizes. default == 2K (0x800) */
	val = 0;
	if (cp->page_size == 0x1000)
		val = 0x1;
	else if (cp->page_size == 0x2000)
		val = 0x2;
	else if (cp->page_size == 0x4000)
		val = 0x3;

	/* round mtu + offset. constrain to page size. */
	size = cp->dev->mtu + 64;
	if (size > cp->page_size)
		size = cp->page_size;

	if (size <= 0x400)
		i = 0x0;
	else if (size <= 0x800)
		i = 0x1;
	else if (size <= 0x1000)
		i = 0x2;
	else
		i = 0x3;

	cp->mtu_stride = 1 << (i + 10);
	val  = CAS_BASE(RX_PAGE_SIZE, val);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
	writel(val, cp->regs + REG_RX_PAGE_SIZE);

	/* enable the header parser if desired */
	if (CAS_HP_FIRMWARE == cas_prog_null)
		return;

	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
	writel(val, cp->regs + REG_HP_CFG);
}
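/* Example of the stride math above with the defaults: mtu 1500 gives
 * size = 1564, which falls in the "<= 0x800" bucket, so i == 1 and
 * mtu_stride = 1 << 11 = 2K. An 8K page then carries page_size >> 11
 * = 4 MTU-sized slots, which is what RX_PAGE_SIZE_MTU_COUNT tells the
 * chip.
 */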
static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
	memset(rxc, 0, sizeof(*rxc));
	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}
/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
 * flipping is protected by the fact that the chip will not
 * hand back the same page index while it's being processed.
 */
static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
{
	cas_page_t *page = cp->rx_pages[1][index];
	cas_page_t *new;

	if (cas_buffer_count(page) == 1)
		return page;

	new = cas_page_dequeue(cp);
	if (new) {
		spin_lock(&cp->rx_inuse_lock);
		list_add(&page->list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}
	return new;
}
/* this needs to be changed if we actually use the ENC RX DESC ring */
static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
				 const int index)
{
	cas_page_t **page0 = cp->rx_pages[0];
	cas_page_t **page1 = cp->rx_pages[1];

	/* swap if buffer is in use */
	if (cas_buffer_count(page0[index]) > 1) {
		cas_page_t *new = cas_page_spare(cp, index);
		if (new) {
			page1[index] = page0[index];
			page0[index] = new;
		}
	}
	RX_USED_SET(page0[index], 0);
	return page0[index];
}
static void cas_clean_rxds(struct cas *cp)
{
	/* only clean ring 0 as ring 1 is used for spare buffers */
	struct cas_rx_desc *rxd = cp->init_rxds[0];
	int i, size;

	/* release all rx flows */
	for (i = 0; i < N_RX_FLOWS; i++) {
		struct sk_buff *skb;
		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
			cas_skb_release(skb);
		}
	}

	/* initialize descriptors */
	size = RX_DESC_RINGN_SIZE(0);
	for (i = 0; i < size; i++) {
		cas_page_t *page = cas_page_swap(cp, 0, i);
		rxd[i].buffer = cpu_to_le64(page->dma_addr);
		rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
					    CAS_BASE(RX_INDEX_RING, 0));
	}

	cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
	cp->rx_last[0] = 0;
	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
}
static void cas_clean_rxcs(struct cas *cp)
{
	int i, j;

	/* take ownership of rx comp descriptors */
	memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
	memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
	for (i = 0; i < N_RX_COMP_RINGS; i++) {
		struct cas_rx_comp *rxc = cp->init_rxcs[i];
		for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
			cas_rxc_init(rxc + j);
		}
	}
}
/* When we get a RX fifo overflow, the RX unit is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int cas_rxmac_reset(struct cas *cp)
{
	struct net_device *dev = cp->dev;
	int limit;
	u32 val;

	/* First, reset MAC RX. */
	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, cp->regs + REG_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	mdelay(5);

	/* Execute RX reset command. */
	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		printk(KERN_ERR "%s: RX reset command will not execute, "
		       "resetting whole chip.\n", dev->name);
		return 1;
	}

	/* reset driver rx state */
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);

	/* Now, reprogram the rest of RX unit. */
	cas_init_rx_dma(cp);

	/* re-enable */
	val = readl(cp->regs + REG_RX_CFG);
	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
	val = readl(cp->regs + REG_MAC_RX_CFG);
	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	return 0;
}
static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
			       u32 status)
{
	u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);

	if (!stat)
		return 0;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
			cp->dev->name, stat);

	/* these are all rollovers */
	spin_lock(&cp->stat_lock[0]);
	if (stat & MAC_RX_ALIGN_ERR)
		cp->net_stats[0].rx_frame_errors += 0x10000;

	if (stat & MAC_RX_CRC_ERR)
		cp->net_stats[0].rx_crc_errors += 0x10000;

	if (stat & MAC_RX_LEN_ERR)
		cp->net_stats[0].rx_length_errors += 0x10000;

	if (stat & MAC_RX_OVERFLOW) {
		cp->net_stats[0].rx_over_errors++;
		cp->net_stats[0].rx_fifo_errors++;
	}

	/* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
	 * events.
	 */
	spin_unlock(&cp->stat_lock[0]);
	return 0;
}
static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);

	if (!stat)
		return 0;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
			cp->dev->name, stat);

	/* This interrupt is just for pause frame and pause
	 * tracking. It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (stat & MAC_CTRL_PAUSE_STATE)
		cp->pause_entered++;

	if (stat & MAC_CTRL_PAUSE_RECEIVED)
		cp->pause_last_time_recvd = (stat >> 16);

	return 0;
}
/* Must be invoked under cp->lock. */
static inline int cas_mdio_link_not_up(struct cas *cp)
{
	u16 val;

	switch (cp->lstate) {
	case link_force_ret:
		if (netif_msg_link(cp))
			printk(KERN_INFO "%s: Autoneg failed again, keeping"
				" forced mode\n", cp->dev->name);
		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
		cp->timer_ticks = 5;
		cp->lstate = link_force_ok;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_aneg:
		val = cas_phy_read(cp, MII_BMCR);

		/* Try forced modes. we try things in the following order:
		 * 1000 full -> 100 full/half -> 10 half
		 */
		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
		val |= BMCR_FULLDPLX;
		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
		cas_phy_write(cp, MII_BMCR, val);
		cp->timer_ticks = 5;
		cp->lstate = link_force_try;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_force_try:
		/* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
		val = cas_phy_read(cp, MII_BMCR);
		cp->timer_ticks = 5;
		if (val & CAS_BMCR_SPEED1000) { /* gigabit */
			val &= ~CAS_BMCR_SPEED1000;
			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}

		if (val & BMCR_SPEED100) {
			if (val & BMCR_FULLDPLX) /* fd failed */
				val &= ~BMCR_FULLDPLX;
			else { /* 100Mbps failed */
				val &= ~BMCR_SPEED100;
			}
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}
	default:
		break;
	}
	return 0;
}
/* must be invoked with cp->lock held */
static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
{
	int restart;

	if (bmsr & BMSR_LSTATUS) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if ((cp->lstate == link_force_try) &&
		    (cp->link_cntl & BMCR_ANENABLE)) {
			cp->lstate = link_force_ret;
			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
			cas_mif_poll(cp, 0);
			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
			cp->timer_ticks = 5;
			if (cp->opened && netif_msg_link(cp))
				printk(KERN_INFO "%s: Got link after fallback, retrying"
				       " autoneg once...\n", cp->dev->name);
			cas_phy_write(cp, MII_BMCR,
				      cp->link_fcntl | BMCR_ANENABLE |
				      BMCR_ANRESTART);
			cas_mif_poll(cp, 1);

		} else if (cp->lstate != link_up) {
			cp->lstate = link_up;
			cp->link_transition = LINK_TRANSITION_LINK_UP;

			if (cp->opened) {
				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
		return 0;
	}

	/* link not up. if the link was previously up, we restart the
	 * whole process
	 */
	restart = 0;
	if (cp->lstate == link_up) {
		cp->lstate = link_down;
		cp->link_transition = LINK_TRANSITION_LINK_DOWN;

		netif_carrier_off(cp->dev);
		if (cp->opened && netif_msg_link(cp))
			printk(KERN_INFO "%s: Link down\n",
			       cp->dev->name);
		restart = 1;

	} else if (++cp->timer_ticks > 10)
		cas_mdio_link_not_up(cp);

	return restart;
}
static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_MIF_STATUS);
	u16 bmsr;

	/* check for a link change */
	if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
		return 0;

	bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
	return cas_mii_link_check(cp, bmsr);
}
static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);

	if (!stat)
		return 0;

	printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
	       readl(cp->regs + REG_BIM_DIAG));

	/* cassini+ has this reserved */
	if ((stat & PCI_ERR_BADACK) &&
	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
		printk("<No ACK64# during ABS64 cycle> ");

	if (stat & PCI_ERR_DTRTO)
		printk("<Delayed transaction timeout> ");
	if (stat & PCI_ERR_OTHER)
		printk("<other> ");
	if (stat & PCI_ERR_BIM_DMA_WRITE)
		printk("<BIM DMA 0 write req> ");
	if (stat & PCI_ERR_BIM_DMA_READ)
		printk("<BIM DMA 0 read req> ");
	printk("\n");

	if (stat & PCI_ERR_OTHER) {
		u16 cfg;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
		printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
		       dev->name, cfg);
		if (cfg & PCI_STATUS_PARITY)
			printk(KERN_ERR "%s: PCI parity error detected.\n",
			       dev->name);
		if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI target abort.\n",
			       dev->name);
		if (cfg & PCI_STATUS_REC_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI master acks target abort.\n",
			       dev->name);
		if (cfg & PCI_STATUS_REC_MASTER_ABORT)
			printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
		if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
			printk(KERN_ERR "%s: PCI system error SERR#.\n",
			       dev->name);
		if (cfg & PCI_STATUS_DETECTED_PARITY)
			printk(KERN_ERR "%s: PCI parity error.\n",
			       dev->name);

		/* Write the error bits back to clear them. */
		cfg &= (PCI_STATUS_PARITY |
			PCI_STATUS_SIG_TARGET_ABORT |
			PCI_STATUS_REC_TARGET_ABORT |
			PCI_STATUS_REC_MASTER_ABORT |
			PCI_STATUS_SIG_SYSTEM_ERROR |
			PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
	}
	/* For all PCI errors, we should reset the chip. */
	return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
			    u32 status)
{
	if (status & INTR_RX_TAG_ERROR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(cp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
			       cp->dev->name);
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	if (status & INTR_RX_LEN_MISMATCH) {
		/* length mismatch. */
		if (netif_msg_rx_err(cp))
			printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
			       cp->dev->name);
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	if (status & INTR_PCS_STATUS) {
		if (cas_pcs_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_TX_MAC_STATUS) {
		if (cas_txmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_RX_MAC_STATUS) {
		if (cas_rxmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MAC_CTRL_STATUS) {
		if (cas_mac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MIF_STATUS) {
		if (cas_mif_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_PCI_ERROR_STATUS) {
		if (cas_pci_interrupt(dev, cp, status))
			goto do_reset;
	}
	return 0;

do_reset:
#if 1
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n",
	       dev->name, status);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	printk(KERN_ERR "reset called in cas_abnormal_irq\n");
	schedule_work(&cp->reset_task);
#endif
	return 1;
}
/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
 * determining whether to do a netif_stop/wakeup
 */
#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
				  const int len)
{
	unsigned long off = addr + len;

	if (CAS_TABORT(cp) == 1)
		return 0;
	if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
		return 0;
	return TX_TARGET_ABORT_LEN;
}
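/* The intent here, as best the logic reads: on chips with the target
 * abort erratum, a buffer whose tail ends within TX_TARGET_ABORT_LEN
 * of a page boundary gets its last TX_TARGET_ABORT_LEN bytes carved
 * off into a separate "tiny" buffer, which is why the descriptor
 * accounting elsewhere reserves CAS_TABORT(cp) slots per fragment.
 */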
static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
{
	struct cas_tx_desc *txds;
	struct sk_buff **skbs;
	struct net_device *dev = cp->dev;
	int entry, count;

	spin_lock(&cp->tx_lock[ring]);
	txds = cp->init_txds[ring];
	skbs = cp->tx_skbs[ring];
	entry = cp->tx_old[ring];

	count = TX_BUFF_COUNT(ring, entry, limit);
	while (entry != limit) {
		struct sk_buff *skb = skbs[entry];
		dma_addr_t daddr;
		u32 dlen;
		int frag;

		if (!skb) {
			/* this should never occur */
			entry = TX_DESC_NEXT(ring, entry);
			continue;
		}

		/* however, we might get only a partial skb release. */
		count -= skb_shinfo(skb)->nr_frags +
			cp->tx_tiny_use[ring][entry].nbufs + 1;
		if (count < 0)
			break;

		if (netif_msg_tx_done(cp))
			printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n",
			       cp->dev->name, ring, entry);

		skbs[entry] = NULL;
		cp->tx_tiny_use[ring][entry].nbufs = 0;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			struct cas_tx_desc *txd = txds + entry;

			daddr = le64_to_cpu(txd->buffer);
			dlen = CAS_VAL(TX_DESC_BUFLEN,
				       le64_to_cpu(txd->control));
			pci_unmap_page(cp->pdev, daddr, dlen,
				       PCI_DMA_TODEVICE);
			entry = TX_DESC_NEXT(ring, entry);

			/* tiny buffer may follow */
			if (cp->tx_tiny_use[ring][entry].used) {
				cp->tx_tiny_use[ring][entry].used = 0;
				entry = TX_DESC_NEXT(ring, entry);
			}
		}

		spin_lock(&cp->stat_lock[ring]);
		cp->net_stats[ring].tx_packets++;
		cp->net_stats[ring].tx_bytes += skb->len;
		spin_unlock(&cp->stat_lock[ring]);
		dev_kfree_skb_irq(skb);
	}
	cp->tx_old[ring] = entry;

	/* this is wrong for multiple tx rings. the net device needs
	 * multiple queues for this to do the right thing.  we wait
	 * for 2*packets to be available when using tiny buffers
	 */
	if (netif_queue_stopped(dev) &&
	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
		netif_wake_queue(dev);
	spin_unlock(&cp->tx_lock[ring]);
}
static void cas_tx(struct net_device *dev, struct cas *cp,
		   u32 status)
{
	int limit, ring;
#ifdef USE_TX_COMPWB
	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#endif
	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
			cp->dev->name, status, (unsigned long long)compwb);
	/* process all the rings */
	for (ring = 0; ring < N_TX_RINGS; ring++) {
#ifdef USE_TX_COMPWB
		/* use the completion writeback registers */
		limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
			CAS_VAL(TX_COMPWB_LSB, compwb);
		compwb = TX_COMPWB_NEXT(compwb);
#else
		limit = readl(cp->regs + REG_TX_COMPN(ring));
#endif
		if (cp->tx_old[ring] != limit)
			cas_tx_ringN(cp, ring, limit);
	}
}
static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
			      int entry, const u64 *words,
			      struct sk_buff **skbref)
{
	int dlen, hlen, len, i, alloclen;
	int off, swivel = RX_SWIVEL_OFF_VAL;
	struct cas_page *page;
	struct sk_buff *skb;
	void *addr, *crcaddr;
	char *p;

	hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
	dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
	len = hlen + dlen;

	if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
		alloclen = len;
	else
		alloclen = max(hlen, RX_COPY_MIN);

	skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
	if (skb == NULL)
		return -1;

	*skbref = skb;
	skb_reserve(skb, swivel);

	p = skb->data;
	addr = crcaddr = NULL;
	if (hlen) { /* always copy header pages */
		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
			swivel;

		i = hlen;
		if (!dlen) /* attach FCS */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
					    PCI_DMA_FROMDEVICE);
		addr = cas_page_map(page->buffer);
		memcpy(p, addr + off, i);
		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
					       PCI_DMA_FROMDEVICE);
		cas_page_unmap(addr);
		RX_USED_ADD(page, 0x100);
		p += hlen;
		swivel = 0;
	}

	if (alloclen < (hlen + dlen)) {
		skb_frag_t *frag = skb_shinfo(skb)->frags;

		/* normal or jumbo packets. we use frags */
		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;

		hlen = min(cp->page_size - off, dlen);
		if (hlen < 0) {
			if (netif_msg_rx_err(cp)) {
				printk(KERN_DEBUG "%s: rx page overflow: "
				       "%d\n", cp->dev->name, hlen);
			}
			dev_kfree_skb_irq(skb);
			return -1;
		}
		i = hlen;
		if (i == dlen) /* attach FCS */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
					    PCI_DMA_FROMDEVICE);

		/* make sure we always copy a header */
		swivel = 0;
		if (p == (char *) skb->data) { /* not split */
			addr = cas_page_map(page->buffer);
			memcpy(p, addr + off, RX_COPY_MIN);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
						       PCI_DMA_FROMDEVICE);
			cas_page_unmap(addr);
			off += RX_COPY_MIN;
			swivel = RX_COPY_MIN;
			RX_USED_ADD(page, cp->mtu_stride);
		} else {
			RX_USED_ADD(page, hlen);
		}
		skb_put(skb, alloclen);

		skb_shinfo(skb)->nr_frags++;
		skb->data_len += hlen - swivel;
		skb->len      += hlen - swivel;

		get_page(page->buffer);
		cas_buffer_inc(page);
		frag->page = page->buffer;
		frag->page_offset = off;
		frag->size = hlen - swivel;

		/* any more data? */
		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
			hlen = dlen;
			off = 0;

			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
						    hlen + cp->crc_size,
						    PCI_DMA_FROMDEVICE);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
						       hlen + cp->crc_size,
						       PCI_DMA_FROMDEVICE);

			skb_shinfo(skb)->nr_frags++;
			skb->data_len += hlen;
			skb->len      += hlen;
			frag++;

			get_page(page->buffer);
			cas_buffer_inc(page);
			frag->page = page->buffer;
			frag->page_offset = 0;
			frag->size = hlen;
			RX_USED_ADD(page, hlen + cp->crc_size);
		}

		if (cp->crc_size) {
			addr = cas_page_map(page->buffer);
			crcaddr = addr + off + hlen;
		}

	} else {
		/* copying packet */
		if (!dlen)
			goto end_copy_pkt;

		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
		hlen = min(cp->page_size - off, dlen);
		if (hlen < 0) {
			if (netif_msg_rx_err(cp)) {
				printk(KERN_DEBUG "%s: rx page overflow: "
				       "%d\n", cp->dev->name, hlen);
			}
			dev_kfree_skb_irq(skb);
			return -1;
		}
		i = hlen;
		if (i == dlen) /* attach FCS */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
					    PCI_DMA_FROMDEVICE);
		addr = cas_page_map(page->buffer);
		memcpy(p, addr + off, i);
		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
					       PCI_DMA_FROMDEVICE);
		cas_page_unmap(addr);
		if (p == (char *) skb->data) /* not split */
			RX_USED_ADD(page, cp->mtu_stride);
		else
			RX_USED_ADD(page, i);

		/* any more data? */
		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
			p += hlen;
			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
						    dlen + cp->crc_size,
						    PCI_DMA_FROMDEVICE);
			addr = cas_page_map(page->buffer);
			memcpy(p, addr, dlen + cp->crc_size);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
						       dlen + cp->crc_size,
						       PCI_DMA_FROMDEVICE);
			cas_page_unmap(addr);
			RX_USED_ADD(page, dlen + cp->crc_size);
		}

end_copy_pkt:
		if (cp->crc_size)
			crcaddr = skb->data + alloclen;

		skb_put(skb, alloclen);
	}

	i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]);
	if (cp->crc_size) {
		/* checksum includes FCS. strip it out. */
		i = csum_fold(csum_partial(crcaddr, cp->crc_size, i));
		if (addr)
			cas_page_unmap(addr);
	}
	skb->csum = ntohs(i ^ 0xffff);
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb->protocol = eth_type_trans(skb, cp->dev);
	return len;
}
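/* Illustrative sketch of the FCS strip above (not driver code; the
 * variable names are hypothetical): the hardware checksum covers the
 * payload plus the trailing FCS.  Folding the FCS bytes back into the
 * sum and complementing leaves a value that reflects the payload
 * alone, which is what CHECKSUM_COMPLETE requires:
 *
 *	u32 sum = hw_csum;                          // payload + FCS
 *	sum = csum_fold(csum_partial(fcs, 4, sum)); // fold FCS back in
 *	skb->csum = ntohs(sum ^ 0xffff);            // payload only
 */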
/* we can handle up to 64 rx flows at a time. we do the same thing
 * as nonreassm except that we batch up the buffers.
 * NOTE: we currently just treat each flow as a bunch of packets that
 *       we pass up. a better way would be to coalesce the packets
 *       into a jumbo packet. to do that, we need to do the following:
 *       1) the first packet will have a clean split between header and
 *          data.
 *       2) each time the next flow packet comes in, extend the
 *          data length and merge the checksums.
 *       3) on flow release, fix up the header.
 *       4) make sure the higher layer doesn't care.
 * because packets get coalesced, we shouldn't run into fragment count
 * issues.
 */
static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
				   struct sk_buff *skb)
{
	int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
	struct sk_buff_head *flow = &cp->rx_flows[flowid];

	/* this is protected at a higher layer, so no need to
	 * do any additional locking here. stick the buffer
	 * at the end.
	 */
	__skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow);
	if (words[0] & RX_COMP1_RELEASE_FLOW) {
		while ((skb = __skb_dequeue(flow))) {
			cas_skb_release(skb);
		}
	}
}
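/* For illustration: with N_RX_FLOWS == 64, the flow queue is selected
 * by the low 6 bits of the completion's flow id field, so a hardware
 * flow id of 0x47 lands in cp->rx_flows[7]:
 *
 *	flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (64 - 1);
 */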
/* put rx descriptor back on ring. if a buffer is in use by a higher
 * layer, this will need to put in a replacement.
 */
static void cas_post_page(struct cas *cp, const int ring, const int index)
{
	cas_page_t *new;
	int entry;

	entry = cp->rx_old[ring];

	new = cas_page_swap(cp, ring, index);
	cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
	cp->init_rxds[ring][entry].index =
		cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
			    CAS_BASE(RX_INDEX_RING, ring));

	entry = RX_DESC_ENTRY(ring, entry + 1);
	cp->rx_old[ring] = entry;
	if (entry % 4)
		return;

	if (ring == 0)
		writel(entry, cp->regs + REG_RX_KICK);
	else if ((N_RX_DESC_RINGS > 1) &&
		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
		writel(entry, cp->regs + REG_PLUS_RX_KICK1);
}
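/* For illustration: descriptors come back one at a time but the kick
 * register is only written on 4-entry boundaries, e.g. posting entries
 * 13, 14, 15 does nothing visible; posting entry 16 makes (entry % 4)
 * zero and triggers the writel, so the hardware sees batches of four.
 */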
/* only when things are bad */
static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
{
	unsigned int entry, last, count, released;
	int cluster;
	cas_page_t **page = cp->rx_pages[ring];

	entry = cp->rx_old[ring];

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n",
		       cp->dev->name, ring, entry);

	cluster = -1;
	count = entry & 0x3;
	last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
	released = 0;
	while (entry != last) {
		/* make a new buffer if it's still in use */
		if (cas_buffer_count(page[entry]) > 1) {
			cas_page_t *new = cas_page_dequeue(cp);
			if (!new) {
				/* let the timer know that we need to
				 * do this again
				 */
				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
				if (!timer_pending(&cp->link_timer))
					mod_timer(&cp->link_timer, jiffies +
						  CAS_LINK_FAST_TIMEOUT);
				cp->rx_old[ring]  = entry;
				cp->rx_last[ring] = num ? num - released : 0;
				return -ENOMEM;
			}
			spin_lock(&cp->rx_inuse_lock);
			list_add(&page[entry]->list, &cp->rx_inuse_list);
			spin_unlock(&cp->rx_inuse_lock);
			cp->init_rxds[ring][entry].buffer =
				cpu_to_le64(new->dma_addr);
			page[entry] = new;
		}

		if (++count == 4) {
			cluster = entry;
			count = 0;
		}
		released++;
		entry = RX_DESC_ENTRY(ring, entry + 1);
	}
	cp->rx_old[ring] = entry;

	if (cluster < 0)
		return 0;

	if (ring == 0)
		writel(cluster, cp->regs + REG_RX_KICK);
	else if ((N_RX_DESC_RINGS > 1) &&
		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
		writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
	return 0;
}
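/* For illustration (hypothetical sizes): with a 64-entry ring,
 * entry == 10 and num == 0, last = RX_DESC_ENTRY(ring, 10 - 4) == 6,
 * so the loop walks nearly the whole ring (10, 11, ..., 63, 0, ..., 5)
 * while always leaving a 4-entry gap behind the hardware.
 */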
/* process a completion ring. packets are set up in three basic ways:
 * small packets: should be copied header + data in single buffer.
 * large packets: header and data in a single buffer.
 * split packets: header in a separate buffer from data.
 *                data may be in multiple pages. data may be > 256
 *                bytes but in a single page.
 *
 * NOTE: RX page posting is done in this routine as well. while there's
 *       the capability of using multiple RX completion rings, it isn't
 *       really worthwhile due to the fact that the page posting will
 *       force serialization on the single descriptor ring.
 */
static int cas_rx_ringN(struct cas *cp, int ring, int budget)
{
	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
	int entry, drops;
	int npackets = 0;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
		       cp->dev->name, ring,
		       readl(cp->regs + REG_RX_COMP_HEAD),
		       cp->rx_new[ring]);

	entry = cp->rx_new[ring];
	drops = 0;
	while (1) {
		struct cas_rx_comp *rxc = rxcs + entry;
		struct sk_buff *skb;
		int type, len;
		u64 words[4];
		int i, dring;

		words[0] = le64_to_cpu(rxc->word1);
		words[1] = le64_to_cpu(rxc->word2);
		words[2] = le64_to_cpu(rxc->word3);
		words[3] = le64_to_cpu(rxc->word4);

		/* don't touch if still owned by hw */
		type = CAS_VAL(RX_COMP1_TYPE, words[0]);
		if (type == 0)
			break;

		/* hw hasn't cleared the zero bit yet */
		if (words[3] & RX_COMP4_ZERO) {
			break;
		}

		/* get info on the packet */
		if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
			spin_lock(&cp->stat_lock[ring]);
			cp->net_stats[ring].rx_errors++;
			if (words[3] & RX_COMP4_LEN_MISMATCH)
				cp->net_stats[ring].rx_length_errors++;
			if (words[3] & RX_COMP4_BAD)
				cp->net_stats[ring].rx_crc_errors++;
			spin_unlock(&cp->stat_lock[ring]);

			/* We'll just return it to Cassini. */
		drop_it:
			spin_lock(&cp->stat_lock[ring]);
			++cp->net_stats[ring].rx_dropped;
			spin_unlock(&cp->stat_lock[ring]);
			goto next;
		}

		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
		if (len < 0) {
			++drops;
			goto drop_it;
		}

		/* see if it's a flow re-assembly or not. the driver
		 * itself handles release back up.
		 */
		if (RX_DONT_BATCH || (type == 0x2)) {
			/* non-reassm: these always get released */
			cas_skb_release(skb);
		} else {
			cas_rx_flow_pkt(cp, words, skb);
		}

		spin_lock(&cp->stat_lock[ring]);
		cp->net_stats[ring].rx_packets++;
		cp->net_stats[ring].rx_bytes += len;
		spin_unlock(&cp->stat_lock[ring]);
		cp->dev->last_rx = jiffies;

	next:
		npackets++;

		/* should it be released? */
		if (words[0] & RX_COMP1_RELEASE_HDR) {
			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		if (words[0] & RX_COMP1_RELEASE_DATA) {
			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		if (words[0] & RX_COMP1_RELEASE_NEXT) {
			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		/* skip to the next entry */
		entry = RX_COMP_ENTRY(ring, entry + 1 +
				      CAS_VAL(RX_COMP1_SKIP, words[0]));
		if (budget && (npackets >= budget))
			break;
	}
	cp->rx_new[ring] = entry;

	if (drops)
		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
		       cp->dev->name);
	return npackets;
}
/* put completion entries back on the ring */
static void cas_post_rxcs_ringN(struct net_device *dev,
				struct cas *cp, int ring)
{
	struct cas_rx_comp *rxc = cp->init_rxcs[ring];
	int last, entry;

	last = cp->rx_cur[ring];
	entry = cp->rx_new[ring];
	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
		       dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
		       entry);

	/* zero and re-mark descriptors */
	while (last != entry) {
		cas_rxc_init(rxc + last);
		last = RX_COMP_ENTRY(ring, last + 1);
	}
	cp->rx_cur[ring] = last;

	if (ring == 0)
		writel(last, cp->regs + REG_RX_COMP_TAIL);
	else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
		writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
}
/* cassini can use all four PCI interrupts for the completion ring.
 * rings 3 and 4 are identical
 */
#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
static inline void cas_handle_irqN(struct net_device *dev,
				   struct cas *cp, const u32 status,
				   const int ring)
{
	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
		cas_post_rxcs_ringN(dev, cp, ring);
}

static irqreturn_t cas_interruptN(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	int ring;
	u32 status;

	/* figure out which completion ring this irq serves before
	 * reading that ring's status word
	 */
	ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
	status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));

	/* check for shared irq */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
		cas_mask_intr(cp);
		netif_rx_schedule(dev, &cp->napi);
#else
		cas_rx_ringN(cp, ring, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}

	if (status)
		cas_handle_irqN(dev, cp, status, ring);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
#endif
/* everything but rx packets */
static inline void cas_handle_irq1(struct cas *cp, const u32 status)
{
	if (status & INTR_RX_BUF_UNAVAIL_1) {
		/* Frame arrived, no free RX buffers available.
		 * NOTE: we can get this on a link transition. */
		cas_post_rxds_ringN(cp, 1, 0);
		spin_lock(&cp->stat_lock[1]);
		cp->net_stats[1].rx_dropped++;
		spin_unlock(&cp->stat_lock[1]);
	}

	if (status & INTR_RX_BUF_AE_1)
		cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
				    RX_AE_FREEN_VAL(1));

	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
		cas_post_rxcs_ringN(cp->dev, cp, 1);
}

/* ring 2 handles a few more events than 3 and 4 */
static irqreturn_t cas_interrupt1(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));

	/* check for shared interrupt */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
		cas_mask_intr(cp);
		netif_rx_schedule(dev, &cp->napi);
#else
		cas_rx_ringN(cp, 1, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}
	if (status)
		cas_handle_irq1(cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
static inline void cas_handle_irq(struct net_device *dev,
				  struct cas *cp, const u32 status)
{
	/* housekeeping interrupts */
	if (status & INTR_ERROR_MASK)
		cas_abnormal_irq(dev, cp, status);

	if (status & INTR_RX_BUF_UNAVAIL) {
		/* Frame arrived, no free RX buffers available.
		 * NOTE: we can get this on a link transition.
		 */
		cas_post_rxds_ringN(cp, 0, 0);
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_dropped++;
		spin_unlock(&cp->stat_lock[0]);
	} else if (status & INTR_RX_BUF_AE) {
		cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
				    RX_AE_FREEN_VAL(0));
	}

	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
		cas_post_rxcs_ringN(dev, cp, 0);
}

static irqreturn_t cas_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	u32 status = readl(cp->regs + REG_INTR_STATUS);

	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
		cas_tx(dev, cp, status);
		status &= ~(INTR_TX_ALL | INTR_TX_INTME);
	}

	if (status & INTR_RX_DONE) {
#ifdef USE_NAPI
		cas_mask_intr(cp);
		netif_rx_schedule(dev, &cp->napi);
#else
		cas_rx_ringN(cp, 0, 0);
#endif
		status &= ~INTR_RX_DONE;
	}

	if (status)
		cas_handle_irq(dev, cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
static int cas_poll(struct napi_struct *napi, int budget)
{
	struct cas *cp = container_of(napi, struct cas, napi);
	struct net_device *dev = cp->dev;
	int i, enable_intr, credits;
	u32 status = readl(cp->regs + REG_INTR_STATUS);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cas_tx(dev, cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);

	/* NAPI rx packets. we spread the credits across all of the
	 * rxc rings
	 *
	 * to make sure we're fair with the work we loop through each
	 * ring N_RX_COMP_RING times with a request of
	 * budget / N_RX_COMP_RINGS
	 */
	enable_intr = 1;
	credits = 0;
	for (i = 0; i < N_RX_COMP_RINGS; i++) {
		int j;
		for (j = 0; j < N_RX_COMP_RINGS; j++) {
			credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
			if (credits >= budget) {
				enable_intr = 0;
				goto rx_comp;
			}
		}
	}

rx_comp:
	/* final rx completion */
	spin_lock_irqsave(&cp->lock, flags);
	if (status)
		cas_handle_irq(dev, cp, status);

#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
		if (status)
			cas_handle_irq1(cp, status);
	}
#endif

#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
		if (status)
			cas_handle_irqN(dev, cp, status, 2);
	}
#endif

#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
		if (status)
			cas_handle_irqN(dev, cp, status, 3);
	}
#endif
	spin_unlock_irqrestore(&cp->lock, flags);
	if (enable_intr) {
		netif_rx_complete(dev, napi);
		cas_unmask_intr(cp);
	}
	return credits;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cas_netpoll(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	cas_disable_irq(cp, 0);
	cas_interrupt(cp->pdev->irq, dev);
	cas_enable_irq(cp, 0);

#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		/* cas_interrupt1(); */
	}
#endif
#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		/* cas_interruptN(); */
	}
#endif
#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		/* cas_interruptN(); */
	}
#endif
}
#endif
static void cas_tx_timeout(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	if (!cp->hw_running) {
		printk("%s: hrm.. hw not running!\n", dev->name);
		return;
	}

	printk(KERN_ERR "%s: MIF_STATE[%08x]\n",
	       dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE));

	printk(KERN_ERR "%s: MAC_STATE[%08x]\n",
	       dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE));

	printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] "
	       "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
	       dev->name,
	       readl(cp->regs + REG_TX_CFG),
	       readl(cp->regs + REG_MAC_TX_STATUS),
	       readl(cp->regs + REG_MAC_TX_CFG),
	       readl(cp->regs + REG_TX_FIFO_PKT_CNT),
	       readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
	       readl(cp->regs + REG_TX_FIFO_READ_PTR),
	       readl(cp->regs + REG_TX_SM_1),
	       readl(cp->regs + REG_TX_SM_2));

	printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(cp->regs + REG_RX_CFG),
	       readl(cp->regs + REG_MAC_RX_STATUS),
	       readl(cp->regs + REG_MAC_RX_CFG));

	printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n",
	       dev->name,
	       readl(cp->regs + REG_HP_STATE_MACHINE),
	       readl(cp->regs + REG_HP_STATUS0),
	       readl(cp->regs + REG_HP_STATUS1),
	       readl(cp->regs + REG_HP_STATUS2));

#if 1
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	schedule_work(&cp->reset_task);
#endif
}
static inline int cas_intme(int ring, int entry)
{
	/* Algorithm: IRQ every 1/2 of descriptors. */
	if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
		return 1;
	return 0;
}
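/* For illustration (hypothetical ring size): with
 * TX_DESC_RINGN_SIZE(ring) == 64 the mask is (64 >> 1) - 1 == 31, so
 * only entries 0 and 32 satisfy (entry & 31) == 0 and request an
 * interrupt -- one INTME per half ring.
 */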
static void cas_write_txd(struct cas *cp, int ring, int entry,
			  dma_addr_t mapping, int len, u64 ctrl, int last)
{
	struct cas_tx_desc *txd = cp->init_txds[ring] + entry;

	ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
	if (cas_intme(ring, entry))
		ctrl |= TX_DESC_INTME;
	if (last)
		ctrl |= TX_DESC_EOF;
	txd->control = cpu_to_le64(ctrl);
	txd->buffer = cpu_to_le64(mapping);
}
static inline void *tx_tiny_buf(struct cas *cp, const int ring,
				const int entry)
{
	return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
}

static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
				     const int entry, const int tentry)
{
	cp->tx_tiny_use[ring][tentry].nbufs++;
	cp->tx_tiny_use[ring][entry].used = 1;
	return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
}
static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
				    struct sk_buff *skb)
{
	struct net_device *dev = cp->dev;
	int entry, nr_frags, frag, tabort, tentry;
	dma_addr_t mapping;
	unsigned long flags;
	u64 ctrl;
	u32 len;

	spin_lock_irqsave(&cp->tx_lock[ring], flags);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp, ring) <=
	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
		       "queue awake!\n", dev->name);
		return 1;
	}

	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_transport_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = TX_DESC_CSUM_EN |
			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
			CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
	}

	entry = cp->tx_new[ring];
	cp->tx_skbs[ring][entry] = skb;

	nr_frags = skb_shinfo(skb)->nr_frags;
	len = skb_headlen(skb);
	mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
			       offset_in_page(skb->data), len,
			       PCI_DMA_TODEVICE);

	tentry = entry;
	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
	if (unlikely(tabort)) {
		/* NOTE: len is always > tabort */
		cas_write_txd(cp, ring, entry, mapping, len - tabort,
			      ctrl | TX_DESC_SOF, 0);
		entry = TX_DESC_NEXT(ring, entry);

		skb_copy_from_linear_data_offset(skb, len - tabort,
						 tx_tiny_buf(cp, ring, entry),
						 tabort);
		mapping = tx_tiny_map(cp, ring, entry, tentry);
		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
			      (nr_frags == 0));
	} else {
		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
			      TX_DESC_SOF, (nr_frags == 0));
	}
	entry = TX_DESC_NEXT(ring, entry);

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		len = fragp->size;
		mapping = pci_map_page(cp->pdev, fragp->page,
				       fragp->page_offset, len,
				       PCI_DMA_TODEVICE);

		tabort = cas_calc_tabort(cp, fragp->page_offset, len);
		if (unlikely(tabort)) {
			void *addr;

			/* NOTE: len is always > tabort */
			cas_write_txd(cp, ring, entry, mapping, len - tabort,
				      ctrl, 0);
			entry = TX_DESC_NEXT(ring, entry);

			addr = cas_page_map(fragp->page);
			memcpy(tx_tiny_buf(cp, ring, entry),
			       addr + fragp->page_offset + len - tabort,
			       tabort);
			cas_page_unmap(addr);
			mapping = tx_tiny_map(cp, ring, entry, tentry);
			len = tabort;
		}

		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
			      (frag + 1 == nr_frags));
		entry = TX_DESC_NEXT(ring, entry);
	}

	cp->tx_new[ring] = entry;
	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	if (netif_msg_tx_queued(cp))
		printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
		       "avail %d\n",
		       dev->name, ring, entry, skb->len,
		       TX_BUFFS_AVAIL(cp, ring));
	writel(entry, cp->regs + REG_TX_KICKN(ring));
	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
	return 0;
}
static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	static int ring;

	/* this is only used as a load-balancing hint, so it doesn't
	 * need to be SMP safe
	 */
	if (skb_padto(skb, cp->min_frame_size))
		return 0;

	/* XXX: we need some higher-level QoS hooks to steer packets to
	 *      individual queues.
	 */
	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
		return 1;
	dev->trans_start = jiffies;
	return 0;
}
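/* For illustration: with four rings, N_TX_RINGS_MASK == 3 and the
 * static counter makes successive calls hit rings 0, 1, 2, 3, 0, ...
 * A racy increment only skews the distribution slightly, which is why
 * the hint "doesn't need to be SMP safe" above.
 */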
static void cas_init_tx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	unsigned long off;
	u32 val;
	int i;

	/* set up tx completion writeback registers. must be 8-byte aligned */
#ifdef USE_TX_COMPWB
	off = offsetof(struct cas_init_block, tx_compwb);
	writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
	writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
#endif

	/* enable completion writebacks, enable paced mode,
	 * disable read pipe, and disable pre-interrupt compwbs
	 */
	val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
		TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
		TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
		TX_CFG_INTR_COMPWB_DIS;

	/* write out tx ring info and tx desc bases */
	for (i = 0; i < MAX_TX_RINGS; i++) {
		off = (unsigned long) cp->init_txds[i] -
			(unsigned long) cp->init_block;

		val |= CAS_TX_RINGN_BASE(i);
		writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
		writel((desc_dma + off) & 0xffffffff, cp->regs +
		       REG_TX_DBN_LOW(i));
		/* don't zero out the kick register here as the system
		 * will wedge
		 */
	}
	writel(val, cp->regs + REG_TX_CFG);

	/* program max burst sizes. these numbers should be different
	 * if doing QoS.
	 */
#ifdef USE_QOS
	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
	writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
	writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
	writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
#else
	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
	writel(0x800, cp->regs + REG_TX_MAXBURST_1);
	writel(0x800, cp->regs + REG_TX_MAXBURST_2);
	writel(0x800, cp->regs + REG_TX_MAXBURST_3);
#endif
}
/* Must be invoked under cp->lock. */
static inline void cas_init_dma(struct cas *cp)
{
	cas_init_tx_dma(cp);
	cas_init_rx_dma(cp);
}
/* Must be invoked under cp->lock. */
static u32 cas_setup_multicast(struct cas *cp)
{
	u32 rxcfg = 0;
	int i;

	if (cp->dev->flags & IFF_PROMISC) {
		rxcfg |= MAC_RX_CFG_PROMISC_EN;

	} else if (cp->dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < 16; i++)
			writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;

	} else {
		u16 hash_table[16];
		u32 crc;
		struct dev_mc_list *dmi = cp->dev->mc_list;

		/* use the alternate mac address registers for the
		 * first 15 multicast addresses
		 */
		for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) {
			if (!dmi) {
				writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0));
				writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1));
				writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
				continue;
			}
			writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
			       cp->regs + REG_MAC_ADDRN(i*3 + 0));
			writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
			       cp->regs + REG_MAC_ADDRN(i*3 + 1));
			writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
			       cp->regs + REG_MAC_ADDRN(i*3 + 2));
			dmi = dmi->next;
		}

		/* use hw hash table for the next series of
		 * multicast addresses
		 */
		memset(hash_table, 0, sizeof(hash_table));
		while (dmi) {
			crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
			crc >>= 24;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
			dmi = dmi->next;
		}
		for (i = 0; i < 16; i++)
			writel(hash_table[i], cp->regs +
			       REG_MAC_HASH_TABLEN(i));
		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
	}

	return rxcfg;
}
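/* For illustration (hypothetical address): after crc >>= 24 only the
 * top 8 CRC bits remain.  A value of 0xA5 selects hash_table[0xA] and
 * bit (15 - 5) == 10, i.e.
 *
 *	hash_table[0xa] |= 1 << 10;
 *
 * which is how the 256-bit hash filter is split across 16 16-bit
 * registers.
 */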
/* must be invoked under cp->stat_lock[N_TX_RINGS] */
static void cas_clear_mac_err(struct cas *cp)
{
	writel(0, cp->regs + REG_MAC_COLL_NORMAL);
	writel(0, cp->regs + REG_MAC_COLL_FIRST);
	writel(0, cp->regs + REG_MAC_COLL_EXCESS);
	writel(0, cp->regs + REG_MAC_COLL_LATE);
	writel(0, cp->regs + REG_MAC_TIMER_DEFER);
	writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
	writel(0, cp->regs + REG_MAC_RECV_FRAME);
	writel(0, cp->regs + REG_MAC_LEN_ERR);
	writel(0, cp->regs + REG_MAC_ALIGN_ERR);
	writel(0, cp->regs + REG_MAC_FCS_ERR);
	writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
}
static void cas_mac_reset(struct cas *cp)
{
	int i;

	/* do both TX and RX reset */
	writel(0x1, cp->regs + REG_MAC_TX_RESET);
	writel(0x1, cp->regs + REG_MAC_RX_RESET);

	/* wait for TX */
	i = STOP_TRIES;
	while (i-- > 0) {
		if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
			break;
		udelay(10);
	}

	/* wait for RX */
	i = STOP_TRIES;
	while (i-- > 0) {
		if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
			break;
		udelay(10);
	}

	if (readl(cp->regs + REG_MAC_TX_RESET) |
	    readl(cp->regs + REG_MAC_RX_RESET))
		printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n",
		       cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET),
		       readl(cp->regs + REG_MAC_RX_RESET),
		       readl(cp->regs + REG_MAC_STATE_MACHINE));
}
/* Must be invoked under cp->lock. */
static void cas_init_mac(struct cas *cp)
{
	unsigned char *e = &cp->dev->dev_addr[0];
	int i;
#ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE
	u32 rxcfg;
#endif
	cas_mac_reset(cp);

	/* setup core arbitration weight register */
	writel(CAWR_RR_DIS, cp->regs + REG_CAWR);

	/* XXX Use pci_dma_burst_advice() */
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	/* set the infinite burst register for chips that don't have
	 * pci issues.
	 */
	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
		writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
#endif

	writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);

	writel(0x00, cp->regs + REG_MAC_IPG0);
	writel(0x08, cp->regs + REG_MAC_IPG1);
	writel(0x04, cp->regs + REG_MAC_IPG2);

	/* change later for 802.3z */
	writel(0x40, cp->regs + REG_MAC_SLOT_TIME);

	/* min frame + FCS */
	writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);

	/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
	 * specify the maximum frame size to prevent RX tag errors on
	 * oversized frames.
	 */
	writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
	       CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
			(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
	       cp->regs + REG_MAC_FRAMESIZE_MAX);

	/* NOTE: crc_size is used as a surrogate for half-duplex.
	 * workaround saturn half-duplex issue by increasing preamble
	 * size to 65 bytes.
	 */
	if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
		writel(0x41, cp->regs + REG_MAC_PA_SIZE);
	else
		writel(0x07, cp->regs + REG_MAC_PA_SIZE);
	writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
	writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
	writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);

	writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);

	writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);

	/* setup mac address in perfect filter array */
	for (i = 0; i < 45; i++)
		writel(0x0, cp->regs + REG_MAC_ADDRN(i));

	writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
	writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
	writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));

	writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
	writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
	writel(0x0180, cp->regs + REG_MAC_ADDRN(44));

#ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE
	cp->mac_rx_cfg = cas_setup_multicast(cp);
#else
	/* WTZ: Do what Adrian did in cas_set_multicast. Doing
	 * a writel does not seem to be necessary because Cassini
	 * seems to preserve the configuration when we do the reset.
	 * If the chip is in trouble, though, it is not clear if we
	 * can really count on this behavior. cas_set_multicast uses
	 * spin_lock_irqsave, but we are called only in cas_init_hw and
	 * cas_init_hw is protected by cas_lock_all, which calls
	 * spin_lock_irq (so it doesn't need to save the flags, and
	 * we should be OK for the writel, as that is the only
	 * difference).
	 */
	cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp);
	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
#endif
	spin_lock(&cp->stat_lock[N_TX_RINGS]);
	cas_clear_mac_err(cp);
	spin_unlock(&cp->stat_lock[N_TX_RINGS]);

	/* Setup MAC interrupts.  We want to get all of the interesting
	 * counter expiration events, but we do not want to hear about
	 * normal rx/tx as the DMA engine tells us that.
	 */
	writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);

	/* Don't enable even the PAUSE interrupts for now, we
	 * make no use of those events other than to record them.
	 */
	writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
}
/* Must be invoked under cp->lock. */
static void cas_init_pause_thresholds(struct cas *cp)
{
	/* Calculate pause thresholds.  Setting the OFF threshold to the
	 * full RX fifo size effectively disables PAUSE generation
	 */
	if (cp->rx_fifo_size <= (2 * 1024)) {
		cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
	} else {
		int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
		if (max_frame * 3 > cp->rx_fifo_size) {
			cp->rx_pause_off = 7104;
			cp->rx_pause_on  = 960;
		} else {
			int off = (cp->rx_fifo_size - (max_frame * 2));
			int on  = off - max_frame;
			cp->rx_pause_off = off;
			cp->rx_pause_on = on;
		}
	}
}
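/* Worked example (hypothetical config): mtu == 1500 with a 16KB rx
 * fifo gives max_frame = (1500 + 14 + 4 + 4 + 64) & ~63 = 1536.  Since
 * 3 * 1536 < 16384 the general case applies:
 * off = 16384 - 2*1536 = 13312 and on = 13312 - 1536 = 11776, i.e.
 * PAUSE is asserted once less than two full frames of space remain.
 */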
static int cas_vpd_match(const void __iomem *p, const char *str)
{
	int len = strlen(str) + 1;
	int i;

	for (i = 0; i < len; i++) {
		if (readb(p + i) != str[i])
			return 0;
	}
	return 1;
}
/* get the mac address by reading the vpd information in the rom.
 * also get the phy type and determine if there's an entropy generator.
 * NOTE: this is a bit convoluted for the following reasons:
 *  1) vpd info has order-dependent mac addresses for multinic cards
 *  2) the only way to determine the nic order is to use the slot
 *     number.
 *  3) fiber cards don't have bridges, so their slot numbers don't
 *     mean anything.
 *  4) we don't actually know we have a fiber card until after
 *     the mac addresses are parsed.
 */
static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
			    const int offset)
{
	void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
	void __iomem *base, *kstart;
	int i, len;
	int found = 0;
#define VPD_FOUND_MAC        0x01
#define VPD_FOUND_PHY        0x02

	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
	int mac_off  = 0;

	/* give us access to the PROM */
	writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);

	/* check for an expansion rom */
	if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
		goto use_random_mac_addr;

	/* search for beginning of vpd */
	base = NULL;
	for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
		/* check for PCIR */
		if ((readb(p + i + 0) == 0x50) &&
		    (readb(p + i + 1) == 0x43) &&
		    (readb(p + i + 2) == 0x49) &&
		    (readb(p + i + 3) == 0x52)) {
			base = p + (readb(p + i + 8) |
				    (readb(p + i + 9) << 8));
			break;
		}
	}

	if (!base || (readb(base) != 0x82))
		goto use_random_mac_addr;

	i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
	while (i < EXPANSION_ROM_SIZE) {
		if (readb(base + i) != 0x90) /* no vpd found */
			goto use_random_mac_addr;

		/* found a vpd field */
		len = readb(base + i + 1) | (readb(base + i + 2) << 8);

		/* extract keywords */
		kstart = base + i + 3;
		p = kstart;
		while ((p - kstart) < len) {
			int klen = readb(p + 2);
			int j;
			char type;

			p += 3;

			/* look for the following things:
			 * -- correct length == 29
			 *    3 (type) + 2 (size) +
			 *    18 (strlen("local-mac-address") + 1) +
			 *    6 (mac addr) +
			 * -- VPD Instance 'I'
			 * -- VPD Type Bytes 'B'
			 * -- VPD data length == 6
			 * -- property string == local-mac-address
			 *
			 * -- correct length == 24
			 *    3 (type) + 2 (size) +
			 *    12 (strlen("entropy-dev") + 1) +
			 *    7 (strlen("vms110") + 1)
			 * -- VPD Instance 'I'
			 * -- VPD Type String 'B'
			 * -- VPD data length == 7
			 * -- property string == entropy-dev
			 *
			 * -- correct length == 18
			 *    3 (type) + 2 (size) +
			 *    9 (strlen("phy-type") + 1) +
			 *    4 (strlen("pcs") + 1)
			 * -- VPD Instance 'I'
			 * -- VPD Type String 'S'
			 * -- VPD data length == 4
			 * -- property string == phy-type
			 *
			 * -- correct length == 23
			 *    3 (type) + 2 (size) +
			 *    14 (strlen("phy-interface") + 1) +
			 *    4 (strlen("pcs") + 1)
			 * -- VPD Instance 'I'
			 * -- VPD Type String 'S'
			 * -- VPD data length == 4
			 * -- property string == phy-interface
			 */
			if (readb(p) != 'I')
				goto next;

			/* finally, check string and length */
			type = readb(p + 3);
			if (type == 'B') {
				if ((klen == 29) && readb(p + 4) == 6 &&
				    cas_vpd_match(p + 5,
						  "local-mac-address")) {
					if (mac_off++ > offset)
						goto next;

					/* set mac address */
					for (j = 0; j < 6; j++)
						dev_addr[j] =
							readb(p + 23 + j);
					goto found_mac;
				}
			}

			if (type != 'S')
				goto next;

#ifdef USE_ENTROPY_DEV
			if ((klen == 24) &&
			    cas_vpd_match(p + 5, "entropy-dev") &&
			    cas_vpd_match(p + 17, "vms110")) {
				cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
				goto next;
			}
#endif

			if (found & VPD_FOUND_PHY)
				goto next;

			if ((klen == 18) && readb(p + 4) == 4 &&
			    cas_vpd_match(p + 5, "phy-type")) {
				if (cas_vpd_match(p + 14, "pcs")) {
					phy_type = CAS_PHY_SERDES;
					goto found_phy;
				}
			}

			if ((klen == 23) && readb(p + 4) == 4 &&
			    cas_vpd_match(p + 5, "phy-interface")) {
				if (cas_vpd_match(p + 19, "pcs")) {
					phy_type = CAS_PHY_SERDES;
					goto found_phy;
				}
			}
found_mac:
			found |= VPD_FOUND_MAC;
			goto next;

found_phy:
			found |= VPD_FOUND_PHY;

next:
			p += klen;
		}
		i += len + 3;
	}

use_random_mac_addr:
	if (found & VPD_FOUND_MAC)
		goto done;

	/* Sun MAC prefix then 3 random bytes. */
	printk(PFX "MAC address not found in ROM VPD\n");
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(dev_addr + 3, 3);

done:
	writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
	return phy_type;
}
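/* For illustration, a "phy-type" descriptor the parser accepts above
 * is laid out roughly like this in the ROM, with klen == 18
 * accounting for 3 (type) + 2 (size) + 9 ("phy-type" + NUL) + 4
 * ("pcs" + NUL):
 *
 *	p:    'I' <len> <len>    instance tag and record length
 *	p+3:  'S'                VPD type: string
 *	p+4:  0x04               data length
 *	p+5:  "phy-type\0"       property name
 *	p+14: "pcs\0"            value, checked by cas_vpd_match()
 */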
/* check pci invariants */
static void cas_check_pci_invariants(struct cas *cp)
{
	struct pci_dev *pdev = cp->pdev;

	cp->cas_flags = 0;
	if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
	    (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
		if (pdev->revision >= CAS_ID_REVPLUS)
			cp->cas_flags |= CAS_FLAG_REG_PLUS;
		if (pdev->revision < CAS_ID_REVPLUS02u)
			cp->cas_flags |= CAS_FLAG_TARGET_ABORT;

		/* Original Cassini supports HW CSUM, but it's not
		 * enabled by default as it can trigger TX hangs.
		 */
		if (pdev->revision < CAS_ID_REV2)
			cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
	} else {
		/* Only sun has original cassini chips. */
		cp->cas_flags |= CAS_FLAG_REG_PLUS;

		/* We use a flag because the same phy might be externally
		 * connected.
		 */
		if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
		    (pdev->device == PCI_DEVICE_ID_NS_SATURN))
			cp->cas_flags |= CAS_FLAG_SATURN;
	}
}
static int cas_check_invariants(struct cas *cp)
{
	struct pci_dev *pdev = cp->pdev;
	u32 cfg;
	int i;

	/* get page size for rx buffers. */
	cp->page_order = 0;
#ifdef USE_PAGE_ORDER
	if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
		/* see if we can allocate larger pages */
		struct page *page = alloc_pages(GFP_ATOMIC,
						CAS_JUMBO_PAGE_SHIFT -
						PAGE_SHIFT);
		if (page) {
			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
			cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
		} else {
			printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU);
		}
	}
#endif
	cp->page_size = (PAGE_SIZE << cp->page_order);

	/* Fetch the FIFO configurations. */
	cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
	cp->rx_fifo_size = RX_FIFO_SIZE;

	/* finish phy determination. MDIO1 takes precedence over MDIO0 if
	 * they're both connected.
	 */
	cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
					PCI_SLOT(pdev->devfn));
	if (cp->phy_type & CAS_PHY_SERDES) {
		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
		return 0; /* no more checking needed */
	}

	/* MII */
	cfg = readl(cp->regs + REG_MIF_CFG);
	if (cfg & MIF_CFG_MDIO_1) {
		cp->phy_type = CAS_PHY_MII_MDIO1;
	} else if (cfg & MIF_CFG_MDIO_0) {
		cp->phy_type = CAS_PHY_MII_MDIO0;
	}

	cas_mif_poll(cp, 0);
	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);

	for (i = 0; i < 32; i++) {
		u32 phy_id;
		int j;

		for (j = 0; j < 3; j++) {
			cp->phy_addr = i;
			phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
			phy_id |= cas_phy_read(cp, MII_PHYSID2);
			if (phy_id && (phy_id != 0xFFFFFFFF)) {
				cp->phy_id = phy_id;
				goto done;
			}
		}
	}
	printk(KERN_ERR PFX "MII phy did not respond [%08x]\n",
	       readl(cp->regs + REG_MIF_STATE_MACHINE));
	return -1;

done:
	/* see if we can do gigabit */
	cfg = cas_phy_read(cp, MII_BMSR);
	if ((cfg & CAS_BMSR_1000_EXTEND) &&
	    cas_phy_read(cp, CAS_MII_1000_EXTEND))
		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
	return 0;
}
/* Must be invoked under cp->lock. */
static inline void cas_start_dma(struct cas *cp)
{
	int i;
	u32 val;
	int txfailed = 0;

	/* enable dma */
	val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
	writel(val, cp->regs + REG_TX_CFG);
	val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
	writel(val, cp->regs + REG_RX_CFG);

	/* enable the mac */
	val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
	writel(val, cp->regs + REG_MAC_TX_CFG);
	val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
	writel(val, cp->regs + REG_MAC_RX_CFG);

	i = STOP_TRIES;
	while (i-- > 0) {
		val = readl(cp->regs + REG_MAC_TX_CFG);
		if ((val & MAC_TX_CFG_EN))
			break;
		udelay(10);
	}
	if (i < 0) txfailed = 1;
	i = STOP_TRIES;
	while (i-- > 0) {
		val = readl(cp->regs + REG_MAC_RX_CFG);
		if ((val & MAC_RX_CFG_EN)) {
			if (txfailed) {
				printk(KERN_ERR
				       "%s: enabling mac failed [tx:%08x:%08x].\n",
				       cp->dev->name,
				       readl(cp->regs + REG_MIF_STATE_MACHINE),
				       readl(cp->regs + REG_MAC_STATE_MACHINE));
			}
			goto enable_rx_done;
		}
		udelay(10);
	}
	printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
	       cp->dev->name,
	       (txfailed ? "tx,rx" : "rx"),
	       readl(cp->regs + REG_MIF_STATE_MACHINE),
	       readl(cp->regs + REG_MAC_STATE_MACHINE));

enable_rx_done:
	cas_unmask_intr(cp); /* enable interrupts */
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
	writel(0, cp->regs + REG_RX_COMP_TAIL);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		if (N_RX_DESC_RINGS > 1)
			writel(RX_DESC_RINGN_SIZE(1) - 4,
			       cp->regs + REG_PLUS_RX_KICK1);

		for (i = 1; i < N_RX_COMP_RINGS; i++)
			writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
	}
}
/* Must be invoked under cp->lock. */
static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
				   int *pause)
{
	u32 val = readl(cp->regs + REG_PCS_MII_LPA);
	*fd     = (val & PCS_MII_LPA_FD) ? 1 : 0;
	*pause  = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
	if (val & PCS_MII_LPA_ASYM_PAUSE)
		*pause |= 0x10;
	*spd = 1000;
}
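/* The pause word built here is a small bitfield consumed by
 * cas_set_link_modes(): bit 0 (0x01) means symmetric pause, bit 4
 * (0x10) asymmetric pause.  E.g. *pause == 0x11 means the link partner
 * advertised both, so the MAC is configured to both send and honor
 * PAUSE frames.
 */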
/* Must be invoked under cp->lock. */
static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
				   int *pause)
{
	u32 val;

	*fd = 0;
	*spd = 10;
	*pause = 0;

	/* use GMII registers */
	val = cas_phy_read(cp, MII_LPA);
	if (val & CAS_LPA_PAUSE)
		*pause = 0x01;

	if (val & CAS_LPA_ASYM_PAUSE)
		*pause |= 0x10;

	if (val & LPA_DUPLEX)
		*fd = 1;
	if (val & (LPA_100HALF | LPA_100FULL))
		*spd = 100;

	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
		val = cas_phy_read(cp, CAS_MII_1000_STATUS);
		if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
			*spd = 1000;
		if (val & CAS_LPA_1000FULL)
			*fd = 1;
	}
}
/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 *
 * Must be invoked under cp->lock.
 */
static void cas_set_link_modes(struct cas *cp)
{
	u32 val;
	int full_duplex, speed, pause;

	full_duplex = 0;
	speed = 10;
	pause = 0;

	if (CAS_PHY_MII(cp->phy_type)) {
		cas_mif_poll(cp, 0);
		val = cas_phy_read(cp, MII_BMCR);
		if (val & BMCR_ANENABLE) {
			cas_read_mii_link_mode(cp, &full_duplex, &speed,
					       &pause);
		} else {
			if (val & BMCR_FULLDPLX)
				full_duplex = 1;

			if (val & BMCR_SPEED100)
				speed = 100;
			else if (val & CAS_BMCR_SPEED1000)
				speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
					1000 : 100;
		}
		cas_mif_poll(cp, 1);

	} else {
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
		if ((val & PCS_MII_AUTONEG_EN) == 0) {
			if (val & PCS_MII_CTRL_DUPLEX)
				full_duplex = 1;
		}
	}

	if (netif_msg_link(cp))
		printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n",
		       cp->dev->name, speed, (full_duplex ? "full" : "half"));

	val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
	if (CAS_PHY_MII(cp->phy_type)) {
		val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
		if (!full_duplex)
			val |= MAC_XIF_DISABLE_ECHO;
	}
	if (full_duplex)
		val |= MAC_XIF_FDPLX_LED;
	if (speed == 1000)
		val |= MAC_XIF_GMII_MODE;
	writel(val, cp->regs + REG_MAC_XIF_CFG);

	/* deal with carrier and collision detect. */
	val = MAC_TX_CFG_IPG_EN;
	if (full_duplex) {
		val |= MAC_TX_CFG_IGNORE_CARRIER;
		val |= MAC_TX_CFG_IGNORE_COLL;
	} else {
#ifndef USE_CSMA_CD_PROTO
		val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
		val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
#endif
	}
	/* val now set up for REG_MAC_TX_CFG */

	/* If gigabit and half-duplex, enable carrier extension
	 * mode.  increase slot time to 512 bytes as well.
	 * else, disable it and make sure slot time is 64 bytes.
	 * also activate checksum bug workaround
	 */
	if ((speed == 1000) && !full_duplex) {
		writel(val | MAC_TX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_TX_CFG);

		val = readl(cp->regs + REG_MAC_RX_CFG);
		val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
		writel(val | MAC_RX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_RX_CFG);

		writel(0x200, cp->regs + REG_MAC_SLOT_TIME);

		cp->crc_size = 4;
		/* minimum size gigabit frame at half duplex */
		cp->min_frame_size = CAS_1000MB_MIN_FRAME;

	} else {
		writel(val, cp->regs + REG_MAC_TX_CFG);

		/* checksum bug workaround. don't strip FCS when in
		 * half-duplex mode
		 */
		val = readl(cp->regs + REG_MAC_RX_CFG);
		if (full_duplex) {
			val |= MAC_RX_CFG_STRIP_FCS;
			cp->crc_size = 0;
			cp->min_frame_size = CAS_MIN_MTU;
		} else {
			val &= ~MAC_RX_CFG_STRIP_FCS;
			cp->crc_size = 4;
			cp->min_frame_size = CAS_MIN_FRAME;
		}
		writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_RX_CFG);
		writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
	}

	if (netif_msg_link(cp)) {
		if (pause & 0x01) {
			printk(KERN_INFO "%s: Pause is enabled "
			       "(rxfifo: %d off: %d on: %d)\n",
			       cp->dev->name,
			       cp->rx_fifo_size,
			       cp->rx_pause_off,
			       cp->rx_pause_on);
		} else if (pause & 0x10) {
			printk(KERN_INFO "%s: TX pause enabled\n",
			       cp->dev->name);
		} else {
			printk(KERN_INFO "%s: Pause is disabled\n",
			       cp->dev->name);
		}
	}

	val = readl(cp->regs + REG_MAC_CTRL_CFG);
	val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
	if (pause) { /* symmetric or asymmetric pause */
		val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
		if (pause & 0x01) { /* symmetric pause */
			val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
		}
	}
	writel(val, cp->regs + REG_MAC_CTRL_CFG);
	cas_start_dma(cp);
}
/* Must be invoked under cp->lock. */
static void cas_init_hw(struct cas *cp, int restart_link)
{
	if (restart_link)
		cas_phy_init(cp);

	cas_init_pause_thresholds(cp);
	cas_init_mac(cp);
	cas_init_dma(cp);

	if (restart_link) {
		/* Default aneg parameters */
		cp->timer_ticks = 0;
		cas_begin_auto_negotiation(cp, NULL);
	} else if (cp->lstate == link_up) {
		cas_set_link_modes(cp);
		netif_carrier_on(cp->dev);
	}
}
/* Must be invoked under cp->lock. on earlier cassini boards,
 * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
 * let it settle out, and then restore pci state.
 */
static void cas_hard_reset(struct cas *cp)
{
	writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
	udelay(20);
	pci_restore_state(cp->pdev);
}
static void cas_global_reset(struct cas *cp, int blkflag)
{
	int limit;

	/* issue a global reset. don't use RSTOUT. */
	if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
		/* For PCS, when the blkflag is set, we should set the
		 * SW_REST_BLOCK_PCS_SLINK bit to prevent the results of
		 * the last autonegotiation from being cleared.  We'll
		 * need some special handling if the chip is set into a
		 * loopback mode.
		 */
		writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
		       cp->regs + REG_SW_RESET);
	} else {
		writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
	}

	/* need to wait at least 3ms before polling register */
	mdelay(3);

	limit = STOP_TRIES;
	while (limit-- > 0) {
		u32 val = readl(cp->regs + REG_SW_RESET);
		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
			goto done;
		udelay(10);
	}
	printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name);

done:
	/* enable various BIM interrupts */
	writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
	       BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);

	/* clear out pci error status mask for handled errors.
	 * we don't deal with DMA counter overflows as they happen
	 * all the time.
	 */
	writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
			       PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
			       PCI_ERR_BIM_DMA_READ), cp->regs +
	       REG_PCI_ERR_STATUS_MASK);

	/* set up for MII by default to address mac rx reset timeout
	 * issue
	 */
	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
}
static void cas_reset(struct cas *cp, int blkflag)
{
	u32 val;

	cas_mask_intr(cp);
	cas_global_reset(cp, blkflag);
	cas_mac_reset(cp);
	cas_entropy_reset(cp);

	/* disable dma engines. */
	val = readl(cp->regs + REG_TX_CFG);
	val &= ~TX_CFG_DMA_EN;
	writel(val, cp->regs + REG_TX_CFG);

	val = readl(cp->regs + REG_RX_CFG);
	val &= ~RX_CFG_DMA_EN;
	writel(val, cp->regs + REG_RX_CFG);

	/* program header parser */
	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
	    (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
		cas_load_firmware(cp, CAS_HP_FIRMWARE);
	} else {
		cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
	}

	/* clear out error registers */
	spin_lock(&cp->stat_lock[N_TX_RINGS]);
	cas_clear_mac_err(cp);
	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
}
/* Shut down the chip, must be called with pm_mutex held.  */
static void cas_shutdown(struct cas *cp)
{
	unsigned long flags;

	/* Make us not-running to avoid timers respawning */
	cp->hw_running = 0;

	del_timer_sync(&cp->link_timer);

	/* Stop the reset task */
#if 0
	while (atomic_read(&cp->reset_task_pending_mtu) ||
	       atomic_read(&cp->reset_task_pending_spare) ||
	       atomic_read(&cp->reset_task_pending_all))
		schedule();
#else
	while (atomic_read(&cp->reset_task_pending))
		schedule();
#endif
	/* Actually stop the chip */
	cas_lock_all_save(cp, flags);
	cas_reset(cp, 0);
	if (cp->cas_flags & CAS_FLAG_SATURN)
		cas_phy_powerdown(cp);
	cas_unlock_all_restore(cp, flags);
}
static int cas_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cas *cp = netdev_priv(dev);

	if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* let the reset task handle it */
#if 1
	atomic_inc(&cp->reset_task_pending);
	if ((cp->phy_type & CAS_PHY_SERDES)) {
		atomic_inc(&cp->reset_task_pending_all);
	} else {
		atomic_inc(&cp->reset_task_pending_mtu);
	}
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
		   CAS_RESET_ALL : CAS_RESET_MTU);
	printk(KERN_ERR "reset called in cas_change_mtu\n");
	schedule_work(&cp->reset_task);
#endif

	flush_scheduled_work();
	return 0;
}
static void cas_clean_txd(struct cas *cp, int ring)
{
	struct cas_tx_desc *txd = cp->init_txds[ring];
	struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
	u64 daddr, dlen;
	int i, size;

	size = TX_DESC_RINGN_SIZE(ring);
	for (i = 0; i < size; i++) {
		int frag;

		if (skbs[i] == NULL)
			continue;

		skb = skbs[i];
		skbs[i] = NULL;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			int ent = i & (size - 1);

			/* first buffer is never a tiny buffer and so
			 * needs to be unmapped.
			 */
			daddr = le64_to_cpu(txd[ent].buffer);
			dlen = CAS_VAL(TX_DESC_BUFLEN,
				       le64_to_cpu(txd[ent].control));
			pci_unmap_page(cp->pdev, daddr, dlen,
				       PCI_DMA_TODEVICE);

			if (frag != skb_shinfo(skb)->nr_frags) {
				i++;

				/* next buffer might by a tiny buffer.
				 * skip past it.
				 */
				ent = i & (size - 1);
				if (cp->tx_tiny_use[ring][ent].used)
					i++;
			}
		}
		dev_kfree_skb_any(skb);
	}

	/* zero out tiny buf usage */
	memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
}
/* freed on close */
static inline void cas_free_rx_desc(struct cas *cp, int ring)
{
	cas_page_t **page = cp->rx_pages[ring];
	int i, size;

	size = RX_DESC_RINGN_SIZE(ring);
	for (i = 0; i < size; i++) {
		if (page[i]) {
			cas_page_free(cp, page[i]);
			page[i] = NULL;
		}
	}
}

static void cas_free_rxds(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_DESC_RINGS; i++)
		cas_free_rx_desc(cp, i);
}
/* Must be invoked under cp->lock. */
static void cas_clean_rings(struct cas *cp)
{
	int i;

	/* need to clean all tx rings */
	memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
	memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
	for (i = 0; i < N_TX_RINGS; i++)
		cas_clean_txd(cp, i);

	/* zero out init block */
	memset(cp->init_block, 0, sizeof(struct cas_init_block));
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);
}
/* allocated on open */
static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
{
	cas_page_t **page = cp->rx_pages[ring];
	int i, size;

	size = RX_DESC_RINGN_SIZE(ring);
	for (i = 0; i < size; i++) {
		if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
			return -1;
	}
	return 0;
}

static int cas_alloc_rxds(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_DESC_RINGS; i++) {
		if (cas_alloc_rx_desc(cp, i) < 0) {
			cas_free_rxds(cp);
			return -1;
		}
	}
	return 0;
}
static void cas_reset_task(struct work_struct *work)
{
	struct cas *cp = container_of(work, struct cas, reset_task);
#if 0
	int pending = atomic_read(&cp->reset_task_pending);
#else
	int pending_all = atomic_read(&cp->reset_task_pending_all);
	int pending_spare = atomic_read(&cp->reset_task_pending_spare);
	int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);

	if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
		/* We can have more tasks scheduled than actually
		 * needed.
		 */
		atomic_dec(&cp->reset_task_pending);
		return;
	}
#endif
	/* The link went down, we reset the ring, but keep
	 * DMA stopped. Use this function for reset
	 * on error as well.
	 */
	if (cp->hw_running) {
		unsigned long flags;

		/* Make sure we don't get interrupts or tx packets */
		netif_device_detach(cp->dev);
		cas_lock_all_save(cp, flags);

		if (cp->opened) {
			/* We call cas_spare_recover when we call cas_open.
			 * but we do not initialize the lists cas_spare_recover
			 * uses until cas_open is called.
			 */
			cas_spare_recover(cp, GFP_ATOMIC);
		}
#if 1
		/* test => only pending_spare set */
		if (!pending_all && !pending_mtu)
			goto done;
#else
		if (pending == CAS_RESET_SPARE)
			goto done;
#endif
		/* when pending == CAS_RESET_ALL, the following
		 * call to cas_init_hw will restart auto negotiation.
		 * Setting the second argument of cas_reset to
		 * !(pending == CAS_RESET_ALL) will set this argument
		 * to 1 (avoiding reinitializing the PHY for the normal
		 * PCS case) when auto negotiation is not restarted.
		 */
#if 1
		cas_reset(cp, !(pending_all > 0));
		if (cp->opened)
			cas_clean_rings(cp);
		cas_init_hw(cp, (pending_all > 0));
#else
		cas_reset(cp, !(pending == CAS_RESET_ALL));
		if (cp->opened)
			cas_clean_rings(cp);
		cas_init_hw(cp, pending == CAS_RESET_ALL);
#endif

done:
		cas_unlock_all_restore(cp, flags);
		netif_device_attach(cp->dev);
	}
#if 1
	atomic_sub(pending_all, &cp->reset_task_pending_all);
	atomic_sub(pending_spare, &cp->reset_task_pending_spare);
	atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
	atomic_dec(&cp->reset_task_pending);
#else
	atomic_set(&cp->reset_task_pending, 0);
#endif
}
static void cas_link_timer(unsigned long data)
{
	struct cas *cp = (struct cas *) data;
	int mask, pending = 0, reset = 0;
	unsigned long flags;

	if (link_transition_timeout != 0 &&
	    cp->link_transition_jiffies_valid &&
	    ((jiffies - cp->link_transition_jiffies) >
	     (link_transition_timeout))) {
		/* One-second counter so link-down workaround doesn't
		 * cause resets to occur so fast as to fool the switch
		 * into thinking the link is down.
		 */
		cp->link_transition_jiffies_valid = 0;
	}

	if (!cp->hw_running)
		return;

	spin_lock_irqsave(&cp->lock, flags);
	cas_lock_tx(cp);
	cas_entropy_gather(cp);

	/* If the link task is still pending, we just
	 * reschedule the link timer
	 */
#if 1
	if (atomic_read(&cp->reset_task_pending_all) ||
	    atomic_read(&cp->reset_task_pending_spare) ||
	    atomic_read(&cp->reset_task_pending_mtu))
		goto done;
#else
	if (atomic_read(&cp->reset_task_pending))
		goto done;
#endif

	/* check for rx cleaning */
	if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
		int i, rmask;

		for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
			rmask = CAS_FLAG_RXD_POST(i);
			if ((mask & rmask) == 0)
				continue;

			/* post_rxds will do a mod_timer */
			if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
				pending = 1;
				continue;
			}
			cp->cas_flags &= ~rmask;
		}
	}

	if (CAS_PHY_MII(cp->phy_type)) {
		u16 bmsr;
		cas_mif_poll(cp, 0);
		bmsr = cas_phy_read(cp, MII_BMSR);
		/* WTZ: Solaris driver reads this twice, but that
		 * may be due to the PCS case and the use of a
		 * common implementation. Read it twice here to be
		 * safe.
		 */
		bmsr = cas_phy_read(cp, MII_BMSR);
		cas_mif_poll(cp, 1);
		readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
		reset = cas_mii_link_check(cp, bmsr);
	} else {
		reset = cas_pcs_link_check(cp);
	}

	if (reset)
		goto done;

	/* check for tx state machine confusion */
	if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
		u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
		u32 wptr, rptr;
		int tlm = CAS_VAL(MAC_SM_TLM, val);

		if (((tlm == 0x5) || (tlm == 0x3)) &&
		    (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
			if (netif_msg_tx_err(cp))
				printk(KERN_DEBUG "%s: tx err: "
				       "MAC_STATE[%08x]\n",
				       cp->dev->name, val);
			reset = 1;
			goto done;
		}

		val  = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
		wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
		rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
		if ((val == 0) && (wptr != rptr)) {
			if (netif_msg_tx_err(cp))
				printk(KERN_DEBUG "%s: tx err: "
				       "TX_FIFO[%08x:%08x:%08x]\n",
				       cp->dev->name, val, wptr, rptr);
			reset = 1;
		}

		if (reset)
			cas_hard_reset(cp);
	}

done:
	if (reset) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
		printk(KERN_ERR "reset called in cas_link_timer\n");
		schedule_work(&cp->reset_task);
#endif
	}

	if (!pending)
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
	cas_unlock_tx(cp);
	spin_unlock_irqrestore(&cp->lock, flags);
}
/* tiny buffers are used to avoid target abort issues with
 * older cassini's
 */
static void cas_tx_tiny_free(struct cas *cp)
{
	struct pci_dev *pdev = cp->pdev;
	int i;

	for (i = 0; i < N_TX_RINGS; i++) {
		if (!cp->tx_tiny_bufs[i])
			continue;

		pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
				    cp->tx_tiny_bufs[i],
				    cp->tx_tiny_dvma[i]);
		cp->tx_tiny_bufs[i] = NULL;
	}
}

static int cas_tx_tiny_alloc(struct cas *cp)
{
	struct pci_dev *pdev = cp->pdev;
	int i;

	for (i = 0; i < N_TX_RINGS; i++) {
		cp->tx_tiny_bufs[i] =
			pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
					     &cp->tx_tiny_dvma[i]);
		if (!cp->tx_tiny_bufs[i]) {
			cas_tx_tiny_free(cp);
			return -1;
		}
	}
	return 0;
}
static int cas_open(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	int hw_was_up, err;
	unsigned long flags;

	mutex_lock(&cp->pm_mutex);

	hw_was_up = cp->hw_running;

	/* The power-management mutex protects the hw_running
	 * etc. state so it is safe to do this bit without cp->lock
	 */
	if (!cp->hw_running) {
		/* Reset the chip */
		cas_lock_all_save(cp, flags);
		/* We set the second arg to cas_reset to zero
		 * because cas_init_hw below will have its second
		 * argument set to non-zero, which will force
		 * autonegotiation to start.
		 */
		cas_reset(cp, 0);
		cp->hw_running = 1;
		cas_unlock_all_restore(cp, flags);
	}

	err = -ENOMEM;
	if (cas_tx_tiny_alloc(cp) < 0)
		goto err_unlock;

	/* alloc rx descriptors */
	if (cas_alloc_rxds(cp) < 0)
		goto err_tx_tiny;

	/* allocate spares */
	cas_spare_init(cp);
	cas_spare_recover(cp, GFP_KERNEL);

	/* We can now request the interrupt as we know it's masked
	 * on the controller. cassini+ has up to 4 interrupts
	 * that can be used, but you need to do explicit pci interrupt
	 * mapping to expose them
	 */
	if (request_irq(cp->pdev->irq, cas_interrupt,
			IRQF_SHARED, dev->name, (void *) dev)) {
		printk(KERN_ERR "%s: failed to request irq !\n",
		       cp->dev->name);
		err = -EAGAIN;
		goto err_spare;
	}

#ifdef USE_NAPI
	napi_enable(&cp->napi);
#endif
	/* init hw */
	cas_lock_all_save(cp, flags);
	cas_clean_rings(cp);
	cas_init_hw(cp, !hw_was_up);
	cp->opened = 1;
	cas_unlock_all_restore(cp, flags);

	netif_start_queue(dev);
	mutex_unlock(&cp->pm_mutex);
	return 0;

err_spare:
	cas_spare_free(cp);
	cas_free_rxds(cp);
err_tx_tiny:
	cas_tx_tiny_free(cp);
err_unlock:
	mutex_unlock(&cp->pm_mutex);
	return err;
}
static int cas_close(struct net_device *dev)
{
	unsigned long flags;
	struct cas *cp = netdev_priv(dev);

#ifdef USE_NAPI
	napi_disable(&cp->napi);
#endif
	/* Make sure we don't get distracted by suspend/resume */
	mutex_lock(&cp->pm_mutex);

	netif_stop_queue(dev);

	/* Stop traffic, mark us closed */
	cas_lock_all_save(cp, flags);
	cp->opened = 0;
	cas_reset(cp, 0);
	cas_phy_init(cp);
	cas_begin_auto_negotiation(cp, NULL);
	cas_clean_rings(cp);
	cas_unlock_all_restore(cp, flags);

	free_irq(cp->pdev->irq, (void *) dev);
	cas_spare_free(cp);
	cas_free_rxds(cp);
	cas_tx_tiny_free(cp);
	mutex_unlock(&cp->pm_mutex);
	return 0;
}
static struct {
	const char name[ETH_GSTRING_LEN];
} ethtool_cassini_statnames[] = {
	{"collisions"},
	{"rx_bytes"},
	{"rx_crc_errors"},
	{"rx_dropped"},
	{"rx_errors"},
	{"rx_fifo_errors"},
	{"rx_frame_errors"},
	{"rx_length_errors"},
	{"rx_over_errors"},
	{"rx_packets"},
	{"tx_aborted_errors"},
	{"tx_bytes"},
	{"tx_dropped"},
	{"tx_errors"},
	{"tx_fifo_errors"},
	{"tx_packets"}
};
#define CAS_NUM_STAT_KEYS (sizeof(ethtool_cassini_statnames)/ETH_GSTRING_LEN)

static struct {
	const int offsets;	/* neg. values for 2nd arg to cas_read_phy */
} ethtool_register_table[] = {
	{-MII_BMSR},
	{-MII_BMCR},
	{REG_CAWR},
	{REG_INF_BURST},
	{REG_BIM_CFG},
	{REG_RX_CFG},
	{REG_HP_CFG},
	{REG_MAC_TX_CFG},
	{REG_MAC_RX_CFG},
	{REG_MAC_CTRL_CFG},
	{REG_MAC_XIF_CFG},
	{REG_MIF_CFG},
	{REG_PCS_CFG},
	{REG_SATURN_PCFG},
	{REG_PCS_MII_STATUS},
	{REG_PCS_STATE_MACHINE},
	{REG_MAC_COLL_EXCESS},
	{REG_MAC_COLL_LATE}
};
#define CAS_REG_LEN	ARRAY_SIZE(ethtool_register_table)
#define CAS_MAX_REGS	(sizeof (u32)*CAS_REG_LEN)
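
/* NOTE: the ordering of ethtool_cassini_statnames must match the
 * order in which cas_get_ethtool_stats() fills in data[] below;
 * it BUG()s if the counts ever get out of sync.
 */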
static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
{
	u8 *p;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
		u16 hval;
		u32 val;
		if (ethtool_register_table[i].offsets < 0) {
			hval = cas_phy_read(cp,
				    -ethtool_register_table[i].offsets);
			val = hval;
		} else {
			val = readl(cp->regs +
				    ethtool_register_table[i].offsets);
		}
		memcpy(p, (u8 *)&val, sizeof(u32));
	}
	spin_unlock_irqrestore(&cp->lock, flags);
}
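
/* cp->net_stats[] has one slot per tx ring plus an aggregate slot at
 * index N_TX_RINGS. the per-ring slots are folded into the aggregate
 * and cleared, each under its own stat_lock.
 */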
static struct net_device_stats *cas_get_stats(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	struct net_device_stats *stats = cp->net_stats;
	unsigned long flags;
	unsigned long tmp;
	int i;

	/* we collate all of the stats into net_stats[N_TX_RINGS] */
	if (!cp->hw_running)
		return stats + N_TX_RINGS;

	/* collect outstanding stats */
	/* WTZ: the Cassini spec gives these as 16 bit counters but
	 * stored in 32-bit words. Added a mask of 0xffff to be safe,
	 * in case the chip somehow puts any garbage in the other bits.
	 * Also, counter usage didn't seem to match what Adrian did
	 * in the parts of the code that set these quantities. Made
	 * that consistent.
	 */
	spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
	stats[N_TX_RINGS].rx_crc_errors +=
		readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
	stats[N_TX_RINGS].rx_frame_errors +=
		readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
	stats[N_TX_RINGS].rx_length_errors +=
		readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
#if 1
	tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
		(readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
	stats[N_TX_RINGS].tx_aborted_errors += tmp;
	stats[N_TX_RINGS].collisions +=
		tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
#else
	stats[N_TX_RINGS].tx_aborted_errors +=
		readl(cp->regs + REG_MAC_COLL_EXCESS);
	stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
		readl(cp->regs + REG_MAC_COLL_LATE);
#endif
	cas_clear_mac_err(cp);

	/* saved bits that are unique to ring 0 */
	spin_lock(&cp->stat_lock[0]);
	stats[N_TX_RINGS].collisions        += stats[0].collisions;
	stats[N_TX_RINGS].rx_over_errors    += stats[0].rx_over_errors;
	stats[N_TX_RINGS].rx_frame_errors   += stats[0].rx_frame_errors;
	stats[N_TX_RINGS].rx_fifo_errors    += stats[0].rx_fifo_errors;
	stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
	stats[N_TX_RINGS].tx_fifo_errors    += stats[0].tx_fifo_errors;
	spin_unlock(&cp->stat_lock[0]);

	for (i = 0; i < N_TX_RINGS; i++) {
		spin_lock(&cp->stat_lock[i]);
		stats[N_TX_RINGS].rx_length_errors +=
			stats[i].rx_length_errors;
		stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
		stats[N_TX_RINGS].rx_packets    += stats[i].rx_packets;
		stats[N_TX_RINGS].tx_packets    += stats[i].tx_packets;
		stats[N_TX_RINGS].rx_bytes      += stats[i].rx_bytes;
		stats[N_TX_RINGS].tx_bytes      += stats[i].tx_bytes;
		stats[N_TX_RINGS].rx_errors     += stats[i].rx_errors;
		stats[N_TX_RINGS].tx_errors     += stats[i].tx_errors;
		stats[N_TX_RINGS].rx_dropped    += stats[i].rx_dropped;
		stats[N_TX_RINGS].tx_dropped    += stats[i].tx_dropped;
		memset(stats + i, 0, sizeof(struct net_device_stats));
		spin_unlock(&cp->stat_lock[i]);
	}
	spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
	return stats + N_TX_RINGS;
}
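
/* the RX MAC must be disabled (and the disable confirmed) before the
 * hash filter can be reprogrammed; "limit" bounds both busy-waits so
 * a wedged chip cannot hang us here.
 */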
static void cas_set_multicast(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	u32 rxcfg, rxcfg_new;
	unsigned long flags;
	int limit = STOP_TRIES;

	if (!cp->hw_running)
		return;

	spin_lock_irqsave(&cp->lock, flags);
	rxcfg = readl(cp->regs + REG_MAC_RX_CFG);

	/* disable RX MAC and wait for completion */
	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
		if (!limit--)
			break;
		udelay(10);
	}

	/* disable hash filter and wait for completion */
	limit = STOP_TRIES;
	rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
		if (!limit--)
			break;
		udelay(10);
	}

	/* program hash filters */
	cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
	rxcfg |= rxcfg_new;
	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
	spin_unlock_irqrestore(&cp->lock, flags);
}
static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct cas *cp = netdev_priv(dev);
	strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
	strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
	info->fw_version[0] = '\0';
	strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
	info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
		cp->casreg_len : CAS_MAX_REGS;
	info->n_stats = CAS_NUM_STAT_KEYS;
}
static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cas *cp = netdev_priv(dev);
	u16 bmcr;
	int full_duplex, speed, pause;
	unsigned long flags;
	enum link_state linkstate = link_up;

	cmd->advertising = 0;
	cmd->supported = SUPPORTED_Autoneg;
	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
		cmd->supported |= SUPPORTED_1000baseT_Full;
		cmd->advertising |= ADVERTISED_1000baseT_Full;
	}

	/* Record PHY settings if HW is on. */
	spin_lock_irqsave(&cp->lock, flags);
	bmcr = 0;
	linkstate = cp->lstate;
	if (CAS_PHY_MII(cp->phy_type)) {
		cmd->port = PORT_MII;
		cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
			XCVR_INTERNAL : XCVR_EXTERNAL;
		cmd->phy_address = cp->phy_addr;
		cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
			ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;

		cmd->supported |=
			(SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_TP | SUPPORTED_MII);

		if (cp->hw_running) {
			cas_mif_poll(cp, 0);
			bmcr = cas_phy_read(cp, MII_BMCR);
			cas_read_mii_link_mode(cp, &full_duplex,
					       &speed, &pause);
			cas_mif_poll(cp, 1);
		}
	} else {
		cmd->port = PORT_FIBRE;
		cmd->transceiver = XCVR_INTERNAL;
		cmd->phy_address = 0;
		cmd->supported   |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;

		if (cp->hw_running) {
			/* pcs uses the same bits as mii */
			bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
			cas_read_pcs_link_mode(cp, &full_duplex,
					       &speed, &pause);
		}
	}
	spin_unlock_irqrestore(&cp->lock, flags);

	if (bmcr & BMCR_ANENABLE) {
		cmd->advertising |= ADVERTISED_Autoneg;
		cmd->autoneg = AUTONEG_ENABLE;
		cmd->speed = ((speed == 10) ?
			      SPEED_10 :
			      ((speed == 1000) ?
			       SPEED_1000 : SPEED_100));
		cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
		cmd->speed =
			(bmcr & CAS_BMCR_SPEED1000) ?
			SPEED_1000 :
			((bmcr & BMCR_SPEED100) ? SPEED_100 :
			 SPEED_10);
		cmd->duplex =
			(bmcr & BMCR_FULLDPLX) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}
	if (linkstate != link_up) {
		/* Force these to "unknown" if the link is not up and
		 * autonegotiation is enabled. We can set the link
		 * speed to 0, but not cmd->duplex,
		 * because its legal values are 0 and 1. Ethtool will
		 * print the value reported in parentheses after the
		 * word "Unknown" for unrecognized values.
		 *
		 * If in forced mode, we report the speed and duplex
		 * settings that we configured.
		 */
		if (cp->link_cntl & BMCR_ANENABLE) {
			cmd->speed = 0;
			cmd->duplex = 0xff;
		} else {
			cmd->speed = SPEED_10;
			if (cp->link_cntl & BMCR_SPEED100) {
				cmd->speed = SPEED_100;
			} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
				cmd->speed = SPEED_1000;
			}
			cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
				DUPLEX_FULL : DUPLEX_HALF;
		}
	}
	return 0;
}
static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((cmd->speed != SPEED_1000 &&
	      cmd->speed != SPEED_100 &&
	      cmd->speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Apply settings and restart link process. */
	spin_lock_irqsave(&cp->lock, flags);
	cas_begin_auto_negotiation(cp, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);
	return 0;
}
static int cas_nway_reset(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;

	if ((cp->link_cntl & BMCR_ANENABLE) == 0)
		return -EINVAL;

	/* Restart link process. */
	spin_lock_irqsave(&cp->lock, flags);
	cas_begin_auto_negotiation(cp, NULL);
	spin_unlock_irqrestore(&cp->lock, flags);

	return 0;
}
static u32 cas_get_link(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	return cp->lstate == link_up;
}

static u32 cas_get_msglevel(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	return cp->msg_enable;
}

static void cas_set_msglevel(struct net_device *dev, u32 value)
{
	struct cas *cp = netdev_priv(dev);
	cp->msg_enable = value;
}

static int cas_get_regs_len(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS;
}

static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			 void *p)
{
	struct cas *cp = netdev_priv(dev);
	regs->version = 0;
	/* cas_read_regs handles locks (cp->lock).  */
	cas_read_regs(cp, p, regs->len / sizeof(u32));
}
static int cas_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return CAS_NUM_STAT_KEYS;
	default:
		return -EOPNOTSUPP;
	}
}

static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	memcpy(data, &ethtool_cassini_statnames,
	       CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
}
static void cas_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *data)
{
	struct cas *cp = netdev_priv(dev);
	struct net_device_stats *stats = cas_get_stats(cp->dev);
	int i = 0;
	data[i++] = stats->collisions;
	data[i++] = stats->rx_bytes;
	data[i++] = stats->rx_crc_errors;
	data[i++] = stats->rx_dropped;
	data[i++] = stats->rx_errors;
	data[i++] = stats->rx_fifo_errors;
	data[i++] = stats->rx_frame_errors;
	data[i++] = stats->rx_length_errors;
	data[i++] = stats->rx_over_errors;
	data[i++] = stats->rx_packets;
	data[i++] = stats->tx_aborted_errors;
	data[i++] = stats->tx_bytes;
	data[i++] = stats->tx_dropped;
	data[i++] = stats->tx_errors;
	data[i++] = stats->tx_fifo_errors;
	data[i++] = stats->tx_packets;
	BUG_ON(i != CAS_NUM_STAT_KEYS);
}
static const struct ethtool_ops cas_ethtool_ops = {
	.get_drvinfo		= cas_get_drvinfo,
	.get_settings		= cas_get_settings,
	.set_settings		= cas_set_settings,
	.nway_reset		= cas_nway_reset,
	.get_link		= cas_get_link,
	.get_msglevel		= cas_get_msglevel,
	.set_msglevel		= cas_set_msglevel,
	.get_regs_len		= cas_get_regs_len,
	.get_regs		= cas_get_regs,
	.get_sset_count		= cas_get_sset_count,
	.get_strings		= cas_get_strings,
	.get_ethtool_stats	= cas_get_ethtool_stats,
};
static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct cas *cp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;
	int rc = -EOPNOTSUPP;

	/* Hold the PM mutex while doing ioctl's or we may collide
	 * with open/close and power management and oops.
	 */
	mutex_lock(&cp->pm_mutex);
	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = cp->phy_addr;
		/* Fallthrough... */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		rc = 0;
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (!capable(CAP_NET_ADMIN)) {
			rc = -EPERM;
			break;
		}
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		break;
	default:
		break;
	}

	mutex_unlock(&cp->pm_mutex);
	return rc;
}
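
/* probe: one-time pci and netdev setup. everything acquired here is
 * released either on the error paths at the bottom of this function
 * or in cas_remove_one().
 */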
static int __devinit cas_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int cas_version_printed = 0;
	unsigned long casreg_len;
	struct net_device *dev;
	struct cas *cp;
	int i, err, pci_using_dac;
	u16 pci_cmd;
	u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
	DECLARE_MAC_BUF(mac);

	if (cas_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper PCI device "
			"base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	dev = alloc_etherdev(sizeof(*cp));
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_disable_pdev;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = pci_request_regions(pdev, dev->name);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_free_netdev;
	}
	pci_set_master(pdev);

	/* we must always turn on parity response or else parity
	 * doesn't get generated properly. disable SERR/PERR as well.
	 * in addition, we want to turn MWI on.
	 */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_SERR;
	pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	if (pci_try_set_mwi(pdev))
		printk(KERN_WARNING PFX "Could not enable MWI for %s\n",
		       pci_name(pdev));
	/*
	 * On some architectures, the default cache line size set
	 * by pci_try_set_mwi reduces performance. We have to increase
	 * it for this case. To start, we'll print some configuration
	 * data.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
			     &orig_cacheline_size);
	if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
		cas_cacheline_size =
			(CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
			CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
		if (pci_write_config_byte(pdev,
					  PCI_CACHE_LINE_SIZE,
					  cas_cacheline_size)) {
			dev_err(&pdev->dev, "Could not set PCI cache "
				"line size\n");
			goto err_write_cacheline;
		}
	}

	/* Configure DMA attributes. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev,
						  DMA_64BIT_MASK);
		if (err < 0) {
			dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
				"for consistent allocations\n");
			goto err_out_free_res;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, "
				"aborting.\n");
			goto err_out_free_res;
		}
		pci_using_dac = 0;
	}
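
	/* pci_using_dac records whether the 64-bit DMA mask stuck; it
	 * gates NETIF_F_HIGHDMA when the netdev features are set below.
	 */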
	casreg_len = pci_resource_len(pdev, 0);

	cp = netdev_priv(dev);
	cp->pdev = pdev;
	/* A value of 0 indicates we never explicitly set it */
	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;

	cp->dev = dev;
	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
		cassini_debug;

	cp->link_transition = LINK_TRANSITION_UNKNOWN;
	cp->link_transition_jiffies_valid = 0;

	spin_lock_init(&cp->lock);
	spin_lock_init(&cp->rx_inuse_lock);
	spin_lock_init(&cp->rx_spare_lock);
	for (i = 0; i < N_TX_RINGS; i++) {
		spin_lock_init(&cp->stat_lock[i]);
		spin_lock_init(&cp->tx_lock[i]);
	}
	spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
	mutex_init(&cp->pm_mutex);

	init_timer(&cp->link_timer);
	cp->link_timer.function = cas_link_timer;
	cp->link_timer.data = (unsigned long) cp;

	/* Just in case the implementation of atomic operations
	 * changes so that an explicit initialization is necessary.
	 */
	atomic_set(&cp->reset_task_pending, 0);
	atomic_set(&cp->reset_task_pending_all, 0);
	atomic_set(&cp->reset_task_pending_spare, 0);
	atomic_set(&cp->reset_task_pending_mtu, 0);

	INIT_WORK(&cp->reset_task, cas_reset_task);

	/* Default link parameters */
	if (link_mode >= 0 && link_mode <= 6)
		cp->link_cntl = link_modes[link_mode];
	else
		cp->link_cntl = BMCR_ANENABLE;
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	netif_carrier_off(cp->dev);
	cp->timer_ticks = 0;
	/* give us access to cassini registers */
	cp->regs = pci_iomap(pdev, 0, casreg_len);
	if (cp->regs == 0UL) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
		goto err_out_free_res;
	}
	cp->casreg_len = casreg_len;

	pci_save_state(pdev);
	cas_check_pci_invariants(cp);

	if (cas_check_invariants(cp))
		goto err_out_iounmap;

	cp->init_block = (struct cas_init_block *)
		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
				     &cp->block_dvma);
	if (!cp->init_block) {
		dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n");
		goto err_out_iounmap;
	}

	for (i = 0; i < N_TX_RINGS; i++)
		cp->init_txds[i] = cp->init_block->txds[i];

	for (i = 0; i < N_RX_DESC_RINGS; i++)
		cp->init_rxds[i] = cp->init_block->rxds[i];

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cp->init_rxcs[i] = cp->init_block->rxcs[i];

	for (i = 0; i < N_RX_FLOWS; i++)
		skb_queue_head_init(&cp->rx_flows[i]);

	dev->open = cas_open;
	dev->stop = cas_close;
	dev->hard_start_xmit = cas_start_xmit;
	dev->get_stats = cas_get_stats;
	dev->set_multicast_list = cas_set_multicast;
	dev->do_ioctl = cas_ioctl;
	dev->ethtool_ops = &cas_ethtool_ops;
	dev->tx_timeout = cas_tx_timeout;
	dev->watchdog_timeo = CAS_TX_TIMEOUT;
	dev->change_mtu = cas_change_mtu;

	netif_napi_add(dev, &cp->napi, cas_poll, 64);
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = cas_netpoll;
#endif
	dev->irq = pdev->irq;
	dev->dma = 0;

	/* Cassini features. */
	if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	if (register_netdev(dev)) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_consistent;
	}

	i = readl(cp->regs + REG_BIM_CFG);
	printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
	       "Ethernet[%d] %s\n", dev->name,
	       (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
	       (i & BIM_CFG_32BIT) ? "32" : "64",
	       (i & BIM_CFG_66MHZ) ? "66" : "33",
	       (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
	       print_mac(mac, dev->dev_addr));

	pci_set_drvdata(pdev, dev);
	cp->hw_running = 1;
	cas_entropy_reset(cp);
	cas_phy_init(cp);
	cas_begin_auto_negotiation(cp, NULL);
	return 0;
err_out_free_consistent:
	pci_free_consistent(pdev, sizeof(struct cas_init_block),
			    cp->init_block, cp->block_dvma);

err_out_iounmap:
	mutex_lock(&cp->pm_mutex);
	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

	pci_iounmap(pdev, cp->regs);

err_out_free_res:
	pci_release_regions(pdev);

err_write_cacheline:
	/* Try to restore it in case the error occurred after we
	 * set it.
	 */
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);

err_out_free_netdev:
	free_netdev(dev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return -ENODEV;
}
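
/* undo everything cas_init_one() did. the cache line size is only
 * restored if we actually changed it, which cp->orig_cacheline_size
 * records.
 */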
static void __devexit cas_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp;

	if (!dev)
		return;

	cp = netdev_priv(dev);
	unregister_netdev(dev);

	mutex_lock(&cp->pm_mutex);
	flush_scheduled_work();
	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

	if (cp->orig_cacheline_size) {
		/* Restore the cache line size if we had modified
		 * it.
		 */
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				      cp->orig_cacheline_size);
	}
	pci_free_consistent(pdev, sizeof(struct cas_init_block),
			    cp->init_block, cp->block_dvma);
	pci_iounmap(pdev, cp->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
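
/* power management: pm_mutex serializes suspend/resume against
 * open/close and the mii ioctls, so the opened/hw_running state
 * stays consistent across transitions.
 */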
#ifdef CONFIG_PM
static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;

	mutex_lock(&cp->pm_mutex);

	/* If the driver is opened, we stop the DMA */
	if (cp->opened) {
		netif_device_detach(dev);

		cas_lock_all_save(cp, flags);

		/* We can set the second arg of cas_reset to 0
		 * because on resume, we'll call cas_init_hw with
		 * its second arg set so that autonegotiation is
		 * restarted.
		 */
		cas_reset(cp, 0);
		cas_clean_rings(cp);
		cas_unlock_all_restore(cp, flags);
	}

	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

	return 0;
}
static int cas_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp = netdev_priv(dev);

	printk(KERN_INFO "%s: resuming\n", dev->name);

	mutex_lock(&cp->pm_mutex);
	cas_hard_reset(cp);
	if (cp->opened) {
		unsigned long flags;
		cas_lock_all_save(cp, flags);
		cas_reset(cp, 0);
		cp->hw_running = 1;
		cas_clean_rings(cp);
		cas_init_hw(cp, 1);
		cas_unlock_all_restore(cp, flags);

		netif_device_attach(dev);
	}
	mutex_unlock(&cp->pm_mutex);
	return 0;
}
#endif /* CONFIG_PM */
static struct pci_driver cas_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= cas_pci_tbl,
	.probe		= cas_init_one,
	.remove		= __devexit_p(cas_remove_one),
#ifdef CONFIG_PM
	.suspend	= cas_suspend,
	.resume		= cas_resume
#endif
};
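
/* linkdown_timeout is a module parameter given in seconds; it is
 * converted to jiffies here. a non-positive value disables the
 * link-transition timeout handling in cas_link_timer.
 */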
static int __init cas_init(void)
{
	if (linkdown_timeout > 0)
		link_transition_timeout = linkdown_timeout * HZ;
	else
		link_transition_timeout = 0;

	return pci_register_driver(&cas_driver);
}

static void __exit cas_cleanup(void)
{
	pci_unregister_driver(&cas_driver);
}

module_init(cas_init);
module_exit(cas_cleanup);