1 // SPDX-License-Identifier: GPL-2.0+
2 /* FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
4 * Copyright (c) 2018 Maciej W. Rozycki
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
13 * Dave Sawyer & Phil Weeks & Frank Itkowsky,
14 * "DEC FDDIcontroller 700 Port Specification",
15 * Revision 1.1, Digital Equipment Corporation
18 /* ------------------------------------------------------------------------- */
19 /* FZA configurable parameters. */
21 /* The number of transmit ring descriptors; either 0 for 512 or 1 for 1024. */
22 #define FZA_RING_TX_MODE 0
24 /* The number of receive ring descriptors; from 2 up to 256. */
25 #define FZA_RING_RX_SIZE 256
27 /* End of FZA configurable parameters. No need to change anything below. */
28 /* ------------------------------------------------------------------------- */
30 #include <linux/delay.h>
31 #include <linux/device.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/init.h>
34 #include <linux/interrupt.h>
36 #include <linux/io-64-nonatomic-lo-hi.h>
37 #include <linux/ioport.h>
38 #include <linux/kernel.h>
39 #include <linux/list.h>
40 #include <linux/module.h>
41 #include <linux/netdevice.h>
42 #include <linux/fddidevice.h>
43 #include <linux/sched.h>
44 #include <linux/skbuff.h>
45 #include <linux/spinlock.h>
46 #include <linux/stat.h>
48 #include <linux/timer.h>
49 #include <linux/types.h>
50 #include <linux/wait.h>
52 #include <asm/barrier.h>
56 #define DRV_NAME "defza"
57 #define DRV_VERSION "v.1.1.4"
58 #define DRV_RELDATE "Oct 6 2018"
60 static const char version
[] =
61 DRV_NAME
": " DRV_VERSION
" " DRV_RELDATE
" Maciej W. Rozycki\n";
63 MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
64 MODULE_DESCRIPTION("DEC FDDIcontroller 700 (DEFZA-xx) driver");
65 MODULE_LICENSE("GPL");
68 module_param(loopback
, int, 0644);
70 /* Ring Purger Multicast */
71 static u8 hw_addr_purger
[8] = { 0x09, 0x00, 0x2b, 0x02, 0x01, 0x05 };
72 /* Directed Beacon Multicast */
73 static u8 hw_addr_beacon
[8] = { 0x01, 0x80, 0xc2, 0x00, 0x01, 0x00 };
/* Shorthands for MMIO accesses that we require to be strongly ordered
 * WRT preceding MMIO accesses.
 *
 * NOTE(review): both the ordered (_o) and unordered (_u) variants map
 * to the relaxed accessors here — presumably the TURBOchannel bus
 * provides the required ordering by itself; confirm against the
 * platform's I/O ordering guarantees before reusing elsewhere.
 */
#define readw_o readw_relaxed
#define readl_o readl_relaxed

#define writew_o writew_relaxed
#define writel_o writel_relaxed

/* Shorthands for MMIO accesses that we are happy with being weakly ordered
 * WRT preceding MMIO accesses.
 */
#define readw_u readw_relaxed
#define readl_u readl_relaxed
#define readq_u readq_relaxed

#define writew_u writew_relaxed
#define writel_u writel_relaxed
#define writeq_u writeq_relaxed
95 static inline struct sk_buff
*fza_alloc_skb_irq(struct net_device
*dev
,
98 return __netdev_alloc_skb(dev
, length
, GFP_ATOMIC
);
101 static inline struct sk_buff
*fza_alloc_skb(struct net_device
*dev
,
104 return __netdev_alloc_skb(dev
, length
, GFP_KERNEL
);
107 static inline void fza_skb_align(struct sk_buff
*skb
, unsigned int v
)
111 x
= (unsigned long)skb
->data
;
114 skb_reserve(skb
, y
- x
);
117 static inline void fza_reads(const void __iomem
*from
, void *to
,
120 if (sizeof(unsigned long) == 8) {
121 const u64 __iomem
*src
= from
;
122 const u32 __iomem
*src_trail
;
126 for (size
= (size
+ 3) / 4; size
> 1; size
-= 2)
127 *dst
++ = readq_u(src
++);
129 src_trail
= (u32 __iomem
*)src
;
130 dst_trail
= (u32
*)dst
;
131 *dst_trail
= readl_u(src_trail
);
134 const u32 __iomem
*src
= from
;
137 for (size
= (size
+ 3) / 4; size
; size
--)
138 *dst
++ = readl_u(src
++);
142 static inline void fza_writes(const void *from
, void __iomem
*to
,
145 if (sizeof(unsigned long) == 8) {
146 const u64
*src
= from
;
147 const u32
*src_trail
;
148 u64 __iomem
*dst
= to
;
149 u32 __iomem
*dst_trail
;
151 for (size
= (size
+ 3) / 4; size
> 1; size
-= 2)
152 writeq_u(*src
++, dst
++);
154 src_trail
= (u32
*)src
;
155 dst_trail
= (u32 __iomem
*)dst
;
156 writel_u(*src_trail
, dst_trail
);
159 const u32
*src
= from
;
160 u32 __iomem
*dst
= to
;
162 for (size
= (size
+ 3) / 4; size
; size
--)
163 writel_u(*src
++, dst
++);
167 static inline void fza_moves(const void __iomem
*from
, void __iomem
*to
,
170 if (sizeof(unsigned long) == 8) {
171 const u64 __iomem
*src
= from
;
172 const u32 __iomem
*src_trail
;
173 u64 __iomem
*dst
= to
;
174 u32 __iomem
*dst_trail
;
176 for (size
= (size
+ 3) / 4; size
> 1; size
-= 2)
177 writeq_u(readq_u(src
++), dst
++);
179 src_trail
= (u32 __iomem
*)src
;
180 dst_trail
= (u32 __iomem
*)dst
;
181 writel_u(readl_u(src_trail
), dst_trail
);
184 const u32 __iomem
*src
= from
;
185 u32 __iomem
*dst
= to
;
187 for (size
= (size
+ 3) / 4; size
; size
--)
188 writel_u(readl_u(src
++), dst
++);
192 static inline void fza_zeros(void __iomem
*to
, unsigned long size
)
194 if (sizeof(unsigned long) == 8) {
195 u64 __iomem
*dst
= to
;
196 u32 __iomem
*dst_trail
;
198 for (size
= (size
+ 3) / 4; size
> 1; size
-= 2)
201 dst_trail
= (u32 __iomem
*)dst
;
202 writel_u(0, dst_trail
);
205 u32 __iomem
*dst
= to
;
207 for (size
= (size
+ 3) / 4; size
; size
--)
212 static inline void fza_regs_dump(struct fza_private
*fp
)
214 pr_debug("%s: iomem registers:\n", fp
->name
);
215 pr_debug(" reset: 0x%04x\n", readw_o(&fp
->regs
->reset
));
216 pr_debug(" interrupt event: 0x%04x\n", readw_u(&fp
->regs
->int_event
));
217 pr_debug(" status: 0x%04x\n", readw_u(&fp
->regs
->status
));
218 pr_debug(" interrupt mask: 0x%04x\n", readw_u(&fp
->regs
->int_mask
));
219 pr_debug(" control A: 0x%04x\n", readw_u(&fp
->regs
->control_a
));
220 pr_debug(" control B: 0x%04x\n", readw_u(&fp
->regs
->control_b
));
223 static inline void fza_do_reset(struct fza_private
*fp
)
225 /* Reset the board. */
226 writew_o(FZA_RESET_INIT
, &fp
->regs
->reset
);
227 readw_o(&fp
->regs
->reset
); /* Synchronize. */
228 readw_o(&fp
->regs
->reset
); /* Read it back for a small delay. */
229 writew_o(FZA_RESET_CLR
, &fp
->regs
->reset
);
231 /* Enable all interrupt events we handle. */
232 writew_o(fp
->int_mask
, &fp
->regs
->int_mask
);
233 readw_o(&fp
->regs
->int_mask
); /* Synchronize. */
236 static inline void fza_do_shutdown(struct fza_private
*fp
)
238 /* Disable the driver mode. */
239 writew_o(FZA_CONTROL_B_IDLE
, &fp
->regs
->control_b
);
241 /* And reset the board. */
242 writew_o(FZA_RESET_INIT
, &fp
->regs
->reset
);
243 readw_o(&fp
->regs
->reset
); /* Synchronize. */
244 writew_o(FZA_RESET_CLR
, &fp
->regs
->reset
);
245 readw_o(&fp
->regs
->reset
); /* Synchronize. */
248 static int fza_reset(struct fza_private
*fp
)
254 pr_info("%s: resetting the board...\n", fp
->name
);
256 spin_lock_irqsave(&fp
->lock
, flags
);
257 fp
->state_chg_flag
= 0;
259 spin_unlock_irqrestore(&fp
->lock
, flags
);
261 /* DEC says RESET needs up to 30 seconds to complete. My DEFZA-AA
262 * rev. C03 happily finishes in 9.7 seconds. :-) But we need to
263 * be on the safe side...
265 t
= wait_event_timeout(fp
->state_chg_wait
, fp
->state_chg_flag
,
267 status
= readw_u(&fp
->regs
->status
);
268 state
= FZA_STATUS_GET_STATE(status
);
269 if (fp
->state_chg_flag
== 0) {
270 pr_err("%s: RESET timed out!, state %x\n", fp
->name
, state
);
273 if (state
!= FZA_STATE_UNINITIALIZED
) {
274 pr_err("%s: RESET failed!, state %x, failure ID %x\n",
275 fp
->name
, state
, FZA_STATUS_GET_TEST(status
));
278 pr_info("%s: OK\n", fp
->name
);
279 pr_debug("%s: RESET: %lums elapsed\n", fp
->name
,
280 (45 * HZ
- t
) * 1000 / HZ
);
285 static struct fza_ring_cmd __iomem
*fza_cmd_send(struct net_device
*dev
,
288 struct fza_private
*fp
= netdev_priv(dev
);
289 struct fza_ring_cmd __iomem
*ring
= fp
->ring_cmd
+ fp
->ring_cmd_index
;
290 unsigned int old_mask
, new_mask
;
291 union fza_cmd_buf __iomem
*buf
;
292 struct netdev_hw_addr
*ha
;
295 old_mask
= fp
->int_mask
;
296 new_mask
= old_mask
& ~FZA_MASK_STATE_CHG
;
297 writew_u(new_mask
, &fp
->regs
->int_mask
);
298 readw_o(&fp
->regs
->int_mask
); /* Synchronize. */
299 fp
->int_mask
= new_mask
;
301 buf
= fp
->mmio
+ readl_u(&ring
->buffer
);
303 if ((readl_u(&ring
->cmd_own
) & FZA_RING_OWN_MASK
) !=
305 pr_warn("%s: command buffer full, command: %u!\n", fp
->name
,
311 case FZA_RING_CMD_INIT
:
312 writel_u(FZA_RING_TX_MODE
, &buf
->init
.tx_mode
);
313 writel_u(FZA_RING_RX_SIZE
, &buf
->init
.hst_rx_size
);
314 fza_zeros(&buf
->init
.counters
, sizeof(buf
->init
.counters
));
317 case FZA_RING_CMD_MODCAM
:
319 fza_writes(&hw_addr_purger
, &buf
->cam
.hw_addr
[i
++],
320 sizeof(*buf
->cam
.hw_addr
));
321 fza_writes(&hw_addr_beacon
, &buf
->cam
.hw_addr
[i
++],
322 sizeof(*buf
->cam
.hw_addr
));
323 netdev_for_each_mc_addr(ha
, dev
) {
324 if (i
>= FZA_CMD_CAM_SIZE
)
326 fza_writes(ha
->addr
, &buf
->cam
.hw_addr
[i
++],
327 sizeof(*buf
->cam
.hw_addr
));
329 while (i
< FZA_CMD_CAM_SIZE
)
330 fza_zeros(&buf
->cam
.hw_addr
[i
++],
331 sizeof(*buf
->cam
.hw_addr
));
334 case FZA_RING_CMD_PARAM
:
335 writel_u(loopback
, &buf
->param
.loop_mode
);
336 writel_u(fp
->t_max
, &buf
->param
.t_max
);
337 writel_u(fp
->t_req
, &buf
->param
.t_req
);
338 writel_u(fp
->tvx
, &buf
->param
.tvx
);
339 writel_u(fp
->lem_threshold
, &buf
->param
.lem_threshold
);
340 fza_writes(&fp
->station_id
, &buf
->param
.station_id
,
341 sizeof(buf
->param
.station_id
));
342 /* Convert to milliseconds due to buggy firmware. */
343 writel_u(fp
->rtoken_timeout
/ 12500,
344 &buf
->param
.rtoken_timeout
);
345 writel_u(fp
->ring_purger
, &buf
->param
.ring_purger
);
348 case FZA_RING_CMD_MODPROM
:
349 if (dev
->flags
& IFF_PROMISC
) {
350 writel_u(1, &buf
->modprom
.llc_prom
);
351 writel_u(1, &buf
->modprom
.smt_prom
);
353 writel_u(0, &buf
->modprom
.llc_prom
);
354 writel_u(0, &buf
->modprom
.smt_prom
);
356 if (dev
->flags
& IFF_ALLMULTI
||
357 netdev_mc_count(dev
) > FZA_CMD_CAM_SIZE
- 2)
358 writel_u(1, &buf
->modprom
.llc_multi
);
360 writel_u(0, &buf
->modprom
.llc_multi
);
361 writel_u(1, &buf
->modprom
.llc_bcast
);
365 /* Trigger the command. */
366 writel_u(FZA_RING_OWN_FZA
| command
, &ring
->cmd_own
);
367 writew_o(FZA_CONTROL_A_CMD_POLL
, &fp
->regs
->control_a
);
369 fp
->ring_cmd_index
= (fp
->ring_cmd_index
+ 1) % FZA_RING_CMD_SIZE
;
371 fp
->int_mask
= old_mask
;
372 writew_u(fp
->int_mask
, &fp
->regs
->int_mask
);
377 static int fza_init_send(struct net_device
*dev
,
378 struct fza_cmd_init
*__iomem
*init
)
380 struct fza_private
*fp
= netdev_priv(dev
);
381 struct fza_ring_cmd __iomem
*ring
;
386 spin_lock_irqsave(&fp
->lock
, flags
);
387 fp
->cmd_done_flag
= 0;
388 ring
= fza_cmd_send(dev
, FZA_RING_CMD_INIT
);
389 spin_unlock_irqrestore(&fp
->lock
, flags
);
391 /* This should never happen in the uninitialized state,
392 * so do not try to recover and just consider it fatal.
396 /* INIT may take quite a long time (160ms for my C03). */
397 t
= wait_event_timeout(fp
->cmd_done_wait
, fp
->cmd_done_flag
, 3 * HZ
);
398 if (fp
->cmd_done_flag
== 0) {
399 pr_err("%s: INIT command timed out!, state %x\n", fp
->name
,
400 FZA_STATUS_GET_STATE(readw_u(&fp
->regs
->status
)));
403 stat
= readl_u(&ring
->stat
);
404 if (stat
!= FZA_RING_STAT_SUCCESS
) {
405 pr_err("%s: INIT command failed!, status %02x, state %x\n",
407 FZA_STATUS_GET_STATE(readw_u(&fp
->regs
->status
)));
410 pr_debug("%s: INIT: %lums elapsed\n", fp
->name
,
411 (3 * HZ
- t
) * 1000 / HZ
);
414 *init
= fp
->mmio
+ readl_u(&ring
->buffer
);
418 static void fza_rx_init(struct fza_private
*fp
)
422 /* Fill the host receive descriptor ring. */
423 for (i
= 0; i
< FZA_RING_RX_SIZE
; i
++) {
424 writel_o(0, &fp
->ring_hst_rx
[i
].rmc
);
425 writel_o((fp
->rx_dma
[i
] + 0x1000) >> 9,
426 &fp
->ring_hst_rx
[i
].buffer1
);
427 writel_o(fp
->rx_dma
[i
] >> 9 | FZA_RING_OWN_FZA
,
428 &fp
->ring_hst_rx
[i
].buf0_own
);
432 static void fza_set_rx_mode(struct net_device
*dev
)
434 fza_cmd_send(dev
, FZA_RING_CMD_MODCAM
);
435 fza_cmd_send(dev
, FZA_RING_CMD_MODPROM
);
438 union fza_buffer_txp
{
439 struct fza_buffer_tx
*data_ptr
;
440 struct fza_buffer_tx __iomem
*mmio_ptr
;
443 static int fza_do_xmit(union fza_buffer_txp ub
, int len
,
444 struct net_device
*dev
, int smt
)
446 struct fza_private
*fp
= netdev_priv(dev
);
447 struct fza_buffer_tx __iomem
*rmc_tx_ptr
;
448 int i
, first
, frag_len
, left_len
;
451 if (((((fp
->ring_rmc_txd_index
- 1 + fp
->ring_rmc_tx_size
) -
452 fp
->ring_rmc_tx_index
) % fp
->ring_rmc_tx_size
) *
453 FZA_TX_BUFFER_SIZE
) < len
)
456 first
= fp
->ring_rmc_tx_index
;
459 frag_len
= FZA_TX_BUFFER_SIZE
;
460 /* First descriptor is relinquished last. */
461 own
= FZA_RING_TX_OWN_HOST
;
462 /* First descriptor carries frame length; we don't use cut-through. */
463 rmc
= FZA_RING_TX_SOP
| FZA_RING_TX_VBC
| len
;
465 i
= fp
->ring_rmc_tx_index
;
466 rmc_tx_ptr
= &fp
->buffer_tx
[i
];
468 if (left_len
< FZA_TX_BUFFER_SIZE
)
470 left_len
-= frag_len
;
472 /* Length must be a multiple of 4 as only word writes are
475 frag_len
= (frag_len
+ 3) & ~3;
477 fza_moves(ub
.mmio_ptr
, rmc_tx_ptr
, frag_len
);
479 fza_writes(ub
.data_ptr
, rmc_tx_ptr
, frag_len
);
482 rmc
|= FZA_RING_TX_EOP
; /* Mark last frag. */
484 writel_o(rmc
, &fp
->ring_rmc_tx
[i
].rmc
);
485 writel_o(own
, &fp
->ring_rmc_tx
[i
].own
);
488 fp
->ring_rmc_tx_index
= (fp
->ring_rmc_tx_index
+ 1) %
489 fp
->ring_rmc_tx_size
;
491 /* Settings for intermediate frags. */
492 own
= FZA_RING_TX_OWN_RMC
;
494 } while (left_len
> 0);
496 if (((((fp
->ring_rmc_txd_index
- 1 + fp
->ring_rmc_tx_size
) -
497 fp
->ring_rmc_tx_index
) % fp
->ring_rmc_tx_size
) *
498 FZA_TX_BUFFER_SIZE
) < dev
->mtu
+ dev
->hard_header_len
) {
499 netif_stop_queue(dev
);
500 pr_debug("%s: queue stopped\n", fp
->name
);
503 writel_o(FZA_RING_TX_OWN_RMC
, &fp
->ring_rmc_tx
[first
].own
);
506 writew_o(FZA_CONTROL_A_TX_POLL
, &fp
->regs
->control_a
);
511 static int fza_do_recv_smt(struct fza_buffer_tx
*data_ptr
, int len
,
512 u32 rmc
, struct net_device
*dev
)
514 struct fza_private
*fp
= netdev_priv(dev
);
515 struct fza_buffer_tx __iomem
*smt_rx_ptr
;
519 i
= fp
->ring_smt_rx_index
;
520 own
= readl_o(&fp
->ring_smt_rx
[i
].own
);
521 if ((own
& FZA_RING_OWN_MASK
) == FZA_RING_OWN_FZA
)
524 smt_rx_ptr
= fp
->mmio
+ readl_u(&fp
->ring_smt_rx
[i
].buffer
);
526 /* Length must be a multiple of 4 as only word writes are permitted! */
527 fza_writes(data_ptr
, smt_rx_ptr
, (len
+ 3) & ~3);
529 writel_o(rmc
, &fp
->ring_smt_rx
[i
].rmc
);
530 writel_o(FZA_RING_OWN_FZA
, &fp
->ring_smt_rx
[i
].own
);
532 fp
->ring_smt_rx_index
=
533 (fp
->ring_smt_rx_index
+ 1) % fp
->ring_smt_rx_size
;
536 writew_o(FZA_CONTROL_A_SMT_RX_POLL
, &fp
->regs
->control_a
);
541 static void fza_tx(struct net_device
*dev
)
543 struct fza_private
*fp
= netdev_priv(dev
);
548 i
= fp
->ring_rmc_txd_index
;
549 if (i
== fp
->ring_rmc_tx_index
)
551 own
= readl_o(&fp
->ring_rmc_tx
[i
].own
);
552 if ((own
& FZA_RING_OWN_MASK
) == FZA_RING_TX_OWN_RMC
)
555 rmc
= readl_u(&fp
->ring_rmc_tx
[i
].rmc
);
556 /* Only process the first descriptor. */
557 if ((rmc
& FZA_RING_TX_SOP
) != 0) {
558 if ((rmc
& FZA_RING_TX_DCC_MASK
) ==
559 FZA_RING_TX_DCC_SUCCESS
) {
560 int pkt_len
= (rmc
& FZA_RING_PBC_MASK
) - 3;
563 fp
->stats
.tx_packets
++;
564 fp
->stats
.tx_bytes
+= pkt_len
;
566 fp
->stats
.tx_errors
++;
567 switch (rmc
& FZA_RING_TX_DCC_MASK
) {
568 case FZA_RING_TX_DCC_DTP_SOP
:
569 case FZA_RING_TX_DCC_DTP
:
570 case FZA_RING_TX_DCC_ABORT
:
571 fp
->stats
.tx_aborted_errors
++;
573 case FZA_RING_TX_DCC_UNDRRUN
:
574 fp
->stats
.tx_fifo_errors
++;
576 case FZA_RING_TX_DCC_PARITY
:
583 fp
->ring_rmc_txd_index
= (fp
->ring_rmc_txd_index
+ 1) %
584 fp
->ring_rmc_tx_size
;
587 if (((((fp
->ring_rmc_txd_index
- 1 + fp
->ring_rmc_tx_size
) -
588 fp
->ring_rmc_tx_index
) % fp
->ring_rmc_tx_size
) *
589 FZA_TX_BUFFER_SIZE
) >= dev
->mtu
+ dev
->hard_header_len
) {
590 if (fp
->queue_active
) {
591 netif_wake_queue(dev
);
592 pr_debug("%s: queue woken\n", fp
->name
);
597 static inline int fza_rx_err(struct fza_private
*fp
,
598 const u32 rmc
, const u8 fc
)
600 int len
, min_len
, max_len
;
602 len
= rmc
& FZA_RING_PBC_MASK
;
604 if (unlikely((rmc
& FZA_RING_RX_BAD
) != 0)) {
605 fp
->stats
.rx_errors
++;
607 /* Check special status codes. */
608 if ((rmc
& (FZA_RING_RX_CRC
| FZA_RING_RX_RRR_MASK
|
609 FZA_RING_RX_DA_MASK
| FZA_RING_RX_SA_MASK
)) ==
610 (FZA_RING_RX_CRC
| FZA_RING_RX_RRR_DADDR
|
611 FZA_RING_RX_DA_CAM
| FZA_RING_RX_SA_ALIAS
)) {
613 fp
->stats
.rx_length_errors
++;
616 if ((rmc
& (FZA_RING_RX_CRC
| FZA_RING_RX_RRR_MASK
|
617 FZA_RING_RX_DA_MASK
| FZA_RING_RX_SA_MASK
)) ==
618 (FZA_RING_RX_CRC
| FZA_RING_RX_RRR_DADDR
|
619 FZA_RING_RX_DA_CAM
| FZA_RING_RX_SA_CAM
)) {
620 /* Halt the interface to trigger a reset. */
621 writew_o(FZA_CONTROL_A_HALT
, &fp
->regs
->control_a
);
622 readw_o(&fp
->regs
->control_a
); /* Synchronize. */
626 /* Check the MAC status. */
627 switch (rmc
& FZA_RING_RX_RRR_MASK
) {
628 case FZA_RING_RX_RRR_OK
:
629 if ((rmc
& FZA_RING_RX_CRC
) != 0)
630 fp
->stats
.rx_crc_errors
++;
631 else if ((rmc
& FZA_RING_RX_FSC_MASK
) == 0 ||
632 (rmc
& FZA_RING_RX_FSB_ERR
) != 0)
633 fp
->stats
.rx_frame_errors
++;
635 case FZA_RING_RX_RRR_SADDR
:
636 case FZA_RING_RX_RRR_DADDR
:
637 case FZA_RING_RX_RRR_ABORT
:
638 /* Halt the interface to trigger a reset. */
639 writew_o(FZA_CONTROL_A_HALT
, &fp
->regs
->control_a
);
640 readw_o(&fp
->regs
->control_a
); /* Synchronize. */
642 case FZA_RING_RX_RRR_LENGTH
:
643 fp
->stats
.rx_frame_errors
++;
650 /* Packet received successfully; validate the length. */
651 switch (fc
& FDDI_FC_K_FORMAT_MASK
) {
652 case FDDI_FC_K_FORMAT_MANAGEMENT
:
653 if ((fc
& FDDI_FC_K_CLASS_MASK
) == FDDI_FC_K_CLASS_ASYNC
)
658 case FDDI_FC_K_FORMAT_LLC
:
666 if (len
< min_len
|| len
> max_len
) {
667 fp
->stats
.rx_errors
++;
668 fp
->stats
.rx_length_errors
++;
675 static void fza_rx(struct net_device
*dev
)
677 struct fza_private
*fp
= netdev_priv(dev
);
678 struct sk_buff
*skb
, *newskb
;
679 struct fza_fddihdr
*frame
;
680 dma_addr_t dma
, newdma
;
686 i
= fp
->ring_hst_rx_index
;
687 own
= readl_o(&fp
->ring_hst_rx
[i
].buf0_own
);
688 if ((own
& FZA_RING_OWN_MASK
) == FZA_RING_OWN_FZA
)
691 rmc
= readl_u(&fp
->ring_hst_rx
[i
].rmc
);
692 skb
= fp
->rx_skbuff
[i
];
695 /* The RMC doesn't count the preamble and the starting
696 * delimiter. We fix it up here for a total of 3 octets.
699 len
= (rmc
& FZA_RING_PBC_MASK
) + 3;
700 frame
= (struct fza_fddihdr
*)skb
->data
;
702 /* We need to get at real FC. */
703 dma_sync_single_for_cpu(fp
->bdev
,
705 ((u8
*)&frame
->hdr
.fc
- (u8
*)frame
),
706 sizeof(frame
->hdr
.fc
),
710 if (fza_rx_err(fp
, rmc
, fc
))
713 /* We have to 512-byte-align RX buffers... */
714 newskb
= fza_alloc_skb_irq(dev
, FZA_RX_BUFFER_SIZE
+ 511);
716 fza_skb_align(newskb
, 512);
717 newdma
= dma_map_single(fp
->bdev
, newskb
->data
,
720 if (dma_mapping_error(fp
->bdev
, newdma
)) {
721 dev_kfree_skb_irq(newskb
);
726 int pkt_len
= len
- 7; /* Omit P, SD and FCS. */
730 dma_unmap_single(fp
->bdev
, dma
, FZA_RX_BUFFER_SIZE
,
733 /* Queue SMT frames to the SMT receive ring. */
734 if ((fc
& (FDDI_FC_K_CLASS_MASK
|
735 FDDI_FC_K_FORMAT_MASK
)) ==
736 (FDDI_FC_K_CLASS_ASYNC
|
737 FDDI_FC_K_FORMAT_MANAGEMENT
) &&
738 (rmc
& FZA_RING_RX_DA_MASK
) !=
739 FZA_RING_RX_DA_PROM
) {
740 if (fza_do_recv_smt((struct fza_buffer_tx
*)
743 writel_o(FZA_CONTROL_A_SMT_RX_OVFL
,
744 &fp
->regs
->control_a
);
748 is_multi
= ((frame
->hdr
.daddr
[0] & 0x01) != 0);
750 skb_reserve(skb
, 3); /* Skip over P and SD. */
751 skb_put(skb
, pkt_len
); /* And cut off FCS. */
752 skb
->protocol
= fddi_type_trans(skb
, dev
);
754 rx_stat
= netif_rx(skb
);
755 if (rx_stat
!= NET_RX_DROP
) {
756 fp
->stats
.rx_packets
++;
757 fp
->stats
.rx_bytes
+= pkt_len
;
759 fp
->stats
.multicast
++;
761 fp
->stats
.rx_dropped
++;
766 fp
->rx_skbuff
[i
] = skb
;
769 fp
->stats
.rx_dropped
++;
770 pr_notice("%s: memory squeeze, dropping packet\n",
775 writel_o(0, &fp
->ring_hst_rx
[i
].rmc
);
776 buf
= (dma
+ 0x1000) >> 9;
777 writel_o(buf
, &fp
->ring_hst_rx
[i
].buffer1
);
778 buf
= dma
>> 9 | FZA_RING_OWN_FZA
;
779 writel_o(buf
, &fp
->ring_hst_rx
[i
].buf0_own
);
780 fp
->ring_hst_rx_index
=
781 (fp
->ring_hst_rx_index
+ 1) % fp
->ring_hst_rx_size
;
785 static void fza_tx_smt(struct net_device
*dev
)
787 struct fza_private
*fp
= netdev_priv(dev
);
788 struct fza_buffer_tx __iomem
*smt_tx_ptr
;
793 i
= fp
->ring_smt_tx_index
;
794 own
= readl_o(&fp
->ring_smt_tx
[i
].own
);
795 if ((own
& FZA_RING_OWN_MASK
) == FZA_RING_OWN_FZA
)
798 smt_tx_ptr
= fp
->mmio
+ readl_u(&fp
->ring_smt_tx
[i
].buffer
);
799 len
= readl_u(&fp
->ring_smt_tx
[i
].rmc
) & FZA_RING_PBC_MASK
;
801 if (!netif_queue_stopped(dev
)) {
802 if (dev_nit_active(dev
)) {
803 struct fza_buffer_tx
*skb_data_ptr
;
806 /* Length must be a multiple of 4 as only word
807 * reads are permitted!
809 skb
= fza_alloc_skb_irq(dev
, (len
+ 3) & ~3);
811 goto err_no_skb
; /* Drop. */
813 skb_data_ptr
= (struct fza_buffer_tx
*)
816 fza_reads(smt_tx_ptr
, skb_data_ptr
,
819 skb_reserve(skb
, 3); /* Skip over PRH. */
820 skb_put(skb
, len
- 3);
821 skb_reset_network_header(skb
);
823 dev_queue_xmit_nit(skb
, dev
);
825 dev_kfree_skb_irq(skb
);
831 /* Queue the frame to the RMC transmit ring. */
832 fza_do_xmit((union fza_buffer_txp
)
833 { .mmio_ptr
= smt_tx_ptr
},
837 writel_o(FZA_RING_OWN_FZA
, &fp
->ring_smt_tx
[i
].own
);
838 fp
->ring_smt_tx_index
=
839 (fp
->ring_smt_tx_index
+ 1) % fp
->ring_smt_tx_size
;
843 static void fza_uns(struct net_device
*dev
)
845 struct fza_private
*fp
= netdev_priv(dev
);
850 i
= fp
->ring_uns_index
;
851 own
= readl_o(&fp
->ring_uns
[i
].own
);
852 if ((own
& FZA_RING_OWN_MASK
) == FZA_RING_OWN_FZA
)
855 if (readl_u(&fp
->ring_uns
[i
].id
) == FZA_RING_UNS_RX_OVER
) {
856 fp
->stats
.rx_errors
++;
857 fp
->stats
.rx_over_errors
++;
860 writel_o(FZA_RING_OWN_FZA
, &fp
->ring_uns
[i
].own
);
862 (fp
->ring_uns_index
+ 1) % FZA_RING_UNS_SIZE
;
866 static void fza_tx_flush(struct net_device
*dev
)
868 struct fza_private
*fp
= netdev_priv(dev
);
872 /* Clean up the SMT TX ring. */
873 i
= fp
->ring_smt_tx_index
;
875 writel_o(FZA_RING_OWN_FZA
, &fp
->ring_smt_tx
[i
].own
);
876 fp
->ring_smt_tx_index
=
877 (fp
->ring_smt_tx_index
+ 1) % fp
->ring_smt_tx_size
;
879 } while (i
!= fp
->ring_smt_tx_index
);
881 /* Clean up the RMC TX ring. */
882 i
= fp
->ring_rmc_tx_index
;
884 own
= readl_o(&fp
->ring_rmc_tx
[i
].own
);
885 if ((own
& FZA_RING_OWN_MASK
) == FZA_RING_TX_OWN_RMC
) {
886 u32 rmc
= readl_u(&fp
->ring_rmc_tx
[i
].rmc
);
888 writel_u(rmc
| FZA_RING_TX_DTP
,
889 &fp
->ring_rmc_tx
[i
].rmc
);
891 fp
->ring_rmc_tx_index
=
892 (fp
->ring_rmc_tx_index
+ 1) % fp
->ring_rmc_tx_size
;
894 } while (i
!= fp
->ring_rmc_tx_index
);
897 writew_o(FZA_CONTROL_A_FLUSH_DONE
, &fp
->regs
->control_a
);
900 static irqreturn_t
fza_interrupt(int irq
, void *dev_id
)
902 struct net_device
*dev
= dev_id
;
903 struct fza_private
*fp
= netdev_priv(dev
);
906 /* Get interrupt events. */
907 int_event
= readw_o(&fp
->regs
->int_event
) & fp
->int_mask
;
911 /* Clear the events. */
912 writew_u(int_event
, &fp
->regs
->int_event
);
914 /* Now handle the events. The order matters. */
916 /* Command finished interrupt. */
917 if ((int_event
& FZA_EVENT_CMD_DONE
) != 0) {
918 fp
->irq_count_cmd_done
++;
920 spin_lock(&fp
->lock
);
921 fp
->cmd_done_flag
= 1;
922 wake_up(&fp
->cmd_done_wait
);
923 spin_unlock(&fp
->lock
);
926 /* Transmit finished interrupt. */
927 if ((int_event
& FZA_EVENT_TX_DONE
) != 0) {
928 fp
->irq_count_tx_done
++;
932 /* Host receive interrupt. */
933 if ((int_event
& FZA_EVENT_RX_POLL
) != 0) {
934 fp
->irq_count_rx_poll
++;
938 /* SMT transmit interrupt. */
939 if ((int_event
& FZA_EVENT_SMT_TX_POLL
) != 0) {
940 fp
->irq_count_smt_tx_poll
++;
944 /* Transmit ring flush request. */
945 if ((int_event
& FZA_EVENT_FLUSH_TX
) != 0) {
946 fp
->irq_count_flush_tx
++;
950 /* Link status change interrupt. */
951 if ((int_event
& FZA_EVENT_LINK_ST_CHG
) != 0) {
954 fp
->irq_count_link_st_chg
++;
955 status
= readw_u(&fp
->regs
->status
);
956 if (FZA_STATUS_GET_LINK(status
) == FZA_LINK_ON
) {
957 netif_carrier_on(dev
);
958 pr_info("%s: link available\n", fp
->name
);
960 netif_carrier_off(dev
);
961 pr_info("%s: link unavailable\n", fp
->name
);
965 /* Unsolicited event interrupt. */
966 if ((int_event
& FZA_EVENT_UNS_POLL
) != 0) {
967 fp
->irq_count_uns_poll
++;
971 /* State change interrupt. */
972 if ((int_event
& FZA_EVENT_STATE_CHG
) != 0) {
975 fp
->irq_count_state_chg
++;
977 status
= readw_u(&fp
->regs
->status
);
978 state
= FZA_STATUS_GET_STATE(status
);
979 pr_debug("%s: state change: %x\n", fp
->name
, state
);
981 case FZA_STATE_RESET
:
984 case FZA_STATE_UNINITIALIZED
:
985 netif_carrier_off(dev
);
986 del_timer_sync(&fp
->reset_timer
);
987 fp
->ring_cmd_index
= 0;
988 fp
->ring_uns_index
= 0;
989 fp
->ring_rmc_tx_index
= 0;
990 fp
->ring_rmc_txd_index
= 0;
991 fp
->ring_hst_rx_index
= 0;
992 fp
->ring_smt_tx_index
= 0;
993 fp
->ring_smt_rx_index
= 0;
994 if (fp
->state
> state
) {
995 pr_info("%s: OK\n", fp
->name
);
996 fza_cmd_send(dev
, FZA_RING_CMD_INIT
);
1000 case FZA_STATE_INITIALIZED
:
1001 if (fp
->state
> state
) {
1002 fza_set_rx_mode(dev
);
1003 fza_cmd_send(dev
, FZA_RING_CMD_PARAM
);
1007 case FZA_STATE_RUNNING
:
1008 case FZA_STATE_MAINTENANCE
:
1011 fp
->queue_active
= 1;
1012 netif_wake_queue(dev
);
1013 pr_debug("%s: queue woken\n", fp
->name
);
1016 case FZA_STATE_HALTED
:
1017 fp
->queue_active
= 0;
1018 netif_stop_queue(dev
);
1019 pr_debug("%s: queue stopped\n", fp
->name
);
1020 del_timer_sync(&fp
->reset_timer
);
1021 pr_warn("%s: halted, reason: %x\n", fp
->name
,
1022 FZA_STATUS_GET_HALT(status
));
1024 pr_info("%s: resetting the board...\n", fp
->name
);
1026 fp
->timer_state
= 0;
1027 fp
->reset_timer
.expires
= jiffies
+ 45 * HZ
;
1028 add_timer(&fp
->reset_timer
);
1032 pr_warn("%s: undefined state: %x\n", fp
->name
, state
);
1036 spin_lock(&fp
->lock
);
1037 fp
->state_chg_flag
= 1;
1038 wake_up(&fp
->state_chg_wait
);
1039 spin_unlock(&fp
->lock
);
1045 static void fza_reset_timer(struct timer_list
*t
)
1047 struct fza_private
*fp
= from_timer(fp
, t
, reset_timer
);
1049 if (!fp
->timer_state
) {
1050 pr_err("%s: RESET timed out!\n", fp
->name
);
1051 pr_info("%s: trying harder...\n", fp
->name
);
1053 /* Assert the board reset. */
1054 writew_o(FZA_RESET_INIT
, &fp
->regs
->reset
);
1055 readw_o(&fp
->regs
->reset
); /* Synchronize. */
1057 fp
->timer_state
= 1;
1058 fp
->reset_timer
.expires
= jiffies
+ HZ
;
1060 /* Clear the board reset. */
1061 writew_u(FZA_RESET_CLR
, &fp
->regs
->reset
);
1063 /* Enable all interrupt events we handle. */
1064 writew_o(fp
->int_mask
, &fp
->regs
->int_mask
);
1065 readw_o(&fp
->regs
->int_mask
); /* Synchronize. */
1067 fp
->timer_state
= 0;
1068 fp
->reset_timer
.expires
= jiffies
+ 45 * HZ
;
1070 add_timer(&fp
->reset_timer
);
1073 static int fza_set_mac_address(struct net_device
*dev
, void *addr
)
1078 static netdev_tx_t
fza_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1080 struct fza_private
*fp
= netdev_priv(dev
);
1081 unsigned int old_mask
, new_mask
;
1085 skb_push(skb
, 3); /* Make room for PRH. */
1087 /* Decode FC to set PRH. */
1091 skb
->data
[2] = FZA_PRH2_NORMAL
;
1092 if ((fc
& FDDI_FC_K_CLASS_MASK
) == FDDI_FC_K_CLASS_SYNC
)
1093 skb
->data
[0] |= FZA_PRH0_FRAME_SYNC
;
1094 switch (fc
& FDDI_FC_K_FORMAT_MASK
) {
1095 case FDDI_FC_K_FORMAT_MANAGEMENT
:
1096 if ((fc
& FDDI_FC_K_CONTROL_MASK
) == 0) {
1098 skb
->data
[0] |= FZA_PRH0_TKN_TYPE_IMM
;
1099 skb
->data
[1] |= FZA_PRH1_TKN_SEND_NONE
;
1102 skb
->data
[0] |= FZA_PRH0_TKN_TYPE_UNR
;
1103 skb
->data
[1] |= FZA_PRH1_TKN_SEND_UNR
;
1105 skb
->data
[1] |= FZA_PRH1_CRC_NORMAL
;
1107 case FDDI_FC_K_FORMAT_LLC
:
1108 case FDDI_FC_K_FORMAT_FUTURE
:
1109 skb
->data
[0] |= FZA_PRH0_TKN_TYPE_UNR
;
1110 skb
->data
[1] |= FZA_PRH1_CRC_NORMAL
| FZA_PRH1_TKN_SEND_UNR
;
1112 case FDDI_FC_K_FORMAT_IMPLEMENTOR
:
1113 skb
->data
[0] |= FZA_PRH0_TKN_TYPE_UNR
;
1114 skb
->data
[1] |= FZA_PRH1_TKN_SEND_ORIG
;
1118 /* SMT transmit interrupts may sneak frames into the RMC
1119 * transmit ring. We disable them while queueing a frame
1120 * to maintain consistency.
1122 old_mask
= fp
->int_mask
;
1123 new_mask
= old_mask
& ~FZA_MASK_SMT_TX_POLL
;
1124 writew_u(new_mask
, &fp
->regs
->int_mask
);
1125 readw_o(&fp
->regs
->int_mask
); /* Synchronize. */
1126 fp
->int_mask
= new_mask
;
1127 ret
= fza_do_xmit((union fza_buffer_txp
)
1128 { .data_ptr
= (struct fza_buffer_tx
*)skb
->data
},
1130 fp
->int_mask
= old_mask
;
1131 writew_u(fp
->int_mask
, &fp
->regs
->int_mask
);
1134 /* Probably an SMT packet filled the remaining space,
1135 * so just stop the queue, but don't report it as an error.
1137 netif_stop_queue(dev
);
1138 pr_debug("%s: queue stopped\n", fp
->name
);
1139 fp
->stats
.tx_dropped
++;
1147 static int fza_open(struct net_device
*dev
)
1149 struct fza_private
*fp
= netdev_priv(dev
);
1150 struct fza_ring_cmd __iomem
*ring
;
1151 struct sk_buff
*skb
;
1152 unsigned long flags
;
1158 for (i
= 0; i
< FZA_RING_RX_SIZE
; i
++) {
1159 /* We have to 512-byte-align RX buffers... */
1160 skb
= fza_alloc_skb(dev
, FZA_RX_BUFFER_SIZE
+ 511);
1162 fza_skb_align(skb
, 512);
1163 dma
= dma_map_single(fp
->bdev
, skb
->data
,
1166 if (dma_mapping_error(fp
->bdev
, dma
)) {
1172 for (--i
; i
>= 0; i
--) {
1173 dma_unmap_single(fp
->bdev
, fp
->rx_dma
[i
],
1176 dev_kfree_skb(fp
->rx_skbuff
[i
]);
1178 fp
->rx_skbuff
[i
] = NULL
;
1182 fp
->rx_skbuff
[i
] = skb
;
1183 fp
->rx_dma
[i
] = dma
;
1186 ret
= fza_init_send(dev
, NULL
);
1190 /* Purger and Beacon multicasts need to be supplied before PARAM. */
1191 fza_set_rx_mode(dev
);
1193 spin_lock_irqsave(&fp
->lock
, flags
);
1194 fp
->cmd_done_flag
= 0;
1195 ring
= fza_cmd_send(dev
, FZA_RING_CMD_PARAM
);
1196 spin_unlock_irqrestore(&fp
->lock
, flags
);
1200 t
= wait_event_timeout(fp
->cmd_done_wait
, fp
->cmd_done_flag
, 3 * HZ
);
1201 if (fp
->cmd_done_flag
== 0) {
1202 pr_err("%s: PARAM command timed out!, state %x\n", fp
->name
,
1203 FZA_STATUS_GET_STATE(readw_u(&fp
->regs
->status
)));
1206 stat
= readl_u(&ring
->stat
);
1207 if (stat
!= FZA_RING_STAT_SUCCESS
) {
1208 pr_err("%s: PARAM command failed!, status %02x, state %x\n",
1210 FZA_STATUS_GET_STATE(readw_u(&fp
->regs
->status
)));
1213 pr_debug("%s: PARAM: %lums elapsed\n", fp
->name
,
1214 (3 * HZ
- t
) * 1000 / HZ
);
1219 static int fza_close(struct net_device
*dev
)
1221 struct fza_private
*fp
= netdev_priv(dev
);
1222 unsigned long flags
;
1227 netif_stop_queue(dev
);
1228 pr_debug("%s: queue stopped\n", fp
->name
);
1230 del_timer_sync(&fp
->reset_timer
);
1231 spin_lock_irqsave(&fp
->lock
, flags
);
1232 fp
->state
= FZA_STATE_UNINITIALIZED
;
1233 fp
->state_chg_flag
= 0;
1234 /* Shut the interface down. */
1235 writew_o(FZA_CONTROL_A_SHUT
, &fp
->regs
->control_a
);
1236 readw_o(&fp
->regs
->control_a
); /* Synchronize. */
1237 spin_unlock_irqrestore(&fp
->lock
, flags
);
1239 /* DEC says SHUT needs up to 10 seconds to complete. */
1240 t
= wait_event_timeout(fp
->state_chg_wait
, fp
->state_chg_flag
,
1242 state
= FZA_STATUS_GET_STATE(readw_o(&fp
->regs
->status
));
1243 if (fp
->state_chg_flag
== 0) {
1244 pr_err("%s: SHUT timed out!, state %x\n", fp
->name
, state
);
1247 if (state
!= FZA_STATE_UNINITIALIZED
) {
1248 pr_err("%s: SHUT failed!, state %x\n", fp
->name
, state
);
1251 pr_debug("%s: SHUT: %lums elapsed\n", fp
->name
,
1252 (15 * HZ
- t
) * 1000 / HZ
);
1254 for (i
= 0; i
< FZA_RING_RX_SIZE
; i
++)
1255 if (fp
->rx_skbuff
[i
]) {
1256 dma_unmap_single(fp
->bdev
, fp
->rx_dma
[i
],
1257 FZA_RX_BUFFER_SIZE
, DMA_FROM_DEVICE
);
1258 dev_kfree_skb(fp
->rx_skbuff
[i
]);
1260 fp
->rx_skbuff
[i
] = NULL
;
1266 static struct net_device_stats
*fza_get_stats(struct net_device
*dev
)
1268 struct fza_private
*fp
= netdev_priv(dev
);
1273 static int fza_probe(struct device
*bdev
)
1275 static const struct net_device_ops netdev_ops
= {
1276 .ndo_open
= fza_open
,
1277 .ndo_stop
= fza_close
,
1278 .ndo_start_xmit
= fza_start_xmit
,
1279 .ndo_set_rx_mode
= fza_set_rx_mode
,
1280 .ndo_set_mac_address
= fza_set_mac_address
,
1281 .ndo_get_stats
= fza_get_stats
,
1283 static int version_printed
;
1284 char rom_rev
[4], fw_rev
[4], rmc_rev
[4];
1285 struct tc_dev
*tdev
= to_tc_dev(bdev
);
1286 struct fza_cmd_init __iomem
*init
;
1287 resource_size_t start
, len
;
1288 struct net_device
*dev
;
1289 struct fza_private
*fp
;
1290 uint smt_ver
, pmd_type
;
1295 if (!version_printed
) {
1296 pr_info("%s", version
);
1297 version_printed
= 1;
1300 dev
= alloc_fddidev(sizeof(*fp
));
1303 SET_NETDEV_DEV(dev
, bdev
);
1305 fp
= netdev_priv(dev
);
1306 dev_set_drvdata(bdev
, dev
);
1309 fp
->name
= dev_name(bdev
);
1311 /* Request the I/O MEM resource. */
1312 start
= tdev
->resource
.start
;
1313 len
= tdev
->resource
.end
- start
+ 1;
1314 if (!request_mem_region(start
, len
, dev_name(bdev
))) {
1315 pr_err("%s: cannot reserve MMIO region\n", fp
->name
);
1320 /* MMIO mapping setup. */
1321 mmio
= ioremap(start
, len
);
1323 pr_err("%s: cannot map MMIO\n", fp
->name
);
1325 goto err_out_resource
;
1328 /* Initialize the new device structure. */
1330 case FZA_LOOP_NORMAL
:
1331 case FZA_LOOP_INTERN
:
1332 case FZA_LOOP_EXTERN
:
1335 loopback
= FZA_LOOP_NORMAL
;
1339 dev
->irq
= tdev
->interrupt
;
1341 pr_info("%s: DEC FDDIcontroller 700 or 700-C at 0x%08llx, irq %d\n",
1342 fp
->name
, (long long)tdev
->resource
.start
, dev
->irq
);
1343 pr_debug("%s: mapped at: 0x%p\n", fp
->name
, mmio
);
1345 fp
->regs
= mmio
+ FZA_REG_BASE
;
1346 fp
->ring_cmd
= mmio
+ FZA_RING_CMD
;
1347 fp
->ring_uns
= mmio
+ FZA_RING_UNS
;
1349 init_waitqueue_head(&fp
->state_chg_wait
);
1350 init_waitqueue_head(&fp
->cmd_done_wait
);
1351 spin_lock_init(&fp
->lock
);
1352 fp
->int_mask
= FZA_MASK_NORMAL
;
1354 timer_setup(&fp
->reset_timer
, fza_reset_timer
, 0);
1356 /* Sanitize the board. */
1358 fza_do_shutdown(fp
);
1360 ret
= request_irq(dev
->irq
, fza_interrupt
, IRQF_SHARED
, fp
->name
, dev
);
1362 pr_err("%s: unable to get IRQ %d!\n", fp
->name
, dev
->irq
);
1366 /* Enable the driver mode. */
1367 writew_o(FZA_CONTROL_B_DRIVER
, &fp
->regs
->control_b
);
1369 /* For some reason transmit done interrupts can trigger during
1370 * reset. This avoids a division error in the handler.
1372 fp
->ring_rmc_tx_size
= FZA_RING_TX_SIZE
;
1374 ret
= fza_reset(fp
);
1378 ret
= fza_init_send(dev
, &init
);
1382 fza_reads(&init
->hw_addr
, &hw_addr
, sizeof(hw_addr
));
1383 memcpy(dev
->dev_addr
, &hw_addr
, FDDI_K_ALEN
);
1385 fza_reads(&init
->rom_rev
, &rom_rev
, sizeof(rom_rev
));
1386 fza_reads(&init
->fw_rev
, &fw_rev
, sizeof(fw_rev
));
1387 fza_reads(&init
->rmc_rev
, &rmc_rev
, sizeof(rmc_rev
));
1388 for (i
= 3; i
>= 0 && rom_rev
[i
] == ' '; i
--)
1390 for (i
= 3; i
>= 0 && fw_rev
[i
] == ' '; i
--)
1392 for (i
= 3; i
>= 0 && rmc_rev
[i
] == ' '; i
--)
1395 fp
->ring_rmc_tx
= mmio
+ readl_u(&init
->rmc_tx
);
1396 fp
->ring_rmc_tx_size
= readl_u(&init
->rmc_tx_size
);
1397 fp
->ring_hst_rx
= mmio
+ readl_u(&init
->hst_rx
);
1398 fp
->ring_hst_rx_size
= readl_u(&init
->hst_rx_size
);
1399 fp
->ring_smt_tx
= mmio
+ readl_u(&init
->smt_tx
);
1400 fp
->ring_smt_tx_size
= readl_u(&init
->smt_tx_size
);
1401 fp
->ring_smt_rx
= mmio
+ readl_u(&init
->smt_rx
);
1402 fp
->ring_smt_rx_size
= readl_u(&init
->smt_rx_size
);
1404 fp
->buffer_tx
= mmio
+ FZA_TX_BUFFER_ADDR(readl_u(&init
->rmc_tx
));
1406 fp
->t_max
= readl_u(&init
->def_t_max
);
1407 fp
->t_req
= readl_u(&init
->def_t_req
);
1408 fp
->tvx
= readl_u(&init
->def_tvx
);
1409 fp
->lem_threshold
= readl_u(&init
->lem_threshold
);
1410 fza_reads(&init
->def_station_id
, &fp
->station_id
,
1411 sizeof(fp
->station_id
));
1412 fp
->rtoken_timeout
= readl_u(&init
->rtoken_timeout
);
1413 fp
->ring_purger
= readl_u(&init
->ring_purger
);
1415 smt_ver
= readl_u(&init
->smt_ver
);
1416 pmd_type
= readl_u(&init
->pmd_type
);
1418 pr_debug("%s: INIT parameters:\n", fp
->name
);
1419 pr_debug(" tx_mode: %u\n", readl_u(&init
->tx_mode
));
1420 pr_debug(" hst_rx_size: %u\n", readl_u(&init
->hst_rx_size
));
1421 pr_debug(" rmc_rev: %.4s\n", rmc_rev
);
1422 pr_debug(" rom_rev: %.4s\n", rom_rev
);
1423 pr_debug(" fw_rev: %.4s\n", fw_rev
);
1424 pr_debug(" mop_type: %u\n", readl_u(&init
->mop_type
));
1425 pr_debug(" hst_rx: 0x%08x\n", readl_u(&init
->hst_rx
));
1426 pr_debug(" rmc_tx: 0x%08x\n", readl_u(&init
->rmc_tx
));
1427 pr_debug(" rmc_tx_size: %u\n", readl_u(&init
->rmc_tx_size
));
1428 pr_debug(" smt_tx: 0x%08x\n", readl_u(&init
->smt_tx
));
1429 pr_debug(" smt_tx_size: %u\n", readl_u(&init
->smt_tx_size
));
1430 pr_debug(" smt_rx: 0x%08x\n", readl_u(&init
->smt_rx
));
1431 pr_debug(" smt_rx_size: %u\n", readl_u(&init
->smt_rx_size
));
1432 /* TC systems are always LE, so don't bother swapping. */
1433 pr_debug(" hw_addr: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
1434 (readl_u(&init
->hw_addr
[0]) >> 0) & 0xff,
1435 (readl_u(&init
->hw_addr
[0]) >> 8) & 0xff,
1436 (readl_u(&init
->hw_addr
[0]) >> 16) & 0xff,
1437 (readl_u(&init
->hw_addr
[0]) >> 24) & 0xff,
1438 (readl_u(&init
->hw_addr
[1]) >> 0) & 0xff,
1439 (readl_u(&init
->hw_addr
[1]) >> 8) & 0xff,
1440 (readl_u(&init
->hw_addr
[1]) >> 16) & 0xff,
1441 (readl_u(&init
->hw_addr
[1]) >> 24) & 0xff);
1442 pr_debug(" def_t_req: %u\n", readl_u(&init
->def_t_req
));
1443 pr_debug(" def_tvx: %u\n", readl_u(&init
->def_tvx
));
1444 pr_debug(" def_t_max: %u\n", readl_u(&init
->def_t_max
));
1445 pr_debug(" lem_threshold: %u\n", readl_u(&init
->lem_threshold
));
1446 /* Don't bother swapping, see above. */
1447 pr_debug(" def_station_id: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
1448 (readl_u(&init
->def_station_id
[0]) >> 0) & 0xff,
1449 (readl_u(&init
->def_station_id
[0]) >> 8) & 0xff,
1450 (readl_u(&init
->def_station_id
[0]) >> 16) & 0xff,
1451 (readl_u(&init
->def_station_id
[0]) >> 24) & 0xff,
1452 (readl_u(&init
->def_station_id
[1]) >> 0) & 0xff,
1453 (readl_u(&init
->def_station_id
[1]) >> 8) & 0xff,
1454 (readl_u(&init
->def_station_id
[1]) >> 16) & 0xff,
1455 (readl_u(&init
->def_station_id
[1]) >> 24) & 0xff);
1456 pr_debug(" pmd_type_alt: %u\n", readl_u(&init
->pmd_type_alt
));
1457 pr_debug(" smt_ver: %u\n", readl_u(&init
->smt_ver
));
1458 pr_debug(" rtoken_timeout: %u\n", readl_u(&init
->rtoken_timeout
));
1459 pr_debug(" ring_purger: %u\n", readl_u(&init
->ring_purger
));
1460 pr_debug(" smt_ver_max: %u\n", readl_u(&init
->smt_ver_max
));
1461 pr_debug(" smt_ver_min: %u\n", readl_u(&init
->smt_ver_min
));
1462 pr_debug(" pmd_type: %u\n", readl_u(&init
->pmd_type
));
1464 pr_info("%s: model %s, address %pMF\n",
1466 pmd_type
== FZA_PMD_TYPE_TW
?
1467 "700-C (DEFZA-CA), ThinWire PMD selected" :
1468 pmd_type
== FZA_PMD_TYPE_STP
?
1469 "700-C (DEFZA-CA), STP PMD selected" :
1470 "700 (DEFZA-AA), MMF PMD",
1472 pr_info("%s: ROM rev. %.4s, firmware rev. %.4s, RMC rev. %.4s, "
1473 "SMT ver. %u\n", fp
->name
, rom_rev
, fw_rev
, rmc_rev
, smt_ver
);
1475 /* Now that we fetched initial parameters just shut the interface
1478 ret
= fza_close(dev
);
1482 /* The FZA-specific entries in the device structure. */
1483 dev
->netdev_ops
= &netdev_ops
;
1485 ret
= register_netdev(dev
);
1489 pr_info("%s: registered as %s\n", fp
->name
, dev
->name
);
1490 fp
->name
= (const char *)dev
->name
;
1496 del_timer_sync(&fp
->reset_timer
);
1497 fza_do_shutdown(fp
);
1498 free_irq(dev
->irq
, dev
);
1504 release_mem_region(start
, len
);
1509 pr_err("%s: initialization failure, aborting!\n", fp
->name
);
/* Driver removal: unregister the network device, quiesce the board,
 * and release all resources acquired by fza_probe() in reverse order.
 */
static int fza_remove(struct device *bdev)
{
	struct net_device *dev = dev_get_drvdata(bdev);
	struct fza_private *fp = netdev_priv(dev);
	struct tc_dev *tdev = to_tc_dev(bdev);
	resource_size_t start, len;

	unregister_netdev(dev);

	del_timer_sync(&fp->reset_timer);
	fza_do_shutdown(fp);
	free_irq(dev->irq, dev);

	/* NOTE(review): iounmap reconstructed -- the extraction dropped
	 * lines here; probe maps MMIO, so remove must unmap it.  Confirm
	 * against the original.
	 */
	iounmap(fp->mmio);

	start = tdev->resource.start;
	len = tdev->resource.end - start + 1;
	release_mem_region(start, len);

	free_netdev(dev);

	return 0;
}
/* TURBOchannel ROM vendor/name ID strings of the boards this driver
 * handles; matched by the TC bus core against devices on the bus.
 * NOTE(review): the vendor field is normally an 8-character
 * space-padded string ("DEC" plus spaces) -- the extraction may have
 * collapsed whitespace; confirm against the original.
 */
static struct tc_device_id const fza_tc_table[] = {
	{ "DEC ", "PMAF-AA " },
	{ }	/* NOTE(review): sentinel entry restored; dropped in extraction. */
};
MODULE_DEVICE_TABLE(tc, fza_tc_table);
/* TURBOchannel driver glue binding fza_probe()/fza_remove() to
 * matching devices on the TC bus.
 * NOTE(review): the .driver sub-struct lines (.name, .probe, and the
 * closing braces) were dropped by the extraction and are reconstructed
 * here; confirm against the original.
 */
static struct tc_driver fza_driver = {
	.id_table	= fza_tc_table,
	.driver		= {
		.name	= DRV_NAME,
		.bus	= &tc_bus_type,
		.probe	= fza_probe,
		.remove	= fza_remove,
	},
};
/* Module entry point: register this driver with the TURBOchannel
 * bus core, which will probe matching devices.
 */
static int fza_init(void)
{
	return tc_register_driver(&fza_driver);
}

/* Module exit point: detach this driver from the TURBOchannel bus core. */
static void fza_exit(void)
{
	tc_unregister_driver(&fza_driver);
}

module_init(fza_init);
module_exit(fza_exit);