2 * drivers/net/ibm_newemac/mal.c
4 * Memory Access Layer (MAL) support
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
9 * Based on the arch/ppc version of the driver:
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14 * Based on original work by
15 * Benjamin Herrenschmidt <benh@kernel.crashing.org>,
16 * David Gibson <hermes@gibson.dropbear.id.au>,
18 * Armin Kuster <akuster@mvista.com>
 * Copyright 2002 MontaVista Software Inc.
21 * This program is free software; you can redistribute it and/or modify it
22 * under the terms of the GNU General Public License as published by the
23 * Free Software Foundation; either version 2 of the License, or (at your
24 * option) any later version.
28 #include <linux/delay.h>
29 #include <linux/slab.h>
32 #include <asm/dcr-regs.h>
36 int __devinit
mal_register_commac(struct mal_instance
*mal
,
37 struct mal_commac
*commac
)
41 spin_lock_irqsave(&mal
->lock
, flags
);
43 MAL_DBG(mal
, "reg(%08x, %08x)" NL
,
44 commac
->tx_chan_mask
, commac
->rx_chan_mask
);
46 /* Don't let multiple commacs claim the same channel(s) */
47 if ((mal
->tx_chan_mask
& commac
->tx_chan_mask
) ||
48 (mal
->rx_chan_mask
& commac
->rx_chan_mask
)) {
49 spin_unlock_irqrestore(&mal
->lock
, flags
);
50 printk(KERN_WARNING
"mal%d: COMMAC channels conflict!\n",
55 if (list_empty(&mal
->list
))
56 napi_enable(&mal
->napi
);
57 mal
->tx_chan_mask
|= commac
->tx_chan_mask
;
58 mal
->rx_chan_mask
|= commac
->rx_chan_mask
;
59 list_add(&commac
->list
, &mal
->list
);
61 spin_unlock_irqrestore(&mal
->lock
, flags
);
66 void mal_unregister_commac(struct mal_instance
*mal
,
67 struct mal_commac
*commac
)
71 spin_lock_irqsave(&mal
->lock
, flags
);
73 MAL_DBG(mal
, "unreg(%08x, %08x)" NL
,
74 commac
->tx_chan_mask
, commac
->rx_chan_mask
);
76 mal
->tx_chan_mask
&= ~commac
->tx_chan_mask
;
77 mal
->rx_chan_mask
&= ~commac
->rx_chan_mask
;
78 list_del_init(&commac
->list
);
79 if (list_empty(&mal
->list
))
80 napi_disable(&mal
->napi
);
82 spin_unlock_irqrestore(&mal
->lock
, flags
);
85 int mal_set_rcbs(struct mal_instance
*mal
, int channel
, unsigned long size
)
87 BUG_ON(channel
< 0 || channel
>= mal
->num_rx_chans
||
88 size
> MAL_MAX_RX_SIZE
);
90 MAL_DBG(mal
, "set_rbcs(%d, %lu)" NL
, channel
, size
);
94 "mal%d: incorrect RX size %lu for the channel %d\n",
95 mal
->index
, size
, channel
);
99 set_mal_dcrn(mal
, MAL_RCBS(channel
), size
>> 4);
103 int mal_tx_bd_offset(struct mal_instance
*mal
, int channel
)
105 BUG_ON(channel
< 0 || channel
>= mal
->num_tx_chans
);
107 return channel
* NUM_TX_BUFF
;
110 int mal_rx_bd_offset(struct mal_instance
*mal
, int channel
)
112 BUG_ON(channel
< 0 || channel
>= mal
->num_rx_chans
);
113 return mal
->num_tx_chans
* NUM_TX_BUFF
+ channel
* NUM_RX_BUFF
;
116 void mal_enable_tx_channel(struct mal_instance
*mal
, int channel
)
120 spin_lock_irqsave(&mal
->lock
, flags
);
122 MAL_DBG(mal
, "enable_tx(%d)" NL
, channel
);
124 set_mal_dcrn(mal
, MAL_TXCASR
,
125 get_mal_dcrn(mal
, MAL_TXCASR
) | MAL_CHAN_MASK(channel
));
127 spin_unlock_irqrestore(&mal
->lock
, flags
);
130 void mal_disable_tx_channel(struct mal_instance
*mal
, int channel
)
132 set_mal_dcrn(mal
, MAL_TXCARR
, MAL_CHAN_MASK(channel
));
134 MAL_DBG(mal
, "disable_tx(%d)" NL
, channel
);
137 void mal_enable_rx_channel(struct mal_instance
*mal
, int channel
)
142 * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
143 * of 8, but enabling in MAL_RXCASR needs the divided by 8 value
149 spin_lock_irqsave(&mal
->lock
, flags
);
151 MAL_DBG(mal
, "enable_rx(%d)" NL
, channel
);
153 set_mal_dcrn(mal
, MAL_RXCASR
,
154 get_mal_dcrn(mal
, MAL_RXCASR
) | MAL_CHAN_MASK(channel
));
156 spin_unlock_irqrestore(&mal
->lock
, flags
);
159 void mal_disable_rx_channel(struct mal_instance
*mal
, int channel
)
162 * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
163 * of 8, but enabling in MAL_RXCASR needs the divided by 8 value
169 set_mal_dcrn(mal
, MAL_RXCARR
, MAL_CHAN_MASK(channel
));
171 MAL_DBG(mal
, "disable_rx(%d)" NL
, channel
);
174 void mal_poll_add(struct mal_instance
*mal
, struct mal_commac
*commac
)
178 spin_lock_irqsave(&mal
->lock
, flags
);
180 MAL_DBG(mal
, "poll_add(%p)" NL
, commac
);
182 /* starts disabled */
183 set_bit(MAL_COMMAC_POLL_DISABLED
, &commac
->flags
);
185 list_add_tail(&commac
->poll_list
, &mal
->poll_list
);
187 spin_unlock_irqrestore(&mal
->lock
, flags
);
190 void mal_poll_del(struct mal_instance
*mal
, struct mal_commac
*commac
)
194 spin_lock_irqsave(&mal
->lock
, flags
);
196 MAL_DBG(mal
, "poll_del(%p)" NL
, commac
);
198 list_del(&commac
->poll_list
);
200 spin_unlock_irqrestore(&mal
->lock
, flags
);
203 /* synchronized by mal_poll() */
204 static inline void mal_enable_eob_irq(struct mal_instance
*mal
)
206 MAL_DBG2(mal
, "enable_irq" NL
);
208 // XXX might want to cache MAL_CFG as the DCR read can be slooooow
209 set_mal_dcrn(mal
, MAL_CFG
, get_mal_dcrn(mal
, MAL_CFG
) | MAL_CFG_EOPIE
);
212 /* synchronized by NAPI state */
213 static inline void mal_disable_eob_irq(struct mal_instance
*mal
)
215 // XXX might want to cache MAL_CFG as the DCR read can be slooooow
216 set_mal_dcrn(mal
, MAL_CFG
, get_mal_dcrn(mal
, MAL_CFG
) & ~MAL_CFG_EOPIE
);
218 MAL_DBG2(mal
, "disable_irq" NL
);
221 static irqreturn_t
mal_serr(int irq
, void *dev_instance
)
223 struct mal_instance
*mal
= dev_instance
;
225 u32 esr
= get_mal_dcrn(mal
, MAL_ESR
);
227 /* Clear the error status register */
228 set_mal_dcrn(mal
, MAL_ESR
, esr
);
230 MAL_DBG(mal
, "SERR %08x" NL
, esr
);
232 if (esr
& MAL_ESR_EVB
) {
233 if (esr
& MAL_ESR_DE
) {
234 /* We ignore Descriptor error,
235 * TXDE or RXDE interrupt will be generated anyway.
240 if (esr
& MAL_ESR_PEIN
) {
241 /* PLB error, it's probably buggy hardware or
242 * incorrect physical address in BD (i.e. bug)
246 "mal%d: system error, "
247 "PLB (ESR = 0x%08x)\n",
252 /* OPB error, it's probably buggy hardware or incorrect
257 "mal%d: system error, OPB (ESR = 0x%08x)\n",
263 static inline void mal_schedule_poll(struct mal_instance
*mal
)
265 if (likely(napi_schedule_prep(&mal
->napi
))) {
266 MAL_DBG2(mal
, "schedule_poll" NL
);
267 mal_disable_eob_irq(mal
);
268 __napi_schedule(&mal
->napi
);
270 MAL_DBG2(mal
, "already in poll" NL
);
273 static irqreturn_t
mal_txeob(int irq
, void *dev_instance
)
275 struct mal_instance
*mal
= dev_instance
;
277 u32 r
= get_mal_dcrn(mal
, MAL_TXEOBISR
);
279 MAL_DBG2(mal
, "txeob %08x" NL
, r
);
281 mal_schedule_poll(mal
);
282 set_mal_dcrn(mal
, MAL_TXEOBISR
, r
);
284 #ifdef CONFIG_PPC_DCR_NATIVE
285 if (mal_has_feature(mal
, MAL_FTR_CLEAR_ICINTSTAT
))
286 mtdcri(SDR0
, DCRN_SDR_ICINTSTAT
,
287 (mfdcri(SDR0
, DCRN_SDR_ICINTSTAT
) | ICINTSTAT_ICTX
));
293 static irqreturn_t
mal_rxeob(int irq
, void *dev_instance
)
295 struct mal_instance
*mal
= dev_instance
;
297 u32 r
= get_mal_dcrn(mal
, MAL_RXEOBISR
);
299 MAL_DBG2(mal
, "rxeob %08x" NL
, r
);
301 mal_schedule_poll(mal
);
302 set_mal_dcrn(mal
, MAL_RXEOBISR
, r
);
304 #ifdef CONFIG_PPC_DCR_NATIVE
305 if (mal_has_feature(mal
, MAL_FTR_CLEAR_ICINTSTAT
))
306 mtdcri(SDR0
, DCRN_SDR_ICINTSTAT
,
307 (mfdcri(SDR0
, DCRN_SDR_ICINTSTAT
) | ICINTSTAT_ICRX
));
313 static irqreturn_t
mal_txde(int irq
, void *dev_instance
)
315 struct mal_instance
*mal
= dev_instance
;
317 u32 deir
= get_mal_dcrn(mal
, MAL_TXDEIR
);
318 set_mal_dcrn(mal
, MAL_TXDEIR
, deir
);
320 MAL_DBG(mal
, "txde %08x" NL
, deir
);
324 "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
330 static irqreturn_t
mal_rxde(int irq
, void *dev_instance
)
332 struct mal_instance
*mal
= dev_instance
;
335 u32 deir
= get_mal_dcrn(mal
, MAL_RXDEIR
);
337 MAL_DBG(mal
, "rxde %08x" NL
, deir
);
339 list_for_each(l
, &mal
->list
) {
340 struct mal_commac
*mc
= list_entry(l
, struct mal_commac
, list
);
341 if (deir
& mc
->rx_chan_mask
) {
342 set_bit(MAL_COMMAC_RX_STOPPED
, &mc
->flags
);
343 mc
->ops
->rxde(mc
->dev
);
347 mal_schedule_poll(mal
);
348 set_mal_dcrn(mal
, MAL_RXDEIR
, deir
);
353 static irqreturn_t
mal_int(int irq
, void *dev_instance
)
355 struct mal_instance
*mal
= dev_instance
;
356 u32 esr
= get_mal_dcrn(mal
, MAL_ESR
);
358 if (esr
& MAL_ESR_EVB
) {
359 /* descriptor error */
360 if (esr
& MAL_ESR_DE
) {
361 if (esr
& MAL_ESR_CIDT
)
362 return mal_rxde(irq
, dev_instance
);
364 return mal_txde(irq
, dev_instance
);
366 return mal_serr(irq
, dev_instance
);
372 void mal_poll_disable(struct mal_instance
*mal
, struct mal_commac
*commac
)
374 /* Spinlock-type semantics: only one caller disable poll at a time */
375 while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED
, &commac
->flags
))
378 /* Synchronize with the MAL NAPI poller */
379 napi_synchronize(&mal
->napi
);
382 void mal_poll_enable(struct mal_instance
*mal
, struct mal_commac
*commac
)
385 clear_bit(MAL_COMMAC_POLL_DISABLED
, &commac
->flags
);
387 /* Feels better to trigger a poll here to catch up with events that
388 * may have happened on this channel while disabled. It will most
389 * probably be delayed until the next interrupt but that's mostly a
390 * non-issue in the context where this is called.
392 napi_schedule(&mal
->napi
);
395 static int mal_poll(struct napi_struct
*napi
, int budget
)
397 struct mal_instance
*mal
= container_of(napi
, struct mal_instance
, napi
);
402 MAL_DBG2(mal
, "poll(%d)" NL
, budget
);
404 /* Process TX skbs */
405 list_for_each(l
, &mal
->poll_list
) {
406 struct mal_commac
*mc
=
407 list_entry(l
, struct mal_commac
, poll_list
);
408 mc
->ops
->poll_tx(mc
->dev
);
413 * We _might_ need something more smart here to enforce polling
416 list_for_each(l
, &mal
->poll_list
) {
417 struct mal_commac
*mc
=
418 list_entry(l
, struct mal_commac
, poll_list
);
420 if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED
, &mc
->flags
)))
422 n
= mc
->ops
->poll_rx(mc
->dev
, budget
);
427 goto more_work
; // XXX What if this is the last one ?
431 /* We need to disable IRQs to protect from RXDE IRQ here */
432 spin_lock_irqsave(&mal
->lock
, flags
);
433 __napi_complete(napi
);
434 mal_enable_eob_irq(mal
);
435 spin_unlock_irqrestore(&mal
->lock
, flags
);
437 /* Check for "rotting" packet(s) */
438 list_for_each(l
, &mal
->poll_list
) {
439 struct mal_commac
*mc
=
440 list_entry(l
, struct mal_commac
, poll_list
);
441 if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED
, &mc
->flags
)))
443 if (unlikely(mc
->ops
->peek_rx(mc
->dev
) ||
444 test_bit(MAL_COMMAC_RX_STOPPED
, &mc
->flags
))) {
445 MAL_DBG2(mal
, "rotting packet" NL
);
446 if (napi_reschedule(napi
))
447 mal_disable_eob_irq(mal
);
449 MAL_DBG2(mal
, "already in poll list" NL
);
456 mc
->ops
->poll_tx(mc
->dev
);
460 MAL_DBG2(mal
, "poll() %d <- %d" NL
, budget
, received
);
464 static void mal_reset(struct mal_instance
*mal
)
468 MAL_DBG(mal
, "reset" NL
);
470 set_mal_dcrn(mal
, MAL_CFG
, MAL_CFG_SR
);
472 /* Wait for reset to complete (1 system clock) */
473 while ((get_mal_dcrn(mal
, MAL_CFG
) & MAL_CFG_SR
) && n
)
477 printk(KERN_ERR
"mal%d: reset timeout\n", mal
->index
);
480 int mal_get_regs_len(struct mal_instance
*mal
)
482 return sizeof(struct emac_ethtool_regs_subhdr
) +
483 sizeof(struct mal_regs
);
486 void *mal_dump_regs(struct mal_instance
*mal
, void *buf
)
488 struct emac_ethtool_regs_subhdr
*hdr
= buf
;
489 struct mal_regs
*regs
= (struct mal_regs
*)(hdr
+ 1);
492 hdr
->version
= mal
->version
;
493 hdr
->index
= mal
->index
;
495 regs
->tx_count
= mal
->num_tx_chans
;
496 regs
->rx_count
= mal
->num_rx_chans
;
498 regs
->cfg
= get_mal_dcrn(mal
, MAL_CFG
);
499 regs
->esr
= get_mal_dcrn(mal
, MAL_ESR
);
500 regs
->ier
= get_mal_dcrn(mal
, MAL_IER
);
501 regs
->tx_casr
= get_mal_dcrn(mal
, MAL_TXCASR
);
502 regs
->tx_carr
= get_mal_dcrn(mal
, MAL_TXCARR
);
503 regs
->tx_eobisr
= get_mal_dcrn(mal
, MAL_TXEOBISR
);
504 regs
->tx_deir
= get_mal_dcrn(mal
, MAL_TXDEIR
);
505 regs
->rx_casr
= get_mal_dcrn(mal
, MAL_RXCASR
);
506 regs
->rx_carr
= get_mal_dcrn(mal
, MAL_RXCARR
);
507 regs
->rx_eobisr
= get_mal_dcrn(mal
, MAL_RXEOBISR
);
508 regs
->rx_deir
= get_mal_dcrn(mal
, MAL_RXDEIR
);
510 for (i
= 0; i
< regs
->tx_count
; ++i
)
511 regs
->tx_ctpr
[i
] = get_mal_dcrn(mal
, MAL_TXCTPR(i
));
513 for (i
= 0; i
< regs
->rx_count
; ++i
) {
514 regs
->rx_ctpr
[i
] = get_mal_dcrn(mal
, MAL_RXCTPR(i
));
515 regs
->rcbs
[i
] = get_mal_dcrn(mal
, MAL_RCBS(i
));
520 static int __devinit
mal_probe(struct platform_device
*ofdev
)
522 struct mal_instance
*mal
;
523 int err
= 0, i
, bd_size
;
524 int index
= mal_count
++;
525 unsigned int dcr_base
;
528 unsigned long irqflags
;
529 irq_handler_t hdlr_serr
, hdlr_txde
, hdlr_rxde
;
531 mal
= kzalloc(sizeof(struct mal_instance
), GFP_KERNEL
);
534 "mal%d: out of memory allocating MAL structure!\n",
540 mal
->version
= of_device_is_compatible(ofdev
->dev
.of_node
, "ibm,mcmal2") ? 2 : 1;
542 MAL_DBG(mal
, "probe" NL
);
544 prop
= of_get_property(ofdev
->dev
.of_node
, "num-tx-chans", NULL
);
547 "mal%d: can't find MAL num-tx-chans property!\n",
552 mal
->num_tx_chans
= prop
[0];
554 prop
= of_get_property(ofdev
->dev
.of_node
, "num-rx-chans", NULL
);
557 "mal%d: can't find MAL num-rx-chans property!\n",
562 mal
->num_rx_chans
= prop
[0];
564 dcr_base
= dcr_resource_start(ofdev
->dev
.of_node
, 0);
567 "mal%d: can't find DCR resource!\n", index
);
571 mal
->dcr_host
= dcr_map(ofdev
->dev
.of_node
, dcr_base
, 0x100);
572 if (!DCR_MAP_OK(mal
->dcr_host
)) {
574 "mal%d: failed to map DCRs !\n", index
);
579 if (of_device_is_compatible(ofdev
->dev
.of_node
, "ibm,mcmal-405ez")) {
580 #if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
581 defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
582 mal
->features
|= (MAL_FTR_CLEAR_ICINTSTAT
|
583 MAL_FTR_COMMON_ERR_INT
);
585 printk(KERN_ERR
"%s: Support for 405EZ not enabled!\n",
586 ofdev
->dev
.of_node
->full_name
);
592 mal
->txeob_irq
= irq_of_parse_and_map(ofdev
->dev
.of_node
, 0);
593 mal
->rxeob_irq
= irq_of_parse_and_map(ofdev
->dev
.of_node
, 1);
594 mal
->serr_irq
= irq_of_parse_and_map(ofdev
->dev
.of_node
, 2);
596 if (mal_has_feature(mal
, MAL_FTR_COMMON_ERR_INT
)) {
597 mal
->txde_irq
= mal
->rxde_irq
= mal
->serr_irq
;
599 mal
->txde_irq
= irq_of_parse_and_map(ofdev
->dev
.of_node
, 3);
600 mal
->rxde_irq
= irq_of_parse_and_map(ofdev
->dev
.of_node
, 4);
603 if (mal
->txeob_irq
== NO_IRQ
|| mal
->rxeob_irq
== NO_IRQ
||
604 mal
->serr_irq
== NO_IRQ
|| mal
->txde_irq
== NO_IRQ
||
605 mal
->rxde_irq
== NO_IRQ
) {
607 "mal%d: failed to map interrupts !\n", index
);
612 INIT_LIST_HEAD(&mal
->poll_list
);
613 INIT_LIST_HEAD(&mal
->list
);
614 spin_lock_init(&mal
->lock
);
616 init_dummy_netdev(&mal
->dummy_dev
);
618 netif_napi_add(&mal
->dummy_dev
, &mal
->napi
, mal_poll
,
619 CONFIG_IBM_NEW_EMAC_POLL_WEIGHT
);
621 /* Load power-on reset defaults */
624 /* Set the MAL configuration register */
625 cfg
= (mal
->version
== 2) ? MAL2_CFG_DEFAULT
: MAL1_CFG_DEFAULT
;
626 cfg
|= MAL_CFG_PLBB
| MAL_CFG_OPBBL
| MAL_CFG_LEA
;
628 /* Current Axon is not happy with priority being non-0, it can
629 * deadlock, fix it up here
631 if (of_device_is_compatible(ofdev
->dev
.of_node
, "ibm,mcmal-axon"))
632 cfg
&= ~(MAL2_CFG_RPP_10
| MAL2_CFG_WPP_10
);
634 /* Apply configuration */
635 set_mal_dcrn(mal
, MAL_CFG
, cfg
);
637 /* Allocate space for BD rings */
638 BUG_ON(mal
->num_tx_chans
<= 0 || mal
->num_tx_chans
> 32);
639 BUG_ON(mal
->num_rx_chans
<= 0 || mal
->num_rx_chans
> 32);
641 bd_size
= sizeof(struct mal_descriptor
) *
642 (NUM_TX_BUFF
* mal
->num_tx_chans
+
643 NUM_RX_BUFF
* mal
->num_rx_chans
);
645 dma_alloc_coherent(&ofdev
->dev
, bd_size
, &mal
->bd_dma
,
647 if (mal
->bd_virt
== NULL
) {
649 "mal%d: out of memory allocating RX/TX descriptors!\n",
654 memset(mal
->bd_virt
, 0, bd_size
);
656 for (i
= 0; i
< mal
->num_tx_chans
; ++i
)
657 set_mal_dcrn(mal
, MAL_TXCTPR(i
), mal
->bd_dma
+
658 sizeof(struct mal_descriptor
) *
659 mal_tx_bd_offset(mal
, i
));
661 for (i
= 0; i
< mal
->num_rx_chans
; ++i
)
662 set_mal_dcrn(mal
, MAL_RXCTPR(i
), mal
->bd_dma
+
663 sizeof(struct mal_descriptor
) *
664 mal_rx_bd_offset(mal
, i
));
666 if (mal_has_feature(mal
, MAL_FTR_COMMON_ERR_INT
)) {
667 irqflags
= IRQF_SHARED
;
668 hdlr_serr
= hdlr_txde
= hdlr_rxde
= mal_int
;
671 hdlr_serr
= mal_serr
;
672 hdlr_txde
= mal_txde
;
673 hdlr_rxde
= mal_rxde
;
676 err
= request_irq(mal
->serr_irq
, hdlr_serr
, irqflags
, "MAL SERR", mal
);
679 err
= request_irq(mal
->txde_irq
, hdlr_txde
, irqflags
, "MAL TX DE", mal
);
682 err
= request_irq(mal
->txeob_irq
, mal_txeob
, 0, "MAL TX EOB", mal
);
685 err
= request_irq(mal
->rxde_irq
, hdlr_rxde
, irqflags
, "MAL RX DE", mal
);
688 err
= request_irq(mal
->rxeob_irq
, mal_rxeob
, 0, "MAL RX EOB", mal
);
692 /* Enable all MAL SERR interrupt sources */
693 if (mal
->version
== 2)
694 set_mal_dcrn(mal
, MAL_IER
, MAL2_IER_EVENTS
);
696 set_mal_dcrn(mal
, MAL_IER
, MAL1_IER_EVENTS
);
698 /* Enable EOB interrupt */
699 mal_enable_eob_irq(mal
);
702 "MAL v%d %s, %d TX channels, %d RX channels\n",
703 mal
->version
, ofdev
->dev
.of_node
->full_name
,
704 mal
->num_tx_chans
, mal
->num_rx_chans
);
706 /* Advertise this instance to the rest of the world */
708 dev_set_drvdata(&ofdev
->dev
, mal
);
710 mal_dbg_register(mal
);
715 free_irq(mal
->rxde_irq
, mal
);
717 free_irq(mal
->txeob_irq
, mal
);
719 free_irq(mal
->txde_irq
, mal
);
721 free_irq(mal
->serr_irq
, mal
);
723 dma_free_coherent(&ofdev
->dev
, bd_size
, mal
->bd_virt
, mal
->bd_dma
);
725 dcr_unmap(mal
->dcr_host
, 0x100);
732 static int __devexit
mal_remove(struct platform_device
*ofdev
)
734 struct mal_instance
*mal
= dev_get_drvdata(&ofdev
->dev
);
736 MAL_DBG(mal
, "remove" NL
);
738 /* Synchronize with scheduled polling */
739 napi_disable(&mal
->napi
);
741 if (!list_empty(&mal
->list
)) {
742 /* This is *very* bad */
744 "mal%d: commac list is not empty on remove!\n",
749 dev_set_drvdata(&ofdev
->dev
, NULL
);
751 free_irq(mal
->serr_irq
, mal
);
752 free_irq(mal
->txde_irq
, mal
);
753 free_irq(mal
->txeob_irq
, mal
);
754 free_irq(mal
->rxde_irq
, mal
);
755 free_irq(mal
->rxeob_irq
, mal
);
759 mal_dbg_unregister(mal
);
761 dma_free_coherent(&ofdev
->dev
,
762 sizeof(struct mal_descriptor
) *
763 (NUM_TX_BUFF
* mal
->num_tx_chans
+
764 NUM_RX_BUFF
* mal
->num_rx_chans
), mal
->bd_virt
,
771 static struct of_device_id mal_platform_match
[] =
774 .compatible
= "ibm,mcmal",
777 .compatible
= "ibm,mcmal2",
779 /* Backward compat */
782 .compatible
= "ibm,mcmal",
786 .compatible
= "ibm,mcmal2",
791 static struct platform_driver mal_of_driver
= {
794 .owner
= THIS_MODULE
,
795 .of_match_table
= mal_platform_match
,
798 .remove
= mal_remove
,
801 int __init
mal_init(void)
803 return platform_driver_register(&mal_of_driver
);
808 platform_driver_unregister(&mal_of_driver
);