/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/export.h>
#include <bcm63xx_dev_enet.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>

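/*
 * Per-channel ENET DMA controller (DMAC) register offsets. Most SoCs use
 * the BCM6348-style layout below; the BCM6345 has its own register map,
 * so the matching table is selected at init time by
 * bcm63xx_enetdmac_regs_init().
 */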
static const unsigned long bcm6348_regs_enetdmac[] = {
	[ENETDMAC_CHANCFG]	= ENETDMAC_CHANCFG_REG,
	[ENETDMAC_IR]		= ENETDMAC_IR_REG,
	[ENETDMAC_IRMASK]	= ENETDMAC_IRMASK_REG,
	[ENETDMAC_MAXBURST]	= ENETDMAC_MAXBURST_REG,
};

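/*
 * The BCM6345 layout also exposes buffer allocation, ring start, flow
 * control and length registers through the same index space.
 */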
static const unsigned long bcm6345_regs_enetdmac[] = {
	[ENETDMAC_CHANCFG]	= ENETDMA_6345_CHANCFG_REG,
	[ENETDMAC_IR]		= ENETDMA_6345_IR_REG,
	[ENETDMAC_IRMASK]	= ENETDMA_6345_IRMASK_REG,
	[ENETDMAC_MAXBURST]	= ENETDMA_6345_MAXBURST_REG,
	[ENETDMAC_BUFALLOC]	= ENETDMA_6345_BUFALLOC_REG,
	[ENETDMAC_RSTART]	= ENETDMA_6345_RSTART_REG,
	[ENETDMAC_FC]		= ENETDMA_6345_FC_REG,
	[ENETDMAC_LEN]		= ENETDMA_6345_LEN_REG,
};

const unsigned long *bcm63xx_regs_enetdmac;
EXPORT_SYMBOL(bcm63xx_regs_enetdmac);

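/* Select the DMAC register offset table matching the detected CPU. */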
static __init void bcm63xx_enetdmac_regs_init(void)
{
	if (BCMCPU_IS_6345())
		bcm63xx_regs_enetdmac = bcm6345_regs_enetdmac;
	else
		bcm63xx_regs_enetdmac = bcm6348_regs_enetdmac;
}

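/*
 * Register ranges shared by all ethernet devices: the global ENETDMA block,
 * the per-channel ENETDMAC block and the ENETDMAS (state RAM) block.
 * Addresses and sizes are filled in by register_shared().
 */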
static struct resource shared_res[] = {
	{
		.start		= -1, /* filled at runtime */
		.end		= -1, /* filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
	{
		.start		= -1, /* filled at runtime */
		.end		= -1, /* filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
	{
		.start		= -1, /* filled at runtime */
		.end		= -1, /* filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
};

static struct platform_device bcm63xx_enet_shared_device = {
	.name		= "bcm63xx_enet_shared",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(shared_res),
	.resource	= shared_res,
};

static int shared_device_registered;

static u64 enet_dmamask = DMA_BIT_MASK(32);

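/*
 * ENET0 resources: one MMIO window for the MAC registers plus the MAC,
 * RX DMA and TX DMA interrupt lines, all filled in at registration time.
 */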
static struct resource enet0_res[] = {
	{
		.start		= -1, /* filled at runtime */
		.end		= -1, /* filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
};

static struct bcm63xx_enet_platform_data enet0_pd;

static struct platform_device bcm63xx_enet0_device = {
	.name		= "bcm63xx_enet",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(enet0_res),
	.resource	= enet0_res,
	.dev		= {
		.platform_data	= &enet0_pd,
		.dma_mask	= &enet_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

static struct resource enet1_res[] = {
	{
		.start		= -1, /* filled at runtime */
		.end		= -1, /* filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
	{
		.start		= -1, /* filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
};

static struct bcm63xx_enet_platform_data enet1_pd;

static struct platform_device bcm63xx_enet1_device = {
	.name		= "bcm63xx_enet",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(enet1_res),
	.resource	= enet1_res,
	.dev		= {
		.platform_data	= &enet1_pd,
		.dma_mask	= &enet_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

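/*
 * Resources and device for the integrated ethernet switch, only registered
 * on the SoCs that have one (see the CPU check in bcm63xx_enetsw_register).
 */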
static struct resource enetsw_res[] = {
	{
		/* start & end filled at runtime */
		.flags		= IORESOURCE_MEM,
	},
	{
		/* start filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
	{
		/* start filled at runtime */
		.flags		= IORESOURCE_IRQ,
	},
};

static struct bcm63xx_enetsw_platform_data enetsw_pd;

static struct platform_device bcm63xx_enetsw_device = {
	.name		= "bcm63xx_enetsw",
	.num_resources	= ARRAY_SIZE(enetsw_res),
	.resource	= enetsw_res,
	.dev		= {
		.platform_data	= &enetsw_pd,
		.dma_mask	= &enet_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

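/*
 * Register the platform device covering the DMA register ranges shared by
 * all MAC/switch instances. Done once; later callers return early.
 */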
static int __init register_shared(void)
{
	int ret, chan_count;

	if (shared_device_registered)
		return 0;

	bcm63xx_enetdmac_regs_init();

	shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
	shared_res[0].end = shared_res[0].start;
	if (BCMCPU_IS_6345())
		shared_res[0].end += (RSET_6345_ENETDMA_SIZE) - 1;
	else
		shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;

	/* number of DMA channels exposed by this SoC family */
	if (BCMCPU_IS_6328() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
		chan_count = 32;
	else if (BCMCPU_IS_6345())
		chan_count = 8;
	else
		chan_count = 16;

	shared_res[1].start = bcm63xx_regset_address(RSET_ENETDMAC);
	shared_res[1].end = shared_res[1].start;
	shared_res[1].end += RSET_ENETDMAC_SIZE(chan_count) - 1;

	shared_res[2].start = bcm63xx_regset_address(RSET_ENETDMAS);
	shared_res[2].end = shared_res[2].start;
	shared_res[2].end += RSET_ENETDMAS_SIZE(chan_count) - 1;

	ret = platform_device_register(&bcm63xx_enet_shared_device);
	if (ret)
		return ret;
	shared_device_registered = 1;

	return 0;
}

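/*
 * Register one of the two internal ethernet MACs: fill in the register
 * window and interrupt resources for the requested unit, copy the caller's
 * platform data and derive the DMA channel setup from the CPU type.
 */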
int __init bcm63xx_enet_register(int unit,
				 const struct bcm63xx_enet_platform_data *pd)
{
	struct platform_device *pdev;
	struct bcm63xx_enet_platform_data *dpd;
	int ret;

	if (unit > 1)
		return -ENODEV;

	/* enet1 does not exist on these CPUs */
	if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345()))
		return -ENODEV;

	ret = register_shared();
	if (ret)
		return ret;

	if (unit == 0) {
		enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0);
		enet0_res[0].end = enet0_res[0].start;
		enet0_res[0].end += RSET_ENET_SIZE - 1;
		enet0_res[1].start = bcm63xx_get_irq_number(IRQ_ENET0);
		enet0_res[2].start = bcm63xx_get_irq_number(IRQ_ENET0_RXDMA);
		enet0_res[3].start = bcm63xx_get_irq_number(IRQ_ENET0_TXDMA);
		pdev = &bcm63xx_enet0_device;
	} else {
		enet1_res[0].start = bcm63xx_regset_address(RSET_ENET1);
		enet1_res[0].end = enet1_res[0].start;
		enet1_res[0].end += RSET_ENET_SIZE - 1;
		enet1_res[1].start = bcm63xx_get_irq_number(IRQ_ENET1);
		enet1_res[2].start = bcm63xx_get_irq_number(IRQ_ENET1_RXDMA);
		enet1_res[3].start = bcm63xx_get_irq_number(IRQ_ENET1_TXDMA);
		pdev = &bcm63xx_enet1_device;
	}

	/* copy given platform data */
	dpd = pdev->dev.platform_data;
	memcpy(dpd, pd, sizeof(*pd));

	/* adjust them in case internal phy is used */
	if (dpd->use_internal_phy) {
		/* internal phy only exists for enet0 */
		if (unit == 1)
			return -ENODEV;

		dpd->has_phy_interrupt = 1;
		dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
	}

	dpd->dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
	dpd->dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
	if (BCMCPU_IS_6345()) {
		dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_CHAINING_MASK;
		dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_WRAP_EN_MASK;
		dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_FLOWC_EN_MASK;
		dpd->dma_chan_int_mask |= ENETDMA_IR_BUFDONE_MASK;
		dpd->dma_chan_int_mask |= ENETDMA_IR_NOTOWNER_MASK;
		dpd->dma_chan_width = ENETDMA_6345_CHAN_WIDTH;
		dpd->dma_desc_shift = ENETDMA_6345_DESC_SHIFT;
	} else {
		dpd->dma_has_sram = true;
		dpd->dma_chan_width = ENETDMA_CHAN_WIDTH;
	}

	ret = platform_device_register(pdev);
	if (ret)
		return ret;
	return 0;
}

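/*
 * Register the integrated ethernet switch; the port count and DMA setup
 * are derived from the CPU type.
 */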
int __init
bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd)
{
	int ret;

	if (!BCMCPU_IS_6328() && !BCMCPU_IS_6362() && !BCMCPU_IS_6368())
		return -ENODEV;

	ret = register_shared();
	if (ret)
		return ret;

	enetsw_res[0].start = bcm63xx_regset_address(RSET_ENETSW);
	enetsw_res[0].end = enetsw_res[0].start;
	enetsw_res[0].end += RSET_ENETSW_SIZE - 1;
	enetsw_res[1].start = bcm63xx_get_irq_number(IRQ_ENETSW_RXDMA0);
	enetsw_res[2].start = bcm63xx_get_irq_number(IRQ_ENETSW_TXDMA0);
	if (!enetsw_res[2].start)
		enetsw_res[2].start = -1;

	memcpy(bcm63xx_enetsw_device.dev.platform_data, pd, sizeof(*pd));

	if (BCMCPU_IS_6328())
		enetsw_pd.num_ports = ENETSW_PORTS_6328;
	else if (BCMCPU_IS_6362() || BCMCPU_IS_6368())
		enetsw_pd.num_ports = ENETSW_PORTS_6368;

	enetsw_pd.dma_has_sram = true;
	enetsw_pd.dma_chan_width = ENETDMA_CHAN_WIDTH;
	enetsw_pd.dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
	enetsw_pd.dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;

	ret = platform_device_register(&bcm63xx_enetsw_device);
	if (ret)
		return ret;

	return 0;
}