/*
 * (C) Copyright 2005-2006
 * Stefan Roese, DENX Software Engineering, sr@denx.de.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#if 0
#define DEBUG		/* define for debug output */
#endif
#include <config.h>
#include <common.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <asm/processor.h>
#include <asm/arch-ixp/ixp425.h>

#include <IxOsal.h>
#include <IxEthAcc.h>
#include <IxEthDB.h>
#include <IxNpeDl.h>
#include <IxQMgr.h>
#include <IxNpeMh.h>
#include <ix_ossl.h>
#include <IxFeatureCtrl.h>

#include <npe.h>
#ifdef CONFIG_IXP4XX_NPE

static IxQMgrDispatcherFuncPtr qDispatcherFunc = NULL;
static int npe_exists[NPE_NUM_PORTS];
static int npe_used[NPE_NUM_PORTS];

/* A little extra so we can align to cacheline. */
static u8 npe_alloc_pool[NPE_MEM_POOL_SIZE + CFG_CACHELINE_SIZE - 1];
static u8 *npe_alloc_end;
static u8 *npe_alloc_free;
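
/*
 * npe_alloc() is a simple bump allocator: it hands out cacheline-aligned
 * chunks from the static npe_alloc_pool above and never frees them
 * individually; the pool pointers are only reset wholesale in npe_init()
 * and npe_initialize().
 */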
static void *npe_alloc(int size)
{
	static int count = 0;
	void *p = NULL;

	size = (size + (CFG_CACHELINE_SIZE-1)) & ~(CFG_CACHELINE_SIZE-1);
	count++;

	if ((npe_alloc_free + size) < npe_alloc_end) {
		p = npe_alloc_free;
		npe_alloc_free += size;
	} else {
		printf("%s: failed (count=%d, size=%d)!\n", __FUNCTION__, count, size);
	}

	return p;
}
/* Not interrupt safe! */
static void mbuf_enqueue(IX_OSAL_MBUF **q, IX_OSAL_MBUF *new)
{
	IX_OSAL_MBUF *m = *q;

	IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(new) = NULL;

	if (m) {
		while(IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m))
			m = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m);
		IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = new;
	} else
		*q = new;
}

/* Not interrupt safe! */
static IX_OSAL_MBUF *mbuf_dequeue(IX_OSAL_MBUF **q)
{
	IX_OSAL_MBUF *m = *q;
	if (m)
		*q = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m);
	return m;
}
static void reset_tx_mbufs(struct npe* p_npe)
{
	IX_OSAL_MBUF *m;
	int i;

	p_npe->txQHead = NULL;

	for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS; i++) {
		m = &p_npe->tx_mbufs[i];

		memset(m, 0, sizeof(*m));

		IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->tx_pkts[i * NPE_PKT_SIZE];
		IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
		mbuf_enqueue(&p_npe->txQHead, m);
	}
}

static void reset_rx_mbufs(struct npe* p_npe)
{
	IX_OSAL_MBUF *m;
	int i;

	p_npe->rxQHead = NULL;

	HAL_DCACHE_INVALIDATE(p_npe->rx_pkts, NPE_PKT_SIZE *
			      CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);

	for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS; i++) {
		m = &p_npe->rx_mbufs[i];

		memset(m, 0, sizeof(*m));

		IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->rx_pkts[i * NPE_PKT_SIZE];
		IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;

		if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
			printf("ixEthAccPortRxFreeReplenish failed for port %d\n",
			       p_npe->eth_id);
			break;
		}
	}
}
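
/*
 * The init_*_mbufs() helpers carve the packet buffers and IX_OSAL_MBUF
 * headers for the RX and TX pools out of the static npe_alloc() pool and
 * then hand the descriptors to the reset_*_mbufs() routines above.
 */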
static void init_rx_mbufs(struct npe* p_npe)
{
	p_npe->rxQHead = NULL;

	p_npe->rx_pkts = npe_alloc(NPE_PKT_SIZE *
				   CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
	if (p_npe->rx_pkts == NULL) {
		printf("alloc of packets failed.\n");
		return;
	}

	p_npe->rx_mbufs = (IX_OSAL_MBUF *)
		npe_alloc(sizeof(IX_OSAL_MBUF) *
			  CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
	if (p_npe->rx_mbufs == NULL) {
		printf("alloc of mbufs failed.\n");
		return;
	}

	reset_rx_mbufs(p_npe);
}

static void init_tx_mbufs(struct npe* p_npe)
{
	p_npe->tx_pkts = npe_alloc(NPE_PKT_SIZE *
				   CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS);
	if (p_npe->tx_pkts == NULL) {
		printf("alloc of packets failed.\n");
		return;
	}

	p_npe->tx_mbufs = (IX_OSAL_MBUF *)
		npe_alloc(sizeof(IX_OSAL_MBUF) *
			  CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS);
	if (p_npe->tx_mbufs == NULL) {
		printf("alloc of mbufs failed.\n");
		return;
	}

	reset_tx_mbufs(p_npe);
}
/* Convert IX_ETH_PORT_n to IX_NPEMH_NPEID_NPEx */
static int __eth_to_npe(int eth_id)
{
	switch(eth_id) {
	case IX_ETH_PORT_1:
		return IX_NPEMH_NPEID_NPEB;

	case IX_ETH_PORT_2:
		return IX_NPEMH_NPEID_NPEC;

	case IX_ETH_PORT_3:
		return IX_NPEMH_NPEID_NPEA;
	}
	return 0;
}

/* Poll the CSR machinery. */
static void npe_poll(int eth_id)
{
	if (qDispatcherFunc != NULL) {
		ixNpeMhMessagesReceive(__eth_to_npe(eth_id));
		(*qDispatcherFunc)(IX_QMGR_QUELOW_GROUP);
	}
}
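
/*
 * Receive path: the callback below copies each completed frame into
 * U-Boot's NetRxPackets[] ring at index rx_write; npe_rx() later hands
 * the buffered frames to the network stack while advancing rx_read.
 * Both indices wrap at PKTBUFSRX.
 */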
/* ethAcc RX callback */
static void npe_rx_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
{
	struct npe* p_npe = (struct npe *)cbTag;

	if (IX_OSAL_MBUF_MLEN(m) > 0) {
		mbuf_enqueue(&p_npe->rxQHead, m);

		if (p_npe->rx_write == ((p_npe->rx_read-1) & (PKTBUFSRX-1))) {
			debug("Rx overflow: rx_write=%d rx_read=%d\n",
			      p_npe->rx_write, p_npe->rx_read);
		} else {
			debug("Received message #%d (len=%d)\n", p_npe->rx_write,
			      IX_OSAL_MBUF_MLEN(m));
			memcpy((void *)NetRxPackets[p_npe->rx_write], IX_OSAL_MBUF_MDATA(m),
			       IX_OSAL_MBUF_MLEN(m));
			p_npe->rx_len[p_npe->rx_write] = IX_OSAL_MBUF_MLEN(m);
			p_npe->rx_write++;
			if (p_npe->rx_write == PKTBUFSRX)
				p_npe->rx_write = 0;

#ifdef CONFIG_PRINT_RX_FRAMES
			{
				u8 *ptr = IX_OSAL_MBUF_MDATA(m);
				int i;

				for (i=0; i<60; i++) {
					debug("%02x ", *ptr++);
				}
				debug("\n");
			}
#endif
		}

		m = mbuf_dequeue(&p_npe->rxQHead);
	} else {
		debug("Received frame with length 0!!!\n");
		m = mbuf_dequeue(&p_npe->rxQHead);
	}

	/* Now return mbuf to NPE */
	IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
	IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
	IX_OSAL_MBUF_FLAGS(m) = 0;

	if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
		debug("npe_rx_callback: Error returning mbuf.\n");
	}
}
/* ethAcc TX callback */
static void npe_tx_callback(u32 cbTag, IX_OSAL_MBUF *m)
{
	struct npe* p_npe = (struct npe *)cbTag;

	debug("%s\n", __FUNCTION__);

	IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
	IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
	IX_OSAL_MBUF_FLAGS(m) = 0;

	mbuf_enqueue(&p_npe->txQHead, m);
}
static int npe_set_mac_address(struct eth_device *dev)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	IxEthAccMacAddr npeMac;

	debug("%s\n", __FUNCTION__);

	/* Set MAC address */
	memcpy(npeMac.macAddress, dev->enetaddr, 6);

	if (ixEthAccPortUnicastMacAddressSet(p_npe->eth_id, &npeMac) != IX_ETH_ACC_SUCCESS) {
		printf("Error setting unicast address! %02x:%02x:%02x:%02x:%02x:%02x\n",
		       npeMac.macAddress[0], npeMac.macAddress[1],
		       npeMac.macAddress[2], npeMac.macAddress[3],
		       npeMac.macAddress[4], npeMac.macAddress[5]);
		return 0;
	}

	return 1;
}
/* Boot-time CSR library initialization. */
static int npe_csr_load(void)
{
	int i;

	if (ixQMgrInit() != IX_SUCCESS) {
		debug("Error initialising queue manager!\n");
		return 0;
	}

	ixQMgrDispatcherLoopGet(&qDispatcherFunc);

	if(ixNpeMhInitialize(IX_NPEMH_NPEINTERRUPTS_YES) != IX_SUCCESS) {
		printf("Error initialising NPE Message handler!\n");
		return 0;
	}

	if (npe_used[IX_ETH_PORT_1] && npe_exists[IX_ETH_PORT_1] &&
	    ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
	    != IX_SUCCESS) {
		printf("Error downloading firmware to NPE-B!\n");
		return 0;
	}

	if (npe_used[IX_ETH_PORT_2] && npe_exists[IX_ETH_PORT_2] &&
	    ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
	    != IX_SUCCESS) {
		printf("Error downloading firmware to NPE-C!\n");
		return 0;
	}

	/* don't need this for U-Boot */
	ixFeatureCtrlSwConfigurationWrite(IX_FEATURECTRL_ETH_LEARNING, FALSE);

	if (ixEthAccInit() != IX_ETH_ACC_SUCCESS) {
		printf("Error initialising Ethernet access driver!\n");
		return 0;
	}

	for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
		if (!npe_used[i] || !npe_exists[i])
			continue;
		if (ixEthAccPortInit(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error initialising Ethernet port%d!\n", i);
		}
		if (ixEthAccTxSchedulingDisciplineSet(i, FIFO_NO_PRIORITY) != IX_ETH_ACC_SUCCESS) {
			printf("Error setting scheduling discipline for port %d.\n", i);
		}
		if (ixEthAccPortRxFrameAppendFCSDisable(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error disabling RX FCS for port %d.\n", i);
		}
		if (ixEthAccPortTxFrameAppendFCSEnable(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error enabling TX FCS for port %d.\n", i);
		}
	}

	return 1;
}
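
/*
 * eth_device init hook: wait for PHY autonegotiation, report the link
 * speed once, (re)build the mbuf pools, register the RX and TX-done
 * callbacks, program the MAC address and enable the port.
 */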
static int npe_init(struct eth_device *dev, bd_t * bis)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	int i;
	u16 reg_short;
	int speed;
	int duplex;

	debug("%s: 1\n", __FUNCTION__);

	miiphy_read (dev->name, p_npe->phy_no, PHY_BMSR, &reg_short);

	/*
	 * Wait if PHY is capable of autonegotiation and autonegotiation is not complete
	 */
	if ((reg_short & PHY_BMSR_AUTN_ABLE) && !(reg_short & PHY_BMSR_AUTN_COMP)) {
		puts ("Waiting for PHY auto negotiation to complete");
		i = 0;
		while (!(reg_short & PHY_BMSR_AUTN_COMP)) {
			/*
			 * Timeout reached ?
			 */
			if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
				puts (" TIMEOUT !\n");
				break;
			}

			if ((i++ % 1000) == 0) {
				putc ('.');
				miiphy_read (dev->name, p_npe->phy_no, PHY_BMSR, &reg_short);
			}
			udelay (1000);	/* 1 ms */
		}
		puts (" done\n");
		udelay (500000);	/* another 500 ms (results in faster booting) */
	}

	speed = miiphy_speed (dev->name, p_npe->phy_no);
	duplex = miiphy_duplex (dev->name, p_npe->phy_no);

	if (p_npe->print_speed) {
		p_npe->print_speed = 0;
		printf ("ENET Speed is %d Mbps - %s duplex connection\n",
			(int) speed, (duplex == HALF) ? "HALF" : "FULL");
	}

	npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
	npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
				 CFG_CACHELINE_SIZE - 1) & ~(CFG_CACHELINE_SIZE - 1));

	/* initialize mbuf pool */
	init_rx_mbufs(p_npe);
	init_tx_mbufs(p_npe);

	if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_callback,
					   (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		printf("can't register RX callback!\n");
		return 0;
	}

	if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_callback,
					       (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		printf("can't register TX callback!\n");
		return 0;
	}

	npe_set_mac_address(dev);

	if (ixEthAccPortEnable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
		printf("can't enable port!\n");
		return 0;
	}

	p_npe->active = 1;

	return 1;
}
#if 0 /* test-only: probably have to deal with it when booting linux (for a clean state) */
/* Uninitialize CSR library. */
static void npe_csr_unload(void)
{
	ixEthAccUnload();
	ixEthDBUnload();
	ixNpeMhUnload();
	ixQMgrUnload();
}

/* callback which is used by ethAcc to recover RX buffers when stopping */
static void npe_rx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
{
	debug("%s\n", __FUNCTION__);
}

/* callback which is used by ethAcc to recover TX buffers when stopping */
static void npe_tx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m)
{
	debug("%s\n", __FUNCTION__);
}
#endif
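
/*
 * eth_device halt hook: polls the dispatcher for a while so outstanding
 * mbufs can be recovered, then marks the port inactive. The full port
 * disable / NPE stop-and-reset sequence is currently compiled out (see
 * the "#if 0" blocks above and inside the function).
 */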
static void npe_halt(struct eth_device *dev)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	int i;

	debug("%s\n", __FUNCTION__);

	/* Delay to give time for recovery of mbufs */
	for (i = 0; i < 100; i++) {
		npe_poll(p_npe->eth_id);
		udelay(100);
	}

#if 0 /* test-only: probably have to deal with it when booting linux (for a clean state) */
	if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_stop_callback,
					   (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		debug("Error registering rx callback!\n");
	}

	if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_stop_callback,
					       (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		debug("Error registering tx callback!\n");
	}

	if (ixEthAccPortDisable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
		debug("npe_stop: Error disabling NPEB!\n");
	}

	/* Delay to give time for recovery of mbufs */
	for (i = 0; i < 100; i++) {
		npe_poll(p_npe->eth_id);
		udelay(10000);
	}

	/*
	 * For U-Boot only, we are probably launching Linux or other OS that
	 * needs a clean slate for its NPE library.
	 */
#if 0 /* test-only */
	for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
		if (npe_used[i] && npe_exists[i])
			if (ixNpeDlNpeStopAndReset(__eth_to_npe(i)) != IX_SUCCESS)
				printf("Failed to stop and reset NPE B.\n");
	}
#endif

#endif
	p_npe->active = 0;
}
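
/*
 * eth_device send hook: takes a free TX mbuf, copies the frame into its
 * buffer, submits it to ethAcc and polls the dispatcher so TX-done and
 * RX work get processed.
 */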
static int npe_send(struct eth_device *dev, volatile void *packet, int len)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	u8 *dest;
	int err;
	IX_OSAL_MBUF *m;

	debug("%s\n", __FUNCTION__);
	m = mbuf_dequeue(&p_npe->txQHead);
	dest = IX_OSAL_MBUF_MDATA(m);
	IX_OSAL_MBUF_PKT_LEN(m) = IX_OSAL_MBUF_MLEN(m) = len;
	IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = NULL;

	memcpy(dest, (char *)packet, len);

	if ((err = ixEthAccPortTxFrameSubmit(p_npe->eth_id, m, IX_ETH_ACC_TX_DEFAULT_PRIORITY))
	    != IX_ETH_ACC_SUCCESS) {
		printf("npe_send: Can't submit frame. err[%d]\n", err);
		mbuf_enqueue(&p_npe->txQHead, m);
		return 0;
	}

#ifdef DEBUG_PRINT_TX_FRAMES
	{
		u8 *ptr = IX_OSAL_MBUF_MDATA(m);
		int i;

		for (i=0; i<IX_OSAL_MBUF_MLEN(m); i++) {
			printf("%02x ", *ptr++);
		}
		printf(" (tx-len=%d)\n", IX_OSAL_MBUF_MLEN(m));
	}
#endif

	npe_poll(p_npe->eth_id);

	return len;
}
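
/*
 * eth_device recv hook: polls the dispatcher and then passes every frame
 * buffered by npe_rx_callback() to NetReceive(), advancing rx_read.
 */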
static int npe_rx(struct eth_device *dev)
{
	struct npe *p_npe = (struct npe *)dev->priv;

	debug("%s\n", __FUNCTION__);
	npe_poll(p_npe->eth_id);

	debug("%s: rx_write=%d rx_read=%d\n", __FUNCTION__, p_npe->rx_write, p_npe->rx_read);
	while (p_npe->rx_write != p_npe->rx_read) {
		debug("Reading message #%d\n", p_npe->rx_read);
		NetReceive(NetRxPackets[p_npe->rx_read], p_npe->rx_len[p_npe->rx_read]);
		p_npe->rx_read++;
		if (p_npe->rx_read == PKTBUFSRX)
			p_npe->rx_read = 0;
	}

	return 0;
}
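
/*
 * Driver entry point: for each NPE port with a valid MAC address in the
 * board info, allocate and register an eth_device. On the first call it
 * also detects which Ethernet coprocessors are present on this
 * IXP42x/IXP46x silicon and loads the CSR library via npe_csr_load().
 */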
int npe_initialize(bd_t * bis)
{
	static int virgin = 0;
	struct eth_device *dev;
	int eth_num = 0;
	struct npe *p_npe = NULL;

	for (eth_num = 0; eth_num < CFG_NPE_NUMS; eth_num++) {

		/* See if we can actually bring up the interface, otherwise, skip it */
		switch (eth_num) {
		default:	/* fall through */
		case 0:
			if (memcmp (bis->bi_enetaddr, "\0\0\0\0\0\0", 6) == 0) {
				continue;
			}
			break;
#ifdef CONFIG_HAS_ETH1
		case 1:
			if (memcmp (bis->bi_enet1addr, "\0\0\0\0\0\0", 6) == 0) {
				continue;
			}
			break;
#endif
		}

		/* Allocate device structure */
		dev = (struct eth_device *)malloc(sizeof(*dev));
		if (dev == NULL) {
			printf ("%s: Cannot allocate eth_device %d\n", __FUNCTION__, eth_num);
			return -1;
		}
		memset(dev, 0, sizeof(*dev));

		/* Allocate our private use data */
		p_npe = (struct npe *)malloc(sizeof(struct npe));
		if (p_npe == NULL) {
			printf("%s: Cannot allocate private hw data for eth_device %d\n",
			       __FUNCTION__, eth_num);
			free(dev);
			return -1;
		}
		memset(p_npe, 0, sizeof(struct npe));

		switch (eth_num) {
		default:	/* fall through */
		case 0:
			memcpy(dev->enetaddr, bis->bi_enetaddr, 6);
			p_npe->eth_id = 0;
			p_npe->phy_no = CONFIG_PHY_ADDR;
			break;

#ifdef CONFIG_HAS_ETH1
		case 1:
			memcpy(dev->enetaddr, bis->bi_enet1addr, 6);
			p_npe->eth_id = 1;
			p_npe->phy_no = CONFIG_PHY1_ADDR;
			break;
#endif
		}

		sprintf(dev->name, "NPE%d", eth_num);
		dev->priv = (void *)p_npe;
		dev->init = npe_init;
		dev->halt = npe_halt;
		dev->send = npe_send;
		dev->recv = npe_rx;

		p_npe->print_speed = 1;

		if (0 == virgin) {
			virgin = 1;

			if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP42X) {
				switch (ixFeatureCtrlProductIdRead() & IX_FEATURE_CTRL_SILICON_STEPPING_MASK) {
				case IX_FEATURE_CTRL_SILICON_TYPE_B0:
					/*
					 * If it is B0 Silicon, we only enable port when its corresponding
					 * Eth Coprocessor is available.
					 */
					if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
					    IX_FEATURE_CTRL_COMPONENT_ENABLED)
						npe_exists[IX_ETH_PORT_1] = TRUE;

					if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
					    IX_FEATURE_CTRL_COMPONENT_ENABLED)
						npe_exists[IX_ETH_PORT_2] = TRUE;
					break;
				case IX_FEATURE_CTRL_SILICON_TYPE_A0:
					/*
					 * If it is A0 Silicon, we enable both as both Eth Coprocessors
					 * are available.
					 */
					npe_exists[IX_ETH_PORT_1] = TRUE;
					npe_exists[IX_ETH_PORT_2] = TRUE;
					break;
				}
			} else if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP46X) {
				if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
				    IX_FEATURE_CTRL_COMPONENT_ENABLED)
					npe_exists[IX_ETH_PORT_1] = TRUE;

				if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
				    IX_FEATURE_CTRL_COMPONENT_ENABLED)
					npe_exists[IX_ETH_PORT_2] = TRUE;
			}

			npe_used[IX_ETH_PORT_1] = 1;
			npe_used[IX_ETH_PORT_2] = 1;

			npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
			npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
						 CFG_CACHELINE_SIZE - 1)
						& ~(CFG_CACHELINE_SIZE - 1));

			if (!npe_csr_load())
				return 0;
		}

		eth_register(dev);

#if defined(CONFIG_MII) || (CONFIG_COMMANDS & CFG_CMD_MII)
		miiphy_register(dev->name, npe_miiphy_read, npe_miiphy_write);
#endif

	}			/* end for each supported device */

	return 1;
}
#endif /* CONFIG_IXP4XX_NPE */