drivers/atm/fore200e.c
1 /*
2 A FORE Systems 200E-series driver for ATM on Linux.
3 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
5 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
7 This driver simultaneously supports PCA-200E and SBA-200E adapters
8 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/init.h>
29 #include <linux/capability.h>
30 #include <linux/interrupt.h>
31 #include <linux/bitops.h>
32 #include <linux/pci.h>
33 #include <linux/module.h>
34 #include <linux/atmdev.h>
35 #include <linux/sonet.h>
36 #include <linux/atm_suni.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/delay.h>
39 #include <asm/io.h>
40 #include <asm/string.h>
41 #include <asm/page.h>
42 #include <asm/irq.h>
43 #include <asm/dma.h>
44 #include <asm/byteorder.h>
45 #include <asm/uaccess.h>
46 #include <asm/atomic.h>
48 #ifdef CONFIG_ATM_FORE200E_SBA
49 #include <asm/idprom.h>
50 #include <asm/sbus.h>
51 #include <asm/openprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pgtable.h>
54 #endif
56 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
57 #define FORE200E_USE_TASKLET
58 #endif
60 #if 0 /* enable the debugging code of the buffer supply queues */
61 #define FORE200E_BSQ_DEBUG
62 #endif
64 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
65 #define FORE200E_52BYTE_AAL0_SDU
66 #endif
68 #include "fore200e.h"
69 #include "suni.h"
71 #define FORE200E_VERSION "0.3e"
73 #define FORE200E "fore200e: "
75 #if 0 /* override .config */
76 #define CONFIG_ATM_FORE200E_DEBUG 1
77 #endif
78 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
79 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
80 printk(FORE200E format, ##args); } while (0)
81 #else
82 #define DPRINTK(level, format, args...) do {} while (0)
83 #endif
86 #define FORE200E_ALIGN(addr, alignment) \
87 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
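/* Illustrative arithmetic: FORE200E_ALIGN yields the padding needed to reach the next
   'alignment' boundary, e.g. FORE200E_ALIGN(0x1003, 8) == 5 because
   (0x1003 + 7) & ~7 == 0x1008 and 0x1008 - 0x1003 == 5. */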
89 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
91 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
93 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
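/* Illustrative use of the queue macros (matching the calls later in this file):
   FORE200E_INDEX(virt_addr, type, i) and FORE200E_DMA_INDEX(dma_addr, type, i) give the
   host and adapter views of ring entry i, while FORE200E_NEXT_ENTRY(cmdq->head,
   QUEUE_SIZE_CMD) advances the head index modulo the queue size. */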
95 #if 1
96 #define ASSERT(expr) if (!(expr)) { \
97 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
102 __func__, __LINE__, #expr); \
103 panic(FORE200E "%s", __func__); \
105 }
106 #else
107 #define ASSERT(expr) do {} while (0)
108 #endif
111 static const struct atmdev_ops fore200e_ops;
112 static const struct fore200e_bus fore200e_bus[];
114 static LIST_HEAD(fore200e_boards);
117 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
118 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
119 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
122 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
123 { BUFFER_S1_NBR, BUFFER_L1_NBR },
124 { BUFFER_S2_NBR, BUFFER_L2_NBR }
127 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
128 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
129 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
133 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
134 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
135 #endif
138 #if 0 /* currently unused */
139 static int
140 fore200e_fore2atm_aal(enum fore200e_aal aal)
142 switch(aal) {
143 case FORE200E_AAL0: return ATM_AAL0;
144 case FORE200E_AAL34: return ATM_AAL34;
145 case FORE200E_AAL5: return ATM_AAL5;
148 return -EINVAL;
150 #endif
153 static enum fore200e_aal
154 fore200e_atm2fore_aal(int aal)
156 switch(aal) {
157 case ATM_AAL0: return FORE200E_AAL0;
158 case ATM_AAL34: return FORE200E_AAL34;
159 case ATM_AAL1:
160 case ATM_AAL2:
161 case ATM_AAL5: return FORE200E_AAL5;
164 return -EINVAL;
168 static char*
169 fore200e_irq_itoa(int irq)
171 static char str[8];
172 sprintf(str, "%d", irq);
173 return str;
177 /* allocate and align a chunk of memory intended to hold the data being exchanged
178 between the driver and the adapter (using streaming DVMA) */
180 static int
181 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
183 unsigned long offset = 0;
185 if (alignment <= sizeof(int))
186 alignment = 0;
188 chunk->alloc_size = size + alignment;
189 chunk->align_size = size;
190 chunk->direction = direction;
192 chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
193 if (chunk->alloc_addr == NULL)
194 return -ENOMEM;
196 if (alignment > 0)
197 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
199 chunk->align_addr = chunk->alloc_addr + offset;
201 chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
203 return 0;
207 /* free a chunk of memory */
209 static void
210 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
212 fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
214 kfree(chunk->alloc_addr);
218 static void
219 fore200e_spin(int msecs)
221 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
222 while (time_before(jiffies, timeout));
226 static int
227 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
229 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
230 int ok;
232 mb();
233 do {
234 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
235 break;
237 } while (time_before(jiffies, timeout));
239 #if 1
240 if (!ok) {
241 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
242 *addr, val);
244 #endif
246 return ok;
250 static int
251 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
253 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
254 int ok;
256 do {
257 if ((ok = (fore200e->bus->read(addr) == val)))
258 break;
260 } while (time_before(jiffies, timeout));
262 #if 1
263 if (!ok) {
264 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
265 fore200e->bus->read(addr), val);
267 #endif
269 return ok;
273 static void
274 fore200e_free_rx_buf(struct fore200e* fore200e)
276 int scheme, magn, nbr;
277 struct buffer* buffer;
279 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
280 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
282 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
284 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
286 struct chunk* data = &buffer[ nbr ].data;
288 if (data->alloc_addr != NULL)
289 fore200e_chunk_free(fore200e, data);
297 static void
298 fore200e_uninit_bs_queue(struct fore200e* fore200e)
300 int scheme, magn;
302 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
303 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
305 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
306 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
308 if (status->alloc_addr)
309 fore200e->bus->dma_chunk_free(fore200e, status);
311 if (rbd_block->alloc_addr)
312 fore200e->bus->dma_chunk_free(fore200e, rbd_block);
318 static int
319 fore200e_reset(struct fore200e* fore200e, int diag)
321 int ok;
323 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
325 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
327 fore200e->bus->reset(fore200e);
329 if (diag) {
330 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
331 if (ok == 0) {
333 printk(FORE200E "device %s self-test failed\n", fore200e->name);
334 return -ENODEV;
337 printk(FORE200E "device %s self-test passed\n", fore200e->name);
339 fore200e->state = FORE200E_STATE_RESET;
342 return 0;
346 static void
347 fore200e_shutdown(struct fore200e* fore200e)
349 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
350 fore200e->name, fore200e->phys_base,
351 fore200e_irq_itoa(fore200e->irq));
353 if (fore200e->state > FORE200E_STATE_RESET) {
354 /* first, reset the board to prevent further interrupts or data transfers */
355 fore200e_reset(fore200e, 0);
358 /* then, release all allocated resources */
359 switch(fore200e->state) {
361 case FORE200E_STATE_COMPLETE:
362 kfree(fore200e->stats);
364 case FORE200E_STATE_IRQ:
365 free_irq(fore200e->irq, fore200e->atm_dev);
367 case FORE200E_STATE_ALLOC_BUF:
368 fore200e_free_rx_buf(fore200e);
370 case FORE200E_STATE_INIT_BSQ:
371 fore200e_uninit_bs_queue(fore200e);
373 case FORE200E_STATE_INIT_RXQ:
374 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
375 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
377 case FORE200E_STATE_INIT_TXQ:
378 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
379 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
381 case FORE200E_STATE_INIT_CMDQ:
382 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
384 case FORE200E_STATE_INITIALIZE:
385 /* nothing to do for that state */
387 case FORE200E_STATE_START_FW:
388 /* nothing to do for that state */
390 case FORE200E_STATE_LOAD_FW:
391 /* nothing to do for that state */
393 case FORE200E_STATE_RESET:
394 /* nothing to do for that state */
396 case FORE200E_STATE_MAP:
397 fore200e->bus->unmap(fore200e);
399 case FORE200E_STATE_CONFIGURE:
400 /* nothing to do for that state */
402 case FORE200E_STATE_REGISTER:
403 /* XXX shouldn't we *start* by deregistering the device? */
404 atm_dev_deregister(fore200e->atm_dev);
406 case FORE200E_STATE_BLANK:
407 /* nothing to do for that state */
408 break;
413 #ifdef CONFIG_ATM_FORE200E_PCA
415 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
417 /* on big-endian hosts, the board is configured to convert
418 the endianness of slave RAM accesses */
419 return le32_to_cpu(readl(addr));
423 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
425 /* on big-endian hosts, the board is configured to convert
426 the endianness of slave RAM accesses */
427 writel(cpu_to_le32(val), addr);
431 static u32
432 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
434 u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
436 DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
437 virt_addr, size, direction, dma_addr);
439 return dma_addr;
443 static void
444 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
446 DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
447 dma_addr, size, direction);
449 pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
453 static void
454 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
456 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
458 pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
461 static void
462 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
464 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
466 pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
470 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
471 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
473 static int
474 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
475 int size, int nbr, int alignment)
477 /* returned chunks are page-aligned */
478 chunk->alloc_size = size * nbr;
479 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
480 chunk->alloc_size,
481 &chunk->dma_addr);
483 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
484 return -ENOMEM;
486 chunk->align_addr = chunk->alloc_addr;
488 return 0;
492 /* free a DMA consistent chunk of memory */
494 static void
495 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
497 pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
498 chunk->alloc_size,
499 chunk->alloc_addr,
500 chunk->dma_addr);
504 static int
505 fore200e_pca_irq_check(struct fore200e* fore200e)
507 /* this is a 1-bit register */
508 int irq_posted = readl(fore200e->regs.pca.psr);
510 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
511 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
512 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
514 #endif
516 return irq_posted;
520 static void
521 fore200e_pca_irq_ack(struct fore200e* fore200e)
523 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
527 static void
528 fore200e_pca_reset(struct fore200e* fore200e)
530 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
531 fore200e_spin(10);
532 writel(0, fore200e->regs.pca.hcr);
536 static int __devinit
537 fore200e_pca_map(struct fore200e* fore200e)
539 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
541 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
543 if (fore200e->virt_base == NULL) {
544 printk(FORE200E "can't map device %s\n", fore200e->name);
545 return -EFAULT;
548 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
550 /* gain access to the PCA specific registers */
551 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
552 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
553 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
555 fore200e->state = FORE200E_STATE_MAP;
556 return 0;
560 static void
561 fore200e_pca_unmap(struct fore200e* fore200e)
563 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
565 if (fore200e->virt_base != NULL)
566 iounmap(fore200e->virt_base);
570 static int __devinit
571 fore200e_pca_configure(struct fore200e* fore200e)
573 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
574 u8 master_ctrl, latency;
576 DPRINTK(2, "device %s being configured\n", fore200e->name);
578 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
579 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
580 return -EIO;
583 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
585 master_ctrl = master_ctrl
586 #if defined(__BIG_ENDIAN)
587 /* request the PCA board to convert the endianness of slave RAM accesses */
588 | PCA200E_CTRL_CONVERT_ENDIAN
589 #endif
590 #if 0
591 | PCA200E_CTRL_DIS_CACHE_RD
592 | PCA200E_CTRL_DIS_WRT_INVAL
593 | PCA200E_CTRL_ENA_CONT_REQ_MODE
594 | PCA200E_CTRL_2_CACHE_WRT_INVAL
595 #endif
596 | PCA200E_CTRL_LARGE_PCI_BURSTS;
598 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
600 /* raise latency from 32 (default) to 192, as this seems to prevent NIC
601 lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
602 this may impact the performance of other PCI devices on the same bus, though */
603 latency = 192;
604 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
606 fore200e->state = FORE200E_STATE_CONFIGURE;
607 return 0;
611 static int __init
612 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
614 struct host_cmdq* cmdq = &fore200e->host_cmdq;
615 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
616 struct prom_opcode opcode;
617 int ok;
618 u32 prom_dma;
620 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
622 opcode.opcode = OPCODE_GET_PROM;
623 opcode.pad = 0;
625 prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
627 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
629 *entry->status = STATUS_PENDING;
631 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
633 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
635 *entry->status = STATUS_FREE;
637 fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
639 if (ok == 0) {
640 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
641 return -EIO;
644 #if defined(__BIG_ENDIAN)
646 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
648 /* MAC address is stored as little-endian */
649 swap_here(&prom->mac_addr[0]);
650 swap_here(&prom->mac_addr[4]);
651 #endif
653 return 0;
657 static int
658 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
660 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
662 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
663 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
666 #endif /* CONFIG_ATM_FORE200E_PCA */
669 #ifdef CONFIG_ATM_FORE200E_SBA
671 static u32
672 fore200e_sba_read(volatile u32 __iomem *addr)
674 return sbus_readl(addr);
678 static void
679 fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
681 sbus_writel(val, addr);
685 static u32
686 fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
688 u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction);
690 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
691 virt_addr, size, direction, dma_addr);
693 return dma_addr;
697 static void
698 fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
700 DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
701 dma_addr, size, direction);
703 sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
707 static void
708 fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
710 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
712 sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
715 static void
716 fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
718 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
720 sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
724 /* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
725 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
727 static int
728 fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
729 int size, int nbr, int alignment)
731 chunk->alloc_size = chunk->align_size = size * nbr;
733 /* returned chunks are page-aligned */
734 chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev,
735 chunk->alloc_size,
736 &chunk->dma_addr);
738 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
739 return -ENOMEM;
741 chunk->align_addr = chunk->alloc_addr;
743 return 0;
747 /* free a DVMA consistent chunk of memory */
749 static void
750 fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
752 sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev,
753 chunk->alloc_size,
754 chunk->alloc_addr,
755 chunk->dma_addr);
759 static void
760 fore200e_sba_irq_enable(struct fore200e* fore200e)
762 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
763 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
767 static int
768 fore200e_sba_irq_check(struct fore200e* fore200e)
770 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
774 static void
775 fore200e_sba_irq_ack(struct fore200e* fore200e)
777 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
778 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
782 static void
783 fore200e_sba_reset(struct fore200e* fore200e)
785 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
786 fore200e_spin(10);
787 fore200e->bus->write(0, fore200e->regs.sba.hcr);
791 static int __init
792 fore200e_sba_map(struct fore200e* fore200e)
794 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
795 unsigned int bursts;
797 /* gain access to the SBA specific registers */
798 fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
799 fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
800 fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
801 fore200e->virt_base = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
803 if (fore200e->virt_base == NULL) {
804 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
805 return -EFAULT;
808 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
810 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
812 /* get the supported DVMA burst sizes */
813 bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00);
815 if (sbus_can_dma_64bit(sbus_dev))
816 sbus_set_sbus64(sbus_dev, bursts);
818 fore200e->state = FORE200E_STATE_MAP;
819 return 0;
823 static void
824 fore200e_sba_unmap(struct fore200e* fore200e)
826 sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
827 sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
828 sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
829 sbus_iounmap(fore200e->virt_base, SBA200E_RAM_LENGTH);
833 static int __init
834 fore200e_sba_configure(struct fore200e* fore200e)
836 fore200e->state = FORE200E_STATE_CONFIGURE;
837 return 0;
841 static struct fore200e* __init
842 fore200e_sba_detect(const struct fore200e_bus* bus, int index)
844 struct fore200e* fore200e;
845 struct sbus_bus* sbus_bus;
846 struct sbus_dev* sbus_dev = NULL;
848 unsigned int count = 0;
850 for_each_sbus (sbus_bus) {
851 for_each_sbusdev (sbus_dev, sbus_bus) {
852 if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) {
853 if (count >= index)
854 goto found;
855 count++;
859 return NULL;
861 found:
862 if (sbus_dev->num_registers != 4) {
863 printk(FORE200E "this %s device has %d instead of 4 registers\n",
864 bus->model_name, sbus_dev->num_registers);
865 return NULL;
868 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
869 if (fore200e == NULL)
870 return NULL;
872 fore200e->bus = bus;
873 fore200e->bus_dev = sbus_dev;
874 fore200e->irq = sbus_dev->irqs[ 0 ];
876 fore200e->phys_base = (unsigned long)sbus_dev;
878 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
880 return fore200e;
884 static int __init
885 fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
887 struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
888 int len;
890 len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
891 if (len < 0)
892 return -EBUSY;
894 len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
895 if (len < 0)
896 return -EBUSY;
898 prom_getproperty(sbus_dev->prom_node, "serialnumber",
899 (char*)&prom->serial_number, sizeof(prom->serial_number));
901 prom_getproperty(sbus_dev->prom_node, "promversion",
902 (char*)&prom->hw_revision, sizeof(prom->hw_revision));
904 return 0;
908 static int
909 fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
911 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
913 return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
915 #endif /* CONFIG_ATM_FORE200E_SBA */
918 static void
919 fore200e_tx_irq(struct fore200e* fore200e)
921 struct host_txq* txq = &fore200e->host_txq;
922 struct host_txq_entry* entry;
923 struct atm_vcc* vcc;
924 struct fore200e_vc_map* vc_map;
926 if (fore200e->host_txq.txing == 0)
927 return;
929 for (;;) {
931 entry = &txq->host_entry[ txq->tail ];
933 if ((*entry->status & STATUS_COMPLETE) == 0) {
934 break;
937 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
938 entry, txq->tail, entry->vc_map, entry->skb);
940 /* free copy of misaligned data */
941 kfree(entry->data);
943 /* remove DMA mapping */
944 fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
945 DMA_TO_DEVICE);
947 vc_map = entry->vc_map;
949 /* vcc closed since the time the entry was submitted for tx? */
950 if ((vc_map->vcc == NULL) ||
951 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
953 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
954 fore200e->atm_dev->number);
956 dev_kfree_skb_any(entry->skb);
958 else {
959 ASSERT(vc_map->vcc);
961 /* vcc closed then immediately re-opened? */
962 if (vc_map->incarn != entry->incarn) {
964 /* when a vcc is closed, some PDUs may still be pending in the tx queue.
965 if the same vcc is immediately re-opened, those pending PDUs must
966 not be popped after their transmission completes, as they refer
967 to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
968 would be decremented by the size of the (unrelated) skb, possibly
969 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
970 we thus bind the tx entry to the current incarnation of the vcc
971 when the entry is submitted for tx. When the tx later completes,
972 if the incarnation number of the tx entry does not match the one
973 of the vcc, then this implies that the vcc has been closed then re-opened.
974 we thus just drop the skb here. */
976 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
977 fore200e->atm_dev->number);
979 dev_kfree_skb_any(entry->skb);
981 else {
982 vcc = vc_map->vcc;
983 ASSERT(vcc);
985 /* notify tx completion */
986 if (vcc->pop) {
987 vcc->pop(vcc, entry->skb);
989 else {
990 dev_kfree_skb_any(entry->skb);
992 #if 1
993 /* race fixed by the above incarnation mechanism, but... */
994 if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
995 atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
997 #endif
998 /* check error condition */
999 if (*entry->status & STATUS_ERROR)
1000 atomic_inc(&vcc->stats->tx_err);
1001 else
1002 atomic_inc(&vcc->stats->tx);
1006 *entry->status = STATUS_FREE;
1008 fore200e->host_txq.txing--;
1010 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
1015 #ifdef FORE200E_BSQ_DEBUG
1016 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
1018 struct buffer* buffer;
1019 int count = 0;
1021 buffer = bsq->freebuf;
1022 while (buffer) {
1024 if (buffer->supplied) {
1025 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
1026 where, scheme, magn, buffer->index);
1029 if (buffer->magn != magn) {
1030 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
1031 where, scheme, magn, buffer->index, buffer->magn);
1034 if (buffer->scheme != scheme) {
1035 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
1036 where, scheme, magn, buffer->index, buffer->scheme);
1039 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
1040 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
1041 where, scheme, magn, buffer->index);
1044 count++;
1045 buffer = buffer->next;
1048 if (count != bsq->freebuf_count) {
1049 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
1050 where, scheme, magn, count, bsq->freebuf_count);
1052 return 0;
1054 #endif
1057 static void
1058 fore200e_supply(struct fore200e* fore200e)
1060 int scheme, magn, i;
1062 struct host_bsq* bsq;
1063 struct host_bsq_entry* entry;
1064 struct buffer* buffer;
1066 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1067 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1069 bsq = &fore200e->host_bsq[ scheme ][ magn ];
1071 #ifdef FORE200E_BSQ_DEBUG
1072 bsq_audit(1, bsq, scheme, magn);
1073 #endif
1074 while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1076 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1077 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1079 entry = &bsq->host_entry[ bsq->head ];
1081 for (i = 0; i < RBD_BLK_SIZE; i++) {
1083 /* take the first buffer in the free buffer list */
1084 buffer = bsq->freebuf;
1085 if (!buffer) {
1086 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1087 scheme, magn, bsq->freebuf_count);
1088 return;
1090 bsq->freebuf = buffer->next;
1092 #ifdef FORE200E_BSQ_DEBUG
1093 if (buffer->supplied)
1094 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
1095 scheme, magn, buffer->index);
1096 buffer->supplied = 1;
1097 #endif
1098 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
1099 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
1102 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
1104 /* decrease the number of free rx buffers accordingly */
1105 bsq->freebuf_count -= RBD_BLK_SIZE;
1107 *entry->status = STATUS_PENDING;
1108 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
1115 static int
1116 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
1118 struct sk_buff* skb;
1119 struct buffer* buffer;
1120 struct fore200e_vcc* fore200e_vcc;
1121 int i, pdu_len = 0;
1122 #ifdef FORE200E_52BYTE_AAL0_SDU
1123 u32 cell_header = 0;
1124 #endif
1126 ASSERT(vcc);
1128 fore200e_vcc = FORE200E_VCC(vcc);
1129 ASSERT(fore200e_vcc);
1131 #ifdef FORE200E_52BYTE_AAL0_SDU
1132 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
1134 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
1135 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
1136 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
1137 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
1138 rpd->atm_header.clp;
1139 pdu_len = 4;
1141 #endif
1143 /* compute total PDU length */
1144 for (i = 0; i < rpd->nseg; i++)
1145 pdu_len += rpd->rsd[ i ].length;
1147 skb = alloc_skb(pdu_len, GFP_ATOMIC);
1148 if (skb == NULL) {
1149 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1151 atomic_inc(&vcc->stats->rx_drop);
1152 return -ENOMEM;
1155 __net_timestamp(skb);
1157 #ifdef FORE200E_52BYTE_AAL0_SDU
1158 if (cell_header) {
1159 *((u32*)skb_put(skb, 4)) = cell_header;
1161 #endif
1163 /* reassemble segments */
1164 for (i = 0; i < rpd->nseg; i++) {
1166 /* rebuild rx buffer address from rsd handle */
1167 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1169 /* Make device DMA transfer visible to CPU. */
1170 fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1172 memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
1174 /* Now let the device get at it again. */
1175 fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1178 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1180 if (pdu_len < fore200e_vcc->rx_min_pdu)
1181 fore200e_vcc->rx_min_pdu = pdu_len;
1182 if (pdu_len > fore200e_vcc->rx_max_pdu)
1183 fore200e_vcc->rx_max_pdu = pdu_len;
1184 fore200e_vcc->rx_pdu++;
1186 /* push PDU */
1187 if (atm_charge(vcc, skb->truesize) == 0) {
1189 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1190 vcc->itf, vcc->vpi, vcc->vci);
1192 dev_kfree_skb_any(skb);
1194 atomic_inc(&vcc->stats->rx_drop);
1195 return -ENOMEM;
1198 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1200 vcc->push(vcc, skb);
1201 atomic_inc(&vcc->stats->rx);
1203 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1205 return 0;
1209 static void
1210 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1212 struct host_bsq* bsq;
1213 struct buffer* buffer;
1214 int i;
1216 for (i = 0; i < rpd->nseg; i++) {
1218 /* rebuild rx buffer address from rsd handle */
1219 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1221 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1223 #ifdef FORE200E_BSQ_DEBUG
1224 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1226 if (buffer->supplied == 0)
1227 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1228 buffer->scheme, buffer->magn, buffer->index);
1229 buffer->supplied = 0;
1230 #endif
1232 /* re-insert the buffer into the free buffer list */
1233 buffer->next = bsq->freebuf;
1234 bsq->freebuf = buffer;
1236 /* then increment the number of free rx buffers */
1237 bsq->freebuf_count++;
1242 static void
1243 fore200e_rx_irq(struct fore200e* fore200e)
1245 struct host_rxq* rxq = &fore200e->host_rxq;
1246 struct host_rxq_entry* entry;
1247 struct atm_vcc* vcc;
1248 struct fore200e_vc_map* vc_map;
1250 for (;;) {
1252 entry = &rxq->host_entry[ rxq->head ];
1254 /* no more received PDUs */
1255 if ((*entry->status & STATUS_COMPLETE) == 0)
1256 break;
1258 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1260 if ((vc_map->vcc == NULL) ||
1261 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1263 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1264 fore200e->atm_dev->number,
1265 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1267 else {
1268 vcc = vc_map->vcc;
1269 ASSERT(vcc);
1271 if ((*entry->status & STATUS_ERROR) == 0) {
1273 fore200e_push_rpd(fore200e, vcc, entry->rpd);
1275 else {
1276 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1277 fore200e->atm_dev->number,
1278 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1279 atomic_inc(&vcc->stats->rx_err);
1283 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1285 fore200e_collect_rpd(fore200e, entry->rpd);
1287 /* rewrite the rpd address to ack the received PDU */
1288 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1289 *entry->status = STATUS_FREE;
1291 fore200e_supply(fore200e);
1296 #ifndef FORE200E_USE_TASKLET
1297 static void
1298 fore200e_irq(struct fore200e* fore200e)
1300 unsigned long flags;
1302 spin_lock_irqsave(&fore200e->q_lock, flags);
1303 fore200e_rx_irq(fore200e);
1304 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1306 spin_lock_irqsave(&fore200e->q_lock, flags);
1307 fore200e_tx_irq(fore200e);
1308 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1310 #endif
1313 static irqreturn_t
1314 fore200e_interrupt(int irq, void* dev)
1316 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1318 if (fore200e->bus->irq_check(fore200e) == 0) {
1320 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1321 return IRQ_NONE;
1323 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1325 #ifdef FORE200E_USE_TASKLET
1326 tasklet_schedule(&fore200e->tx_tasklet);
1327 tasklet_schedule(&fore200e->rx_tasklet);
1328 #else
1329 fore200e_irq(fore200e);
1330 #endif
1332 fore200e->bus->irq_ack(fore200e);
1333 return IRQ_HANDLED;
1337 #ifdef FORE200E_USE_TASKLET
1338 static void
1339 fore200e_tx_tasklet(unsigned long data)
1341 struct fore200e* fore200e = (struct fore200e*) data;
1342 unsigned long flags;
1344 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1346 spin_lock_irqsave(&fore200e->q_lock, flags);
1347 fore200e_tx_irq(fore200e);
1348 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1352 static void
1353 fore200e_rx_tasklet(unsigned long data)
1355 struct fore200e* fore200e = (struct fore200e*) data;
1356 unsigned long flags;
1358 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1360 spin_lock_irqsave(&fore200e->q_lock, flags);
1361 fore200e_rx_irq((struct fore200e*) data);
1362 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1364 #endif
1367 static int
1368 fore200e_select_scheme(struct atm_vcc* vcc)
1370 /* fairly balance the VCs over (identical) buffer schemes */
1371 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1373 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1374 vcc->itf, vcc->vpi, vcc->vci, scheme);
1376 return scheme;
1380 static int
1381 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1383 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1384 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1385 struct activate_opcode activ_opcode;
1386 struct deactivate_opcode deactiv_opcode;
1387 struct vpvc vpvc;
1388 int ok;
1389 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
1391 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1393 if (activate) {
1394 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1396 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1397 activ_opcode.aal = aal;
1398 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1399 activ_opcode.pad = 0;
1401 else {
1402 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1403 deactiv_opcode.pad = 0;
1406 vpvc.vci = vcc->vci;
1407 vpvc.vpi = vcc->vpi;
1409 *entry->status = STATUS_PENDING;
1411 if (activate) {
1413 #ifdef FORE200E_52BYTE_AAL0_SDU
1414 mtu = 48;
1415 #endif
1416 /* the MTU is not used by the cp, except in the case of AAL0 */
1417 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
1418 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1419 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1421 else {
1422 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1423 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1426 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1428 *entry->status = STATUS_FREE;
1430 if (ok == 0) {
1431 printk(FORE200E "unable to %s VC %d.%d.%d\n",
1432 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1433 return -EIO;
1436 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1437 activate ? "open" : "clos");
1439 return 0;
1443 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
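/* Worked example (a sketch, assuming ATM_OC3_PCR is about 353207 cells/s): a pseudo-CBR VC
   requesting max_pcr = 176603 gets data_cells = 176603 * 255 / 353207 = 127 and
   idle_cells = 255 - 127 = 128, i.e. roughly one idle cell inserted per data cell. */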
1445 static void
1446 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1448 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1450 /* compute the data cells to idle cells ratio from the tx PCR */
1451 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1452 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1454 else {
1455 /* disable rate control */
1456 rate->data_cells = rate->idle_cells = 0;
1461 static int
1462 fore200e_open(struct atm_vcc *vcc)
1464 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1465 struct fore200e_vcc* fore200e_vcc;
1466 struct fore200e_vc_map* vc_map;
1467 unsigned long flags;
1468 int vci = vcc->vci;
1469 short vpi = vcc->vpi;
1471 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1472 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1474 spin_lock_irqsave(&fore200e->q_lock, flags);
1476 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1477 if (vc_map->vcc) {
1479 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1481 printk(FORE200E "VC %d.%d.%d already in use\n",
1482 fore200e->atm_dev->number, vpi, vci);
1484 return -EINVAL;
1487 vc_map->vcc = vcc;
1489 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1491 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1492 if (fore200e_vcc == NULL) {
1493 vc_map->vcc = NULL;
1494 return -ENOMEM;
1497 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1498 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1499 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1500 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1501 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1502 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1503 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1505 /* pseudo-CBR bandwidth requested? */
1506 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1508 mutex_lock(&fore200e->rate_mtx);
1509 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1510 mutex_unlock(&fore200e->rate_mtx);
1512 kfree(fore200e_vcc);
1513 vc_map->vcc = NULL;
1514 return -EAGAIN;
1517 /* reserve bandwidth */
1518 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1519 mutex_unlock(&fore200e->rate_mtx);
1522 vcc->itf = vcc->dev->number;
1524 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1525 set_bit(ATM_VF_ADDR, &vcc->flags);
1527 vcc->dev_data = fore200e_vcc;
1529 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1531 vc_map->vcc = NULL;
1533 clear_bit(ATM_VF_ADDR, &vcc->flags);
1534 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1536 vcc->dev_data = NULL;
1538 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1540 kfree(fore200e_vcc);
1541 return -EINVAL;
1544 /* compute rate control parameters */
1545 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1547 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1548 set_bit(ATM_VF_HASQOS, &vcc->flags);
1550 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1551 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1552 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1553 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1556 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1557 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1558 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1560 /* new incarnation of the vcc */
1561 vc_map->incarn = ++fore200e->incarn_count;
1563 /* VC unusable before this flag is set */
1564 set_bit(ATM_VF_READY, &vcc->flags);
1566 return 0;
1570 static void
1571 fore200e_close(struct atm_vcc* vcc)
1573 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1574 struct fore200e_vcc* fore200e_vcc;
1575 struct fore200e_vc_map* vc_map;
1576 unsigned long flags;
1578 ASSERT(vcc);
1579 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1580 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1582 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1584 clear_bit(ATM_VF_READY, &vcc->flags);
1586 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1588 spin_lock_irqsave(&fore200e->q_lock, flags);
1590 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1592 /* the vc is no longer considered as "in use" by fore200e_open() */
1593 vc_map->vcc = NULL;
1595 vcc->itf = vcc->vci = vcc->vpi = 0;
1597 fore200e_vcc = FORE200E_VCC(vcc);
1598 vcc->dev_data = NULL;
1600 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1602 /* release reserved bandwidth, if any */
1603 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1605 mutex_lock(&fore200e->rate_mtx);
1606 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1607 mutex_unlock(&fore200e->rate_mtx);
1609 clear_bit(ATM_VF_HASQOS, &vcc->flags);
1612 clear_bit(ATM_VF_ADDR, &vcc->flags);
1613 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1615 ASSERT(fore200e_vcc);
1616 kfree(fore200e_vcc);
1620 static int
1621 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1623 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1624 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1625 struct fore200e_vc_map* vc_map;
1626 struct host_txq* txq = &fore200e->host_txq;
1627 struct host_txq_entry* entry;
1628 struct tpd* tpd;
1629 struct tpd_haddr tpd_haddr;
1630 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1631 int tx_copy = 0;
1632 int tx_len = skb->len;
1633 u32* cell_header = NULL;
1634 unsigned char* skb_data;
1635 int skb_len;
1636 unsigned char* data;
1637 unsigned long flags;
1639 ASSERT(vcc);
1640 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
1641 ASSERT(fore200e);
1642 ASSERT(fore200e_vcc);
1644 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1645 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi);
1646 dev_kfree_skb_any(skb);
1647 return -EINVAL;
1650 #ifdef FORE200E_52BYTE_AAL0_SDU
1651 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1652 cell_header = (u32*) skb->data;
1653 skb_data = skb->data + 4; /* skip 4-byte cell header */
1654 skb_len = tx_len = skb->len - 4;
1656 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1658 else
1659 #endif
1661 skb_data = skb->data;
1662 skb_len = skb->len;
1665 if (((unsigned long)skb_data) & 0x3) {
1667 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1668 tx_copy = 1;
1669 tx_len = skb_len;
1672 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1674 /* this simply NUKES the PCA board */
1675 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1676 tx_copy = 1;
1677 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1680 if (tx_copy) {
1681 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1682 if (data == NULL) {
1683 if (vcc->pop) {
1684 vcc->pop(vcc, skb);
1686 else {
1687 dev_kfree_skb_any(skb);
1689 return -ENOMEM;
1692 memcpy(data, skb_data, skb_len);
1693 if (skb_len < tx_len)
1694 memset(data + skb_len, 0x00, tx_len - skb_len);
1696 else {
1697 data = skb_data;
1700 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1701 ASSERT(vc_map->vcc == vcc);
1703 retry_here:
1705 spin_lock_irqsave(&fore200e->q_lock, flags);
1707 entry = &txq->host_entry[ txq->head ];
1709 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1711 /* try to free completed tx queue entries */
1712 fore200e_tx_irq(fore200e);
1714 if (*entry->status != STATUS_FREE) {
1716 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1718 /* retry once again? */
1719 if (--retry > 0) {
1720 udelay(50);
1721 goto retry_here;
1724 atomic_inc(&vcc->stats->tx_err);
1726 fore200e->tx_sat++;
1727 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1728 fore200e->name, fore200e->cp_queues->heartbeat);
1729 if (vcc->pop) {
1730 vcc->pop(vcc, skb);
1732 else {
1733 dev_kfree_skb_any(skb);
1736 if (tx_copy)
1737 kfree(data);
1739 return -ENOBUFS;
1743 entry->incarn = vc_map->incarn;
1744 entry->vc_map = vc_map;
1745 entry->skb = skb;
1746 entry->data = tx_copy ? data : NULL;
1748 tpd = entry->tpd;
1749 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1750 tpd->tsd[ 0 ].length = tx_len;
1752 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1753 txq->txing++;
1755 /* The dma_map call above implies a dma_sync so the device can use it,
1756 * thus no explicit dma_sync call is necessary here.
1759 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1760 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1761 tpd->tsd[0].length, skb_len);
1763 if (skb_len < fore200e_vcc->tx_min_pdu)
1764 fore200e_vcc->tx_min_pdu = skb_len;
1765 if (skb_len > fore200e_vcc->tx_max_pdu)
1766 fore200e_vcc->tx_max_pdu = skb_len;
1767 fore200e_vcc->tx_pdu++;
1769 /* set tx rate control information */
1770 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1771 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1773 if (cell_header) {
1774 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1775 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1776 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1777 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1778 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1780 else {
1781 /* set the ATM header, common to all cells conveying the PDU */
1782 tpd->atm_header.clp = 0;
1783 tpd->atm_header.plt = 0;
1784 tpd->atm_header.vci = vcc->vci;
1785 tpd->atm_header.vpi = vcc->vpi;
1786 tpd->atm_header.gfc = 0;
1789 tpd->spec.length = tx_len;
1790 tpd->spec.nseg = 1;
1791 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1792 tpd->spec.intr = 1;
1794 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
1795 tpd_haddr.pad = 0;
1796 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
1798 *entry->status = STATUS_PENDING;
1799 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1801 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1803 return 0;
1807 static int
1808 fore200e_getstats(struct fore200e* fore200e)
1810 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1811 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1812 struct stats_opcode opcode;
1813 int ok;
1814 u32 stats_dma_addr;
1816 if (fore200e->stats == NULL) {
1817 fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1818 if (fore200e->stats == NULL)
1819 return -ENOMEM;
1822 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1823 sizeof(struct stats), DMA_FROM_DEVICE);
1825 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1827 opcode.opcode = OPCODE_GET_STATS;
1828 opcode.pad = 0;
1830 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1832 *entry->status = STATUS_PENDING;
1834 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1836 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1838 *entry->status = STATUS_FREE;
1840 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1842 if (ok == 0) {
1843 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1844 return -EIO;
1847 return 0;
1851 static int
1852 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1854 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1856 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1857 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1859 return -EINVAL;
1863 static int
1864 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1866 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1868 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1869 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1871 return -EINVAL;
1875 #if 0 /* currently unused */
1876 static int
1877 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1879 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1880 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1881 struct oc3_opcode opcode;
1882 int ok;
1883 u32 oc3_regs_dma_addr;
1885 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1887 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1889 opcode.opcode = OPCODE_GET_OC3;
1890 opcode.reg = 0;
1891 opcode.value = 0;
1892 opcode.mask = 0;
1894 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1896 *entry->status = STATUS_PENDING;
1898 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1900 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1902 *entry->status = STATUS_FREE;
1904 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1906 if (ok == 0) {
1907 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1908 return -EIO;
1911 return 0;
1913 #endif
1916 static int
1917 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1919 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1920 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1921 struct oc3_opcode opcode;
1922 int ok;
1924 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1926 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1928 opcode.opcode = OPCODE_SET_OC3;
1929 opcode.reg = reg;
1930 opcode.value = value;
1931 opcode.mask = mask;
1933 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1935 *entry->status = STATUS_PENDING;
1937 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1939 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1941 *entry->status = STATUS_FREE;
1943 if (ok == 0) {
1944 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1945 return -EIO;
1948 return 0;
1952 static int
1953 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1955 u32 mct_value, mct_mask;
1956 int error;
1958 if (!capable(CAP_NET_ADMIN))
1959 return -EPERM;
1961 switch (loop_mode) {
1963 case ATM_LM_NONE:
1964 mct_value = 0;
1965 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1966 break;
1968 case ATM_LM_LOC_PHY:
1969 mct_value = mct_mask = SUNI_MCT_DLE;
1970 break;
1972 case ATM_LM_RMT_PHY:
1973 mct_value = mct_mask = SUNI_MCT_LLE;
1974 break;
1976 default:
1977 return -EINVAL;
1980 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1981 if (error == 0)
1982 fore200e->loop_mode = loop_mode;
1984 return error;
1988 static int
1989 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1991 struct sonet_stats tmp;
1993 if (fore200e_getstats(fore200e) < 0)
1994 return -EIO;
1996 tmp.section_bip = cpu_to_be32(fore200e->stats->oc3.section_bip8_errors);
1997 tmp.line_bip = cpu_to_be32(fore200e->stats->oc3.line_bip24_errors);
1998 tmp.path_bip = cpu_to_be32(fore200e->stats->oc3.path_bip8_errors);
1999 tmp.line_febe = cpu_to_be32(fore200e->stats->oc3.line_febe_errors);
2000 tmp.path_febe = cpu_to_be32(fore200e->stats->oc3.path_febe_errors);
2001 tmp.corr_hcs = cpu_to_be32(fore200e->stats->oc3.corr_hcs_errors);
2002 tmp.uncorr_hcs = cpu_to_be32(fore200e->stats->oc3.ucorr_hcs_errors);
2003 tmp.tx_cells = cpu_to_be32(fore200e->stats->aal0.cells_transmitted) +
2004 cpu_to_be32(fore200e->stats->aal34.cells_transmitted) +
2005 cpu_to_be32(fore200e->stats->aal5.cells_transmitted);
2006 tmp.rx_cells = cpu_to_be32(fore200e->stats->aal0.cells_received) +
2007 cpu_to_be32(fore200e->stats->aal34.cells_received) +
2008 cpu_to_be32(fore200e->stats->aal5.cells_received);
2010 if (arg)
2011 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
2013 return 0;
2017 static int
2018 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
2020 struct fore200e* fore200e = FORE200E_DEV(dev);
2022 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
2024 switch (cmd) {
2026 case SONET_GETSTAT:
2027 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
2029 case SONET_GETDIAG:
2030 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
2032 case ATM_SETLOOP:
2033 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
2035 case ATM_GETLOOP:
2036 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
2038 case ATM_QUERYLOOP:
2039 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
2042 return -ENOSYS; /* not implemented */
2046 static int
2047 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
2049 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
2050 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
2052 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
2053 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
2054 return -EINVAL;
2057 DPRINTK(2, "change_qos %d.%d.%d, "
2058 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
2059 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
2060 "available_cell_rate = %u",
2061 vcc->itf, vcc->vpi, vcc->vci,
2062 fore200e_traffic_class[ qos->txtp.traffic_class ],
2063 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
2064 fore200e_traffic_class[ qos->rxtp.traffic_class ],
2065 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2066 flags, fore200e->available_cell_rate);
2068 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
2070 mutex_lock(&fore200e->rate_mtx);
2071 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2072 mutex_unlock(&fore200e->rate_mtx);
2073 return -EAGAIN;
2076 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2077 fore200e->available_cell_rate -= qos->txtp.max_pcr;
2079 mutex_unlock(&fore200e->rate_mtx);
2081 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2083 /* update rate control parameters */
2084 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2086 set_bit(ATM_VF_HASQOS, &vcc->flags);
2088 return 0;
2091 return -EINVAL;
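/* register the (shared) interrupt handler and, when FORE200E_USE_TASKLET is
   defined, set up the tx and rx tasklets used to defer interrupt work */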
2095 static int __devinit
2096 fore200e_irq_request(struct fore200e* fore200e)
2098 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
2100 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2101 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2102 return -EBUSY;
2105 printk(FORE200E "IRQ %s reserved for device %s\n",
2106 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2108 #ifdef FORE200E_USE_TASKLET
2109 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2110 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2111 #endif
2113 fore200e->state = FORE200E_STATE_IRQ;
2114 return 0;
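/* read the serial PROM of the adapter to get its hardware revision, serial
   number and end system identifier (ESI); the ESI is taken from bytes 2..7
   of the PROM MAC address field and copied into both fore200e->esi and the
   registered atm_dev */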
2118 static int __devinit
2119 fore200e_get_esi(struct fore200e* fore200e)
2121 struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2122 int ok, i;
2124 if (!prom)
2125 return -ENOMEM;
2127 ok = fore200e->bus->prom_read(fore200e, prom);
2128 if (ok < 0) {
2129 kfree(prom);
2130 return -EBUSY;
2133 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
2134 fore200e->name,
2135 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
2136 prom->serial_number & 0xFFFF,
2137 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
2138 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
2140 for (i = 0; i < ESI_LEN; i++) {
2141 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2144 kfree(prom);
2146 return 0;
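/* allocate the host receive buffers: for each buffer scheme and magnitude,
   allocate the array of buffer descriptors and the DMA-able buffer bodies,
   and chain all of them on the free buffer list of the corresponding
   buffer supply queue */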
2150 static int __devinit
2151 fore200e_alloc_rx_buf(struct fore200e* fore200e)
2153 int scheme, magn, nbr, size, i;
2155 struct host_bsq* bsq;
2156 struct buffer* buffer;
2158 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2159 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2161 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2163 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
2164 size = fore200e_rx_buf_size[ scheme ][ magn ];
2166 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2168 /* allocate the array of receive buffers */
2169 buffer = bsq->buffer = kzalloc(nbr * sizeof(struct buffer), GFP_KERNEL);
2171 if (buffer == NULL)
2172 return -ENOMEM;
2174 bsq->freebuf = NULL;
2176 for (i = 0; i < nbr; i++) {
2178 buffer[ i ].scheme = scheme;
2179 buffer[ i ].magn = magn;
2180 #ifdef FORE200E_BSQ_DEBUG
2181 buffer[ i ].index = i;
2182 buffer[ i ].supplied = 0;
2183 #endif
2185 /* allocate the receive buffer body */
2186 if (fore200e_chunk_alloc(fore200e,
2187 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2188 DMA_FROM_DEVICE) < 0) {
2190 while (i > 0)
2191 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2192 kfree(buffer);
2194 return -ENOMEM;
2197 /* insert the buffer into the free buffer list */
2198 buffer[ i ].next = bsq->freebuf;
2199 bsq->freebuf = &buffer[ i ];
2201 /* all the buffers are free, initially */
2202 bsq->freebuf_count = nbr;
2204 #ifdef FORE200E_BSQ_DEBUG
2205 bsq_audit(3, bsq, scheme, magn);
2206 #endif
2210 fore200e->state = FORE200E_STATE_ALLOC_BUF;
2211 return 0;
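/* initialize the buffer supply queues: for each scheme/magnitude pair,
   allocate the aligned arrays of status words and of receive buffer
   descriptor blocks, bind each host entry to its cp resident counterpart
   and publish the DMA address of its status word */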
2215 static int __devinit
2216 fore200e_init_bs_queue(struct fore200e* fore200e)
2218 int scheme, magn, i;
2220 struct host_bsq* bsq;
2221 struct cp_bsq_entry __iomem * cp_entry;
2223 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2224 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2226 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2228 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2230 /* allocate and align the array of status words */
2231 if (fore200e->bus->dma_chunk_alloc(fore200e,
2232 &bsq->status,
2233 sizeof(enum status),
2234 QUEUE_SIZE_BS,
2235 fore200e->bus->status_alignment) < 0) {
2236 return -ENOMEM;
2239 /* allocate and align the array of receive buffer descriptors */
2240 if (fore200e->bus->dma_chunk_alloc(fore200e,
2241 &bsq->rbd_block,
2242 sizeof(struct rbd_block),
2243 QUEUE_SIZE_BS,
2244 fore200e->bus->descr_alignment) < 0) {
2246 fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2247 return -ENOMEM;
2250 /* get the base address of the cp resident buffer supply queue entries */
2251 cp_entry = fore200e->virt_base +
2252 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2254 /* fill the host resident and cp resident buffer supply queue entries */
2255 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2257 bsq->host_entry[ i ].status =
2258 FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2259 bsq->host_entry[ i ].rbd_block =
2260 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2261 bsq->host_entry[ i ].rbd_block_dma =
2262 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2263 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2265 *bsq->host_entry[ i ].status = STATUS_FREE;
2267 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2268 &cp_entry[ i ].status_haddr);
2273 fore200e->state = FORE200E_STATE_INIT_BSQ;
2274 return 0;
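/* initialize the receive queue: allocate the aligned status word and rpd
   arrays, bind each host entry to its cp resident counterpart and write
   the DMA addresses of both the status word and the rpd into the cp
   entry */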
2278 static int __devinit
2279 fore200e_init_rx_queue(struct fore200e* fore200e)
2281 struct host_rxq* rxq = &fore200e->host_rxq;
2282 struct cp_rxq_entry __iomem * cp_entry;
2283 int i;
2285 DPRINTK(2, "receive queue is being initialized\n");
2287 /* allocate and align the array of status words */
2288 if (fore200e->bus->dma_chunk_alloc(fore200e,
2289 &rxq->status,
2290 sizeof(enum status),
2291 QUEUE_SIZE_RX,
2292 fore200e->bus->status_alignment) < 0) {
2293 return -ENOMEM;
2296 /* allocate and align the array of receive PDU descriptors */
2297 if (fore200e->bus->dma_chunk_alloc(fore200e,
2298 &rxq->rpd,
2299 sizeof(struct rpd),
2300 QUEUE_SIZE_RX,
2301 fore200e->bus->descr_alignment) < 0) {
2303 fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2304 return -ENOMEM;
2307 /* get the base address of the cp resident rx queue entries */
2308 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2310 /* fill the host resident and cp resident rx entries */
2311 for (i=0; i < QUEUE_SIZE_RX; i++) {
2313 rxq->host_entry[ i ].status =
2314 FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2315 rxq->host_entry[ i ].rpd =
2316 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2317 rxq->host_entry[ i ].rpd_dma =
2318 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2319 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2321 *rxq->host_entry[ i ].status = STATUS_FREE;
2323 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2324 &cp_entry[ i ].status_haddr);
2326 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2327 &cp_entry[ i ].rpd_haddr);
2330 /* set the head entry of the queue */
2331 rxq->head = 0;
2333 fore200e->state = FORE200E_STATE_INIT_RXQ;
2334 return 0;
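/* initialize the transmit queue: same layout as the receive queue, except
   that the tpd DMA addresses are written to the cp only at transmit time,
   as explained at the end of the init loop below */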
2338 static int __devinit
2339 fore200e_init_tx_queue(struct fore200e* fore200e)
2341 struct host_txq* txq = &fore200e->host_txq;
2342 struct cp_txq_entry __iomem * cp_entry;
2343 int i;
2345 DPRINTK(2, "transmit queue is being initialized\n");
2347 /* allocate and align the array of status words */
2348 if (fore200e->bus->dma_chunk_alloc(fore200e,
2349 &txq->status,
2350 sizeof(enum status),
2351 QUEUE_SIZE_TX,
2352 fore200e->bus->status_alignment) < 0) {
2353 return -ENOMEM;
2356 /* allocate and align the array of transmit PDU descriptors */
2357 if (fore200e->bus->dma_chunk_alloc(fore200e,
2358 &txq->tpd,
2359 sizeof(struct tpd),
2360 QUEUE_SIZE_TX,
2361 fore200e->bus->descr_alignment) < 0) {
2363 fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2364 return -ENOMEM;
2367 /* get the base address of the cp resident tx queue entries */
2368 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2370 /* fill the host resident and cp resident tx entries */
2371 for (i=0; i < QUEUE_SIZE_TX; i++) {
2373 txq->host_entry[ i ].status =
2374 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2375 txq->host_entry[ i ].tpd =
2376 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2377 txq->host_entry[ i ].tpd_dma =
2378 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2379 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2381 *txq->host_entry[ i ].status = STATUS_FREE;
2383 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2384 &cp_entry[ i ].status_haddr);
2386 /* although there is a one-to-one mapping of tx queue entries and tpds,
2387 the DMA (physical) base address of each tpd is deliberately not written
2388 into the related cp resident entry here, because the cp relies on that
2389 write operation to detect that a new pdu has been submitted for tx */
2392 /* set the head and tail entries of the queue */
2393 txq->head = 0;
2394 txq->tail = 0;
2396 fore200e->state = FORE200E_STATE_INIT_TXQ;
2397 return 0;
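/* initialize the command queue: allocate the aligned array of status words
   and bind each host entry to its cp resident counterpart */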
2401 static int __devinit
2402 fore200e_init_cmd_queue(struct fore200e* fore200e)
2404 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2405 struct cp_cmdq_entry __iomem * cp_entry;
2406 int i;
2408 DPRINTK(2, "command queue is being initialized\n");
2410 /* allocate and align the array of status words */
2411 if (fore200e->bus->dma_chunk_alloc(fore200e,
2412 &cmdq->status,
2413 sizeof(enum status),
2414 QUEUE_SIZE_CMD,
2415 fore200e->bus->status_alignment) < 0) {
2416 return -ENOMEM;
2419 /* get the base address of the cp resident cmd queue entries */
2420 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2422 /* fill the host resident and cp resident cmd entries */
2423 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2425 cmdq->host_entry[ i ].status =
2426 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2427 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2429 *cmdq->host_entry[ i ].status = STATUS_FREE;
2431 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2432 &cp_entry[ i ].status_haddr);
2435 /* set the head entry of the queue */
2436 cmdq->head = 0;
2438 fore200e->state = FORE200E_STATE_INIT_CMDQ;
2439 return 0;
2443 static void __devinit
2444 fore200e_param_bs_queue(struct fore200e* fore200e,
2445 enum buffer_scheme scheme, enum buffer_magn magn,
2446 int queue_length, int pool_size, int supply_blksize)
2448 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2450 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2451 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2452 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2453 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
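/* perform the initialization handshake with the cell processor: fill in the
   cp resident init block (connection count, queue lengths, descriptor
   extensions and buffer supply parameters), issue OPCODE_INITIALIZE and
   poll the status word until the cp reports completion */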
2457 static int __devinit
2458 fore200e_initialize(struct fore200e* fore200e)
2460 struct cp_queues __iomem * cpq;
2461 int ok, scheme, magn;
2463 DPRINTK(2, "device %s being initialized\n", fore200e->name);
2465 mutex_init(&fore200e->rate_mtx);
2466 spin_lock_init(&fore200e->q_lock);
2468 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2470 /* enable cp to host interrupts */
2471 fore200e->bus->write(1, &cpq->imask);
2473 if (fore200e->bus->irq_enable)
2474 fore200e->bus->irq_enable(fore200e);
2476 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2478 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2479 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2480 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2482 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2483 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
2485 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2486 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2487 fore200e_param_bs_queue(fore200e, scheme, magn,
2488 QUEUE_SIZE_BS,
2489 fore200e_rx_buf_nbr[ scheme ][ magn ],
2490 RBD_BLK_SIZE);
2492 /* issue the initialize command */
2493 fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
2494 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2496 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2497 if (ok == 0) {
2498 printk(FORE200E "device %s initialization failed\n", fore200e->name);
2499 return -ENODEV;
2502 printk(FORE200E "device %s initialized\n", fore200e->name);
2504 fore200e->state = FORE200E_STATE_INITIALIZE;
2505 return 0;
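/* the i960 boot monitor is driven through a pair of "soft UART" registers
   in the shared memory window; the putc/getc/puts helpers below poll these
   registers to exchange single characters with the monitor */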
2509 static void __devinit
2510 fore200e_monitor_putc(struct fore200e* fore200e, char c)
2512 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2514 #if 0
2515 printk("%c", c);
2516 #endif
2517 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2521 static int __devinit
2522 fore200e_monitor_getc(struct fore200e* fore200e)
2524 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2525 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2526 int c;
2528 while (time_before(jiffies, timeout)) {
2530 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2532 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2534 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2535 #if 0
2536 printk("%c", c & 0xFF);
2537 #endif
2538 return c & 0xFF;
2542 return -1;
2546 static void __devinit
2547 fore200e_monitor_puts(struct fore200e* fore200e, char* str)
2549 while (*str) {
2551 /* the i960 monitor doesn't accept any new character if it has something to say */
2552 while (fore200e_monitor_getc(fore200e) >= 0);
2554 fore200e_monitor_putc(fore200e, *str++);
2557 while (fore200e_monitor_getc(fore200e) >= 0);
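/* start the downloaded firmware by sending a "go <start_offset>" command to
   the i960 monitor, then poll the boot status word until the cp reports
   BSTAT_CP_RUNNING */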
2561 static int __devinit
2562 fore200e_start_fw(struct fore200e* fore200e)
2564 int ok;
2565 char cmd[ 48 ];
2566 struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data;
2568 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2570 #if defined(__sparc_v9__)
2571 /* reported to be required by SBA cards on some sparc64 hosts */
2572 fore200e_spin(100);
2573 #endif
2575 sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2577 fore200e_monitor_puts(fore200e, cmd);
2579 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000);
2580 if (ok == 0) {
2581 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2582 return -ENODEV;
2585 printk(FORE200E "device %s firmware started\n", fore200e->name);
2587 fore200e->state = FORE200E_STATE_START_FW;
2588 return 0;
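/* copy the firmware image into the adapter memory: check the little-endian
   header magic, then write the image word by word at the load offset given
   in the firmware header */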
2592 static int __devinit
2593 fore200e_load_fw(struct fore200e* fore200e)
2595 u32* fw_data = (u32*) fore200e->bus->fw_data;
2596 u32 fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32);
2598 struct fw_header* fw_header = (struct fw_header*) fw_data;
2600 u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2602 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2603 fore200e->name, load_addr, fw_size);
2605 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2606 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2607 return -ENODEV;
2610 for (; fw_size--; fw_data++, load_addr++)
2611 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2613 fore200e->state = FORE200E_STATE_LOAD_FW;
2614 return 0;
2618 static int __devinit
2619 fore200e_register(struct fore200e* fore200e)
2621 struct atm_dev* atm_dev;
2623 DPRINTK(2, "device %s being registered\n", fore200e->name);
2625 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
2626 NULL);
2627 if (atm_dev == NULL) {
2628 printk(FORE200E "unable to register device %s\n", fore200e->name);
2629 return -ENODEV;
2632 atm_dev->dev_data = fore200e;
2633 fore200e->atm_dev = atm_dev;
2635 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2636 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2638 fore200e->available_cell_rate = ATM_OC3_PCR;
2640 fore200e->state = FORE200E_STATE_REGISTER;
2641 return 0;
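/* bring up one adapter end to end: register the atm device, configure and
   map the board, reset it, load and start the firmware, initialize the cp,
   set up the command/tx/rx/buffer supply queues, allocate and supply the
   receive buffers, read the ESI and finally hook the interrupt handler */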
2645 static int __devinit
2646 fore200e_init(struct fore200e* fore200e)
2648 if (fore200e_register(fore200e) < 0)
2649 return -ENODEV;
2651 if (fore200e->bus->configure(fore200e) < 0)
2652 return -ENODEV;
2654 if (fore200e->bus->map(fore200e) < 0)
2655 return -ENODEV;
2657 if (fore200e_reset(fore200e, 1) < 0)
2658 return -ENODEV;
2660 if (fore200e_load_fw(fore200e) < 0)
2661 return -ENODEV;
2663 if (fore200e_start_fw(fore200e) < 0)
2664 return -ENODEV;
2666 if (fore200e_initialize(fore200e) < 0)
2667 return -ENODEV;
2669 if (fore200e_init_cmd_queue(fore200e) < 0)
2670 return -ENOMEM;
2672 if (fore200e_init_tx_queue(fore200e) < 0)
2673 return -ENOMEM;
2675 if (fore200e_init_rx_queue(fore200e) < 0)
2676 return -ENOMEM;
2678 if (fore200e_init_bs_queue(fore200e) < 0)
2679 return -ENOMEM;
2681 if (fore200e_alloc_rx_buf(fore200e) < 0)
2682 return -ENOMEM;
2684 if (fore200e_get_esi(fore200e) < 0)
2685 return -EIO;
2687 if (fore200e_irq_request(fore200e) < 0)
2688 return -EBUSY;
2690 fore200e_supply(fore200e);
2692 /* all done, board initialization is now complete */
2693 fore200e->state = FORE200E_STATE_COMPLETE;
2694 return 0;
2697 #ifdef CONFIG_ATM_FORE200E_PCA
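/* PCI probe routine for PCA-200E boards: enable the device, allocate and
   fill the fore200e descriptor from the PCI resources, enable bus mastering
   and run the common initialization; the board is freed and the device
   disabled again on failure */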
2698 static int __devinit
2699 fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
2701 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
2702 struct fore200e* fore200e;
2703 int err = 0;
2704 static int index = 0;
2706 if (pci_enable_device(pci_dev)) {
2707 err = -EINVAL;
2708 goto out;
2711 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2712 if (fore200e == NULL) {
2713 err = -ENOMEM;
2714 goto out_disable;
2717 fore200e->bus = bus;
2718 fore200e->bus_dev = pci_dev;
2719 fore200e->irq = pci_dev->irq;
2720 fore200e->phys_base = pci_resource_start(pci_dev, 0);
2724 pci_set_master(pci_dev);
2726 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2727 fore200e->bus->model_name,
2728 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2730 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2732 err = fore200e_init(fore200e);
2733 if (err < 0) {
2734 fore200e_shutdown(fore200e);
2735 goto out_free;
2738 ++index;
2739 pci_set_drvdata(pci_dev, fore200e);
2741 out:
2742 return err;
2744 out_free:
2745 kfree(fore200e);
2746 out_disable:
2747 pci_disable_device(pci_dev);
2748 goto out;
2752 static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
2754 struct fore200e *fore200e;
2756 fore200e = pci_get_drvdata(pci_dev);
2758 fore200e_shutdown(fore200e);
2759 kfree(fore200e);
2760 pci_disable_device(pci_dev);
2764 static struct pci_device_id fore200e_pca_tbl[] = {
2765 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
2766 0, 0, (unsigned long) &fore200e_bus[0] },
2767 { 0, }
2770 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2772 static struct pci_driver fore200e_pca_driver = {
2773 .name = "fore_200e",
2774 .probe = fore200e_pca_detect,
2775 .remove = __devexit_p(fore200e_pca_remove_one),
2776 .id_table = fore200e_pca_tbl,
2778 #endif
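/* module entry point: walk the configured bus interfaces, probe the boards
   that provide a legacy detect() method (SBA) and register the PCI driver
   for PCA-200E boards; the module loads if either the PCI driver registered
   or at least one board was brought up */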
2781 static int __init
2782 fore200e_module_init(void)
2784 const struct fore200e_bus* bus;
2785 struct fore200e* fore200e;
2786 int index;
2788 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2790 /* for each configured bus interface */
2791 for (bus = fore200e_bus; bus->model_name; bus++) {
2793 /* detect all boards present on that bus */
2794 for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) {
2796 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2797 fore200e->bus->model_name,
2798 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2800 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2802 if (fore200e_init(fore200e) < 0) {
2804 fore200e_shutdown(fore200e);
2805 break;
2808 list_add(&fore200e->entry, &fore200e_boards);
2812 #ifdef CONFIG_ATM_FORE200E_PCA
2813 if (!pci_register_driver(&fore200e_pca_driver))
2814 return 0;
2815 #endif
2817 if (!list_empty(&fore200e_boards))
2818 return 0;
2820 return -ENODEV;
2824 static void __exit
2825 fore200e_module_cleanup(void)
2827 struct fore200e *fore200e, *next;
2829 #ifdef CONFIG_ATM_FORE200E_PCA
2830 pci_unregister_driver(&fore200e_pca_driver);
2831 #endif
2833 list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) {
2834 fore200e_shutdown(fore200e);
2835 kfree(fore200e);
2837 DPRINTK(1, "module being removed\n");
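/* /proc/atm read-out: *pos selects which block of the report is emitted on
   this call (device identity, free buffer counts, cp heartbeat, firmware
   and media information, monitor state, the various statistics sections
   and finally one line per open VC) */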
2841 static int
2842 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2844 struct fore200e* fore200e = FORE200E_DEV(dev);
2845 struct fore200e_vcc* fore200e_vcc;
2846 struct atm_vcc* vcc;
2847 int i, len, left = *pos;
2848 unsigned long flags;
2850 if (!left--) {
2852 if (fore200e_getstats(fore200e) < 0)
2853 return -EIO;
2855 len = sprintf(page,"\n"
2856 " device:\n"
2857 " internal name:\t\t%s\n", fore200e->name);
2859 /* print bus-specific information */
2860 if (fore200e->bus->proc_read)
2861 len += fore200e->bus->proc_read(fore200e, page + len);
2863 len += sprintf(page + len,
2864 " interrupt line:\t\t%s\n"
2865 " physical base address:\t0x%p\n"
2866 " virtual base address:\t0x%p\n"
2867 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
2868 " board serial number:\t\t%d\n\n",
2869 fore200e_irq_itoa(fore200e->irq),
2870 (void*)fore200e->phys_base,
2871 fore200e->virt_base,
2872 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
2873 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
2874 fore200e->esi[4] * 256 + fore200e->esi[5]);
2876 return len;
2879 if (!left--)
2880 return sprintf(page,
2881 " free small bufs, scheme 1:\t%d\n"
2882 " free large bufs, scheme 1:\t%d\n"
2883 " free small bufs, scheme 2:\t%d\n"
2884 " free large bufs, scheme 2:\t%d\n",
2885 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2886 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2887 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2888 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2890 if (!left--) {
2891 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2893 len = sprintf(page,"\n\n"
2894 " cell processor:\n"
2895 " heartbeat state:\t\t");
2897 if (hb >> 16 != 0xDEAD)
2898 len += sprintf(page + len, "0x%08x\n", hb);
2899 else
2900 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2902 return len;
2905 if (!left--) {
2906 static const char* media_name[] = {
2907 "unshielded twisted pair",
2908 "multimode optical fiber ST",
2909 "multimode optical fiber SC",
2910 "single-mode optical fiber ST",
2911 "single-mode optical fiber SC",
2912 "unknown"
2915 static const char* oc3_mode[] = {
2916 "normal operation",
2917 "diagnostic loopback",
2918 "line loopback",
2919 "unknown"
2922 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2923 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2924 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2925 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2926 u32 oc3_index;
2928 if (media_index > 4) /* media_index is unsigned, no need to test for < 0 */
2929 media_index = 5;
2931 switch (fore200e->loop_mode) {
2932 case ATM_LM_NONE: oc3_index = 0;
2933 break;
2934 case ATM_LM_LOC_PHY: oc3_index = 1;
2935 break;
2936 case ATM_LM_RMT_PHY: oc3_index = 2;
2937 break;
2938 default: oc3_index = 3;
2941 return sprintf(page,
2942 " firmware release:\t\t%d.%d.%d\n"
2943 " monitor release:\t\t%d.%d\n"
2944 " media type:\t\t\t%s\n"
2945 " OC-3 revision:\t\t0x%x\n"
2946 " OC-3 mode:\t\t\t%s",
2947 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2948 mon960_release >> 16, mon960_release << 16 >> 16,
2949 media_name[ media_index ],
2950 oc3_revision,
2951 oc3_mode[ oc3_index ]);
2954 if (!left--) {
2955 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2957 return sprintf(page,
2958 "\n\n"
2959 " monitor:\n"
2960 " version number:\t\t%d\n"
2961 " boot status word:\t\t0x%08x\n",
2962 fore200e->bus->read(&cp_monitor->mon_version),
2963 fore200e->bus->read(&cp_monitor->bstat));
2966 if (!left--)
2967 return sprintf(page,
2968 "\n"
2969 " device statistics:\n"
2970 " 4b5b:\n"
2971 " crc_header_errors:\t\t%10u\n"
2972 " framing_errors:\t\t%10u\n",
2973 cpu_to_be32(fore200e->stats->phy.crc_header_errors),
2974 cpu_to_be32(fore200e->stats->phy.framing_errors));
2976 if (!left--)
2977 return sprintf(page, "\n"
2978 " OC-3:\n"
2979 " section_bip8_errors:\t%10u\n"
2980 " path_bip8_errors:\t\t%10u\n"
2981 " line_bip24_errors:\t\t%10u\n"
2982 " line_febe_errors:\t\t%10u\n"
2983 " path_febe_errors:\t\t%10u\n"
2984 " corr_hcs_errors:\t\t%10u\n"
2985 " ucorr_hcs_errors:\t\t%10u\n",
2986 cpu_to_be32(fore200e->stats->oc3.section_bip8_errors),
2987 cpu_to_be32(fore200e->stats->oc3.path_bip8_errors),
2988 cpu_to_be32(fore200e->stats->oc3.line_bip24_errors),
2989 cpu_to_be32(fore200e->stats->oc3.line_febe_errors),
2990 cpu_to_be32(fore200e->stats->oc3.path_febe_errors),
2991 cpu_to_be32(fore200e->stats->oc3.corr_hcs_errors),
2992 cpu_to_be32(fore200e->stats->oc3.ucorr_hcs_errors));
2994 if (!left--)
2995 return sprintf(page,"\n"
2996 " ATM:\t\t\t\t cells\n"
2997 " TX:\t\t\t%10u\n"
2998 " RX:\t\t\t%10u\n"
2999 " vpi out of range:\t\t%10u\n"
3000 " vpi no conn:\t\t%10u\n"
3001 " vci out of range:\t\t%10u\n"
3002 " vci no conn:\t\t%10u\n",
3003 cpu_to_be32(fore200e->stats->atm.cells_transmitted),
3004 cpu_to_be32(fore200e->stats->atm.cells_received),
3005 cpu_to_be32(fore200e->stats->atm.vpi_bad_range),
3006 cpu_to_be32(fore200e->stats->atm.vpi_no_conn),
3007 cpu_to_be32(fore200e->stats->atm.vci_bad_range),
3008 cpu_to_be32(fore200e->stats->atm.vci_no_conn));
3010 if (!left--)
3011 return sprintf(page,"\n"
3012 " AAL0:\t\t\t cells\n"
3013 " TX:\t\t\t%10u\n"
3014 " RX:\t\t\t%10u\n"
3015 " dropped:\t\t\t%10u\n",
3016 cpu_to_be32(fore200e->stats->aal0.cells_transmitted),
3017 cpu_to_be32(fore200e->stats->aal0.cells_received),
3018 cpu_to_be32(fore200e->stats->aal0.cells_dropped));
3020 if (!left--)
3021 return sprintf(page,"\n"
3022 " AAL3/4:\n"
3023 " SAR sublayer:\t\t cells\n"
3024 " TX:\t\t\t%10u\n"
3025 " RX:\t\t\t%10u\n"
3026 " dropped:\t\t\t%10u\n"
3027 " CRC errors:\t\t%10u\n"
3028 " protocol errors:\t\t%10u\n\n"
3029 " CS sublayer:\t\t PDUs\n"
3030 " TX:\t\t\t%10u\n"
3031 " RX:\t\t\t%10u\n"
3032 " dropped:\t\t\t%10u\n"
3033 " protocol errors:\t\t%10u\n",
3034 cpu_to_be32(fore200e->stats->aal34.cells_transmitted),
3035 cpu_to_be32(fore200e->stats->aal34.cells_received),
3036 cpu_to_be32(fore200e->stats->aal34.cells_dropped),
3037 cpu_to_be32(fore200e->stats->aal34.cells_crc_errors),
3038 cpu_to_be32(fore200e->stats->aal34.cells_protocol_errors),
3039 cpu_to_be32(fore200e->stats->aal34.cspdus_transmitted),
3040 cpu_to_be32(fore200e->stats->aal34.cspdus_received),
3041 cpu_to_be32(fore200e->stats->aal34.cspdus_dropped),
3042 cpu_to_be32(fore200e->stats->aal34.cspdus_protocol_errors));
3044 if (!left--)
3045 return sprintf(page,"\n"
3046 " AAL5:\n"
3047 " SAR sublayer:\t\t cells\n"
3048 " TX:\t\t\t%10u\n"
3049 " RX:\t\t\t%10u\n"
3050 " dropped:\t\t\t%10u\n"
3051 " congestions:\t\t%10u\n\n"
3052 " CS sublayer:\t\t PDUs\n"
3053 " TX:\t\t\t%10u\n"
3054 " RX:\t\t\t%10u\n"
3055 " dropped:\t\t\t%10u\n"
3056 " CRC errors:\t\t%10u\n"
3057 " protocol errors:\t\t%10u\n",
3058 cpu_to_be32(fore200e->stats->aal5.cells_transmitted),
3059 cpu_to_be32(fore200e->stats->aal5.cells_received),
3060 cpu_to_be32(fore200e->stats->aal5.cells_dropped),
3061 cpu_to_be32(fore200e->stats->aal5.congestion_experienced),
3062 cpu_to_be32(fore200e->stats->aal5.cspdus_transmitted),
3063 cpu_to_be32(fore200e->stats->aal5.cspdus_received),
3064 cpu_to_be32(fore200e->stats->aal5.cspdus_dropped),
3065 cpu_to_be32(fore200e->stats->aal5.cspdus_crc_errors),
3066 cpu_to_be32(fore200e->stats->aal5.cspdus_protocol_errors));
3068 if (!left--)
3069 return sprintf(page,"\n"
3070 " AUX:\t\t allocation failures\n"
3071 " small b1:\t\t\t%10u\n"
3072 " large b1:\t\t\t%10u\n"
3073 " small b2:\t\t\t%10u\n"
3074 " large b2:\t\t\t%10u\n"
3075 " RX PDUs:\t\t\t%10u\n"
3076 " TX PDUs:\t\t\t%10lu\n",
3077 cpu_to_be32(fore200e->stats->aux.small_b1_failed),
3078 cpu_to_be32(fore200e->stats->aux.large_b1_failed),
3079 cpu_to_be32(fore200e->stats->aux.small_b2_failed),
3080 cpu_to_be32(fore200e->stats->aux.large_b2_failed),
3081 cpu_to_be32(fore200e->stats->aux.rpd_alloc_failed),
3082 fore200e->tx_sat);
3084 if (!left--)
3085 return sprintf(page,"\n"
3086 " receive carrier:\t\t\t%s\n",
3087 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
3089 if (!left--) {
3090 return sprintf(page,"\n"
3091 " VCCs:\n address VPI VCI AAL "
3092 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
3095 for (i = 0; i < NBR_CONNECT; i++) {
3097 vcc = fore200e->vc_map[i].vcc;
3099 if (vcc == NULL)
3100 continue;
3102 spin_lock_irqsave(&fore200e->q_lock, flags);
3104 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3106 fore200e_vcc = FORE200E_VCC(vcc);
3107 ASSERT(fore200e_vcc);
3109 len = sprintf(page,
3110 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
3111 (u32)(unsigned long)vcc,
3112 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3113 fore200e_vcc->tx_pdu,
3114 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3115 fore200e_vcc->tx_max_pdu,
3116 fore200e_vcc->rx_pdu,
3117 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3118 fore200e_vcc->rx_max_pdu);
3120 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3121 return len;
3124 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3127 return 0;
3130 module_init(fore200e_module_init);
3131 module_exit(fore200e_module_cleanup);
3134 static const struct atmdev_ops fore200e_ops =
3136 .open = fore200e_open,
3137 .close = fore200e_close,
3138 .ioctl = fore200e_ioctl,
3139 .getsockopt = fore200e_getsockopt,
3140 .setsockopt = fore200e_setsockopt,
3141 .send = fore200e_send,
3142 .change_qos = fore200e_change_qos,
3143 .proc_read = fore200e_proc_read,
3144 .owner = THIS_MODULE
3148 #ifdef CONFIG_ATM_FORE200E_PCA
3149 extern const unsigned char _fore200e_pca_fw_data[];
3150 extern const unsigned int _fore200e_pca_fw_size;
3151 #endif
3152 #ifdef CONFIG_ATM_FORE200E_SBA
3153 extern const unsigned char _fore200e_sba_fw_data[];
3154 extern const unsigned int _fore200e_sba_fw_size;
3155 #endif
3157 static const struct fore200e_bus fore200e_bus[] = {
3158 #ifdef CONFIG_ATM_FORE200E_PCA
3159 { "PCA-200E", "pca200e", 32, 4, 32,
3160 _fore200e_pca_fw_data, &_fore200e_pca_fw_size,
3161 fore200e_pca_read,
3162 fore200e_pca_write,
3163 fore200e_pca_dma_map,
3164 fore200e_pca_dma_unmap,
3165 fore200e_pca_dma_sync_for_cpu,
3166 fore200e_pca_dma_sync_for_device,
3167 fore200e_pca_dma_chunk_alloc,
3168 fore200e_pca_dma_chunk_free,
3169 NULL,
3170 fore200e_pca_configure,
3171 fore200e_pca_map,
3172 fore200e_pca_reset,
3173 fore200e_pca_prom_read,
3174 fore200e_pca_unmap,
3175 NULL,
3176 fore200e_pca_irq_check,
3177 fore200e_pca_irq_ack,
3178 fore200e_pca_proc_read,
3180 #endif
3181 #ifdef CONFIG_ATM_FORE200E_SBA
3182 { "SBA-200E", "sba200e", 32, 64, 32,
3183 _fore200e_sba_fw_data, &_fore200e_sba_fw_size,
3184 fore200e_sba_read,
3185 fore200e_sba_write,
3186 fore200e_sba_dma_map,
3187 fore200e_sba_dma_unmap,
3188 fore200e_sba_dma_sync_for_cpu,
3189 fore200e_sba_dma_sync_for_device,
3190 fore200e_sba_dma_chunk_alloc,
3191 fore200e_sba_dma_chunk_free,
3192 fore200e_sba_detect,
3193 fore200e_sba_configure,
3194 fore200e_sba_map,
3195 fore200e_sba_reset,
3196 fore200e_sba_prom_read,
3197 fore200e_sba_unmap,
3198 fore200e_sba_irq_enable,
3199 fore200e_sba_irq_check,
3200 fore200e_sba_irq_ack,
3201 fore200e_sba_proc_read,
3203 #endif
3207 #ifdef MODULE_LICENSE
3208 MODULE_LICENSE("GPL");
3209 #endif