/* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#define USE_TASKLET
#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
#define USE_RBPS
#undef USE_RBPS_POOL			/* if memory is tight try this */
#undef USE_RBPL_POOL			/* if memory is tight try this */
#define USE_TPD_POOL
/* #undef CONFIG_ATM_HE_USE_SUNI */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* version definition */

static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;
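
/*
 * These globals are tunable via module parameters (declared near the
 * bottom of the full file): disable64 forces 32-bit pci transfers,
 * nvpibits/nvcibits split the HE_MAXCIDBITS connection id space between
 * vpi and vci, rx_skb_reserve pads the head of receive skbs,
 * irq_coalesce enables receive interrupt coalescing, and sdh selects
 * SDH framing on the SUNI.
 */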

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH	/* 1 */
};

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
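
/*
 * read_prom_byte() (later in the file) bit-bangs the serial EEPROM
 * using these tables: readtab shifts out the 8-bit READ opcode
 * (0000 0011b) on SI, one CLK_LOW/CLK_HIGH pair per bit, and clocktab
 * provides the clock edges used while shifting the address out and
 * the data byte back in.
 */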

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
								unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)

/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
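
/*
 * Worked example (hypothetical 2/10 vpi/vci split): with vcibits = 10,
 * vpi = 1 and vci = 32 give cid = (1 << 10) | 32 = 0x420; the 0x1fff
 * mask keeps the result inside the connection id space.
 */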

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

/* from page 2-20
 *
 * NOTE While the transmit connection is active, bits 23 through 0
 *	of this register must not be written by the host.  Byte
 *	enables should be used during normal operation when writing
 *	the most significant byte.
 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)

#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)

#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)

#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}
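
/*
 * __find_vcc() walks the shared vcc hash without taking any lock itself;
 * callers such as he_service_rbrq() below hold read_lock(&vcc_sklist_lock)
 * around the lookup and the subsequent use of the vcc.
 */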

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "he: %s\n", version);

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = (struct he_dev *) kmalloc(sizeof(struct he_dev),
							GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	memset(he_dev, 0, sizeof(struct he_dev));

	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	if (he_dev)
		kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void __devexit
he_remove_one (struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}

static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
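
/*
 * Worked example: rate_to_atmf(353207) -- ATM_OC3_PCR in cells/s --
 * shifts the rate up 9 bits and then halves it 18 times to fit 10 bits,
 * leaving exp = 18 and a mantissa of 689 & 0x1ff = 0xb1, so the result
 * is (1 << 14) | (18 << 9) | 0xb1 = 0x64b1 -- the same value the OC-3
 * branch of he_init_cs_block() below programs into CS_ERCTL2/CS_WCRMAX.
 */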

static void __init
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void __init
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __init
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int __init
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
				CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}

static void __init
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}
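
	/*
	 * e.g. on an OC-3 card the first entry is 50000000 / 353207,
	 * which truncates to a reload period of 141 clock cycles; each
	 * of the remaining 15 entries drops the rate by link_rate / 32.
	 */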

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
}

static int __init
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
		   instead of '/ 512', use '>> 9' to prevent a call
		   to divdu3 on x86 platforms
		*/
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	/* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: rate grid index (8 bits)
		 * and a buffer limit (8 bits); there are two table
		 * entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);
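
		/*
		 * after an odd rate_atmf the even-indexed entry sits in
		 * the upper halfword of reg and the odd-indexed entry in
		 * the lower halfword, at which point the packed word is
		 * written out below.
		 */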

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int __init
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

#ifdef USE_RBPS
	/* small buffer pool */
#ifdef USE_RBPS_POOL
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0);
	if (he_dev->rbps_pool == NULL) {
		hprintk("unable to create rbps pages\n");
		return -ENOMEM;
	}
#else /* !USE_RBPS_POOL */
	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
	if (he_dev->rbps_pages == NULL) {
		hprintk("unable to create rbps page pool\n");
		return -ENOMEM;
	}
#endif /* USE_RBPS_POOL */

	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
	if (he_dev->rbps_base == NULL) {
		hprintk("failed to alloc rbps\n");
		return -ENOMEM;
	}
	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);

	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPS_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
#endif

		he_dev->rbps_virt[i].virt = cpuaddr;
		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
		he_dev->rbps_base[i].phys = dma_handle;
	}
	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];

	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
						G0_RBPS_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
						G0_RBPS_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPS_THRESH) |
			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPS_QI + (group * 32));
#else /* !USE_RBPS */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));
#endif /* USE_RBPS */

	/* large buffer pool */
#ifdef USE_RBPL_POOL
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		return -ENOMEM;
	}
#else /* !USE_RBPL_POOL */
	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
	if (he_dev->rbpl_pages == NULL) {
		hprintk("unable to create rbpl pages\n");
		return -ENOMEM;
	}
#endif /* USE_RBPL_POOL */

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl\n");
		return -ENOMEM;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPL_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
#endif

		he_dev->rbpl_virt[i].virt = cpuaddr;
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;
}

static int __init
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5  tail offset for each interrupt queue is located after the
		    end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, SA_INTERRUPT|SA_SHIRQ, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int __init
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 */
#define LAT_TIMER 209
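	/* i.e. 1 + 16 + 192 = 209 pci clock cycles */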
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
		he_dev->prod_id,
		he_dev->media & 0x40 ? "SM" : "MM",
		dev->esi[0],
		dev->esi[1],
		dev->esi[2],
		dev->esi[3],
		dev->esi[4],
		dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianess */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

#ifdef USE_TASKLET
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
	spin_lock_init(&he_dev->global_lock);

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023       bytes  0 _______________________2047 bytes
	 *         |            |                  |                   |   |
	 *         |  utility   |                  |        rx0        |   |
	 *        5|____________|  255             |___________________| u |
	 *        6|            |  256             |                   | t |
	 *         |            |                  |                   | i |
	 *         |    rx0     |     row          |        tx         | l |
	 *         |            |                  |                   | i |
	 *         |            |  767             |___________________| t |
	 *      517|____________|  768             |                   | y |
	 *  row 518|            |                  |        rx1        |   |
	 *         |            |  1023            |___________________|___|
	 *         |            |
	 *         |    tx      |
	 *         |            |
	 *         |            |
	 *     1535|____________|
	 *     1536|            |
	 *         |    rx1     |
	 *     2047|____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}

	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits),			RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs),		TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);

	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |      rx0/1        |
	 *             |       LBM         |   link lists of local
	 *             |       tx          |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

#ifdef USE_TPD_POOL
	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else
	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
	if (!he_dev->tpd_base)
		return -ENOMEM;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
		he_dev->tpd_base[i].inuse = 0;
	}

	he_dev->tpd_head = he_dev->tpd_base;
	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
#endif

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;

	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;
#if defined(USE_RBPL_POOL) || defined(USE_RBPS_POOL)
	int i;	/* used by the pci_pool teardown loops below */
#endif

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

#ifdef USE_TASKLET
		tasklet_disable(&he_dev->tasklet);
#endif

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
#ifdef USE_RBPL_POOL
		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

#ifdef USE_RBPL_POOL
	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);
#endif

#ifdef USE_RBPS
	if (he_dev->rbps_base) {
#ifdef USE_RBPS_POOL
		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

#ifdef USE_RBPS_POOL
	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);
#endif

#endif /* USE_RBPS */

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		/* size must match the allocation in he_init_tpdrq() */
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

#ifdef USE_TPD_POOL
	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);
#else
	if (he_dev->tpd_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
							he_dev->tpd_base, he_dev->tpd_base_phys);
#endif

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
#ifdef USE_TPD_POOL
	struct he_tpd *tpd;
	dma_addr_t dma_handle;

	tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(dma_handle);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
#else
	int i;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		++he_dev->tpd_head;
		if (he_dev->tpd_head > he_dev->tpd_end) {
			he_dev->tpd_head = he_dev->tpd_base;
		}

		if (!he_dev->tpd_head->inuse) {
			he_dev->tpd_head->inuse = 1;
			he_dev->tpd_head->status &= TPD_MASK;
			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
			return he_dev->tpd_head;
		}
	}
	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
	return NULL;
#endif
}

#define AAL5_LEN(buf,len) \
			((((unsigned char *)(buf))[(len)-6] << 8) | \
				(((unsigned char *)(buf))[(len)-5]))
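
/*
 * the aal5 cpcs trailer occupies the last 8 bytes of the reassembled
 * pdu -- UU (1 byte), CPI (1), Length (2), CRC-32 (4) -- so bytes
 * len-6 and len-5 above are the big-endian 16-bit Length field.
 */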

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len) \
			((((unsigned char *)(buf))[(len)-2] << 8) | \
				(((unsigned char *)(buf))[(len-1)]))

static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
	struct he_rbrq *rbrq_tail = (struct he_rbrq *)
				((unsigned long)he_dev->rbrq_base |
					he_dev->hsp->group[group].rbrq_tail);
	struct he_rbp *rbp = NULL;
	unsigned cid, lastcid = -1;
	unsigned buf_len = 0;
	struct sk_buff *skb;
	struct atm_vcc *vcc = NULL;
	struct he_vcc *he_vcc;
	struct he_iovec *iov;
	int pdus_assembled = 0;
	int updated = 0;

	read_lock(&vcc_sklist_lock);
	while (he_dev->rbrq_head != rbrq_tail) {
		++updated;

		HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
			he_dev->rbrq_head, group,
			RBRQ_ADDR(he_dev->rbrq_head),
			RBRQ_BUFLEN(he_dev->rbrq_head),
			RBRQ_CID(he_dev->rbrq_head),
			RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
			RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
			RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
			RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
			RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
			RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

#ifdef USE_RBPS
		if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
			rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
		else
#endif
			rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];

		buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
		cid = RBRQ_CID(he_dev->rbrq_head);

		if (cid != lastcid)
			vcc = __find_vcc(he_dev, cid);
		lastcid = cid;

		if (vcc == NULL) {
			hprintk("vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;

			goto next_rbrq_entry;
		}

		he_vcc = HE_VCC(vcc);
		if (he_vcc == NULL) {
			hprintk("he_vcc == NULL  (cid 0x%x)\n", cid);
			if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
				rbp->status &= ~RBP_LOANED;
			goto next_rbrq_entry;
		}

		if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
			hprintk("HBUF_ERR!  (cid 0x%x)\n", cid);
			atomic_inc(&vcc->stats->rx_drop);
			goto return_host_buffers;
		}

		he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
		he_vcc->iov_tail->iov_len = buf_len;
		he_vcc->pdu_len += buf_len;
		++he_vcc->iov_tail;

		if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
			lastcid = -1;
			HPRINTK("wake_up rx_waitq  (cid 0x%x)\n", cid);
			wake_up(&he_vcc->rx_waitq);
			goto return_host_buffers;
		}

#ifdef notdef
		if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
			hprintk("iovec full!  cid 0x%x\n", cid);
			goto return_host_buffers;
		}
#endif
		if (!RBRQ_END_PDU(he_dev->rbrq_head))
			goto next_rbrq_entry;

		if (RBRQ_LEN_ERR(he_dev->rbrq_head)
				|| RBRQ_CRC_ERR(he_dev->rbrq_head)) {
			HPRINTK("%s%s (%d.%d)\n",
				RBRQ_CRC_ERR(he_dev->rbrq_head)
							? "CRC_ERR " : "",
				RBRQ_LEN_ERR(he_dev->rbrq_head)
							? "LEN_ERR" : "",
							vcc->vpi, vcc->vci);
			atomic_inc(&vcc->stats->rx_err);
			goto return_host_buffers;
		}

		skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
							GFP_ATOMIC);
		if (!skb) {
			HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
			goto return_host_buffers;
		}

		if (rx_skb_reserve > 0)
			skb_reserve(skb, rx_skb_reserve);

		do_gettimeofday(&skb->stamp);

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
			else
#endif
				memcpy(skb_put(skb, iov->iov_len),
					he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
		}

		switch (vcc->qos.aal) {
			case ATM_AAL0:
				/* 2.10.1.5 raw cell receive */
				skb->len = ATM_AAL0_SDU;
				skb->tail = skb->data + skb->len;
				break;
			case ATM_AAL5:
				/* 2.10.1.2 aal5 receive */

				skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
				skb->tail = skb->data + skb->len;
#ifdef USE_CHECKSUM_HW
				if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
					skb->ip_summed = CHECKSUM_HW;
					skb->csum = TCP_CKSUM(skb->data,
								he_vcc->pdu_len);
				}
#endif
				break;
		}

#ifdef should_never_happen
		if (skb->len > vcc->qos.rxtp.max_sdu)
			hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)!  cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
		ATM_SKB(skb)->vcc = vcc;
#endif
		vcc->push(vcc, skb);

		atomic_inc(&vcc->stats->rx);

return_host_buffers:
		++pdus_assembled;

		for (iov = he_vcc->iov_head;
				iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
			if (iov->iov_base & RBP_SMALLBUF)
				rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
			else
#endif
				rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];

			rbp->status &= ~RBP_LOANED;
		}

		he_vcc->iov_tail = he_vcc->iov_head;
		he_vcc->pdu_len = 0;

next_rbrq_entry:
		he_dev->rbrq_head = (struct he_rbrq *)
				((unsigned long) he_dev->rbrq_base |
					RBRQ_MASK(++he_dev->rbrq_head));

	}
	read_unlock(&vcc_sklist_lock);

	if (updated) {
		if (updated > he_dev->rbrq_peak)
			he_dev->rbrq_peak = updated;

		he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
						G0_RBRQ_H + (group * 16));
	}

	return pdus_assembled;
}
1975 static void
1976 he_service_tbrq(struct he_dev *he_dev, int group)
1978 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1979 ((unsigned long)he_dev->tbrq_base |
1980 he_dev->hsp->group[group].tbrq_tail);
1981 struct he_tpd *tpd;
1982 int slot, updated = 0;
1983 #ifdef USE_TPD_POOL
1984 struct he_tpd *__tpd;
1985 #endif
1987 /* 2.1.6 transmit buffer return queue */
1989 while (he_dev->tbrq_head != tbrq_tail) {
1990 ++updated;
1992 HPRINTK("tbrq%d 0x%x%s%s\n",
1993 group,
1994 TBRQ_TPD(he_dev->tbrq_head),
1995 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1996 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1997 #ifdef USE_TPD_POOL
1998 tpd = NULL;
1999 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
2000 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
2001 tpd = __tpd;
2002 list_del(&__tpd->entry);
2003 break;
2004 }
2005 }
2007 if (tpd == NULL) {
2008 hprintk("unable to locate tpd for dma buffer %x\n",
2009 TBRQ_TPD(he_dev->tbrq_head));
2010 goto next_tbrq_entry;
2011 }
2012 #else
2013 tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
2014 #endif
2016 if (TBRQ_EOS(he_dev->tbrq_head)) {
2017 if (tpd->vcc) { /* tpd->vcc may be NULL -- check before dereferencing */
2018 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
2019 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
2020 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
2021 }
2022 goto next_tbrq_entry;
2023 }
2025 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2026 if (tpd->iovec[slot].addr)
2027 pci_unmap_single(he_dev->pci_dev,
2028 tpd->iovec[slot].addr,
2029 tpd->iovec[slot].len & TPD_LEN_MASK,
2030 PCI_DMA_TODEVICE);
2031 if (tpd->iovec[slot].len & TPD_LST)
2032 break;
2033 }
2036 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2037 if (tpd->vcc && tpd->vcc->pop)
2038 tpd->vcc->pop(tpd->vcc, tpd->skb);
2039 else
2040 dev_kfree_skb_any(tpd->skb);
2041 }
2043 next_tbrq_entry:
2044 #ifdef USE_TPD_POOL
2045 if (tpd)
2046 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2047 #else
2048 tpd->inuse = 0;
2049 #endif
2050 he_dev->tbrq_head = (struct he_tbrq *)
2051 ((unsigned long) he_dev->tbrq_base |
2052 TBRQ_MASK(++he_dev->tbrq_head));
2053 }
2055 if (updated) {
2056 if (updated > he_dev->tbrq_peak)
2057 he_dev->tbrq_peak = updated;
2059 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2060 G0_TBRQ_H + (group * 16));
2061 }
2063 }
2065 static void
2066 he_service_rbpl(struct he_dev *he_dev, int group)
2067 {
2068 struct he_rbp *newtail;
2069 struct he_rbp *rbpl_head;
2070 int moved = 0;
2072 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2073 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
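/*
 * replenish the large buffer pool: each buffer handed back to the
 * adapter is marked RBP_LOANED; stop when the ring would wrap onto
 * the head or the next buffer is still loaned out
 */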
2075 for (;;) {
2076 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2077 RBPL_MASK(he_dev->rbpl_tail+1));
2079 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2080 if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2081 break;
2083 newtail->status |= RBP_LOANED;
2084 he_dev->rbpl_tail = newtail;
2085 ++moved;
2086 }
2088 if (moved)
2089 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2090 }
2092 #ifdef USE_RBPS
2093 static void
2094 he_service_rbps(struct he_dev *he_dev, int group)
2095 {
2096 struct he_rbp *newtail;
2097 struct he_rbp *rbps_head;
2098 int moved = 0;
2100 rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2101 RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2103 for (;;) {
2104 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2105 RBPS_MASK(he_dev->rbps_tail+1));
2107 /* table 3.42 -- rbps_tail should never be set to rbps_head */
2108 if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2109 break;
2111 newtail->status |= RBP_LOANED;
2112 he_dev->rbps_tail = newtail;
2113 ++moved;
2114 }
2116 if (moved)
2117 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2118 }
2119 #endif /* USE_RBPS */
2121 static void
2122 he_tasklet(unsigned long data)
2123 {
2124 unsigned long flags;
2125 struct he_dev *he_dev = (struct he_dev *) data;
2126 int group, type;
2127 int updated = 0;
2129 HPRINTK("tasklet (0x%lx)\n", data);
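/*
 * with USE_TASKLET this runs in tasklet context and must take the
 * global lock itself; otherwise he_irq_handler calls it directly
 * with the lock already held, hence the conditional locking
 */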
2130 #ifdef USE_TASKLET
2131 spin_lock_irqsave(&he_dev->global_lock, flags);
2132 #endif
2134 while (he_dev->irq_head != he_dev->irq_tail) {
2135 ++updated;
2137 type = ITYPE_TYPE(he_dev->irq_head->isw);
2138 group = ITYPE_GROUP(he_dev->irq_head->isw);
2140 switch (type) {
2141 case ITYPE_RBRQ_THRESH:
2142 HPRINTK("rbrq%d threshold\n", group);
2143 /* fall through */
2144 case ITYPE_RBRQ_TIMER:
2145 if (he_service_rbrq(he_dev, group)) {
2146 he_service_rbpl(he_dev, group);
2147 #ifdef USE_RBPS
2148 he_service_rbps(he_dev, group);
2149 #endif /* USE_RBPS */
2150 }
2151 break;
2152 case ITYPE_TBRQ_THRESH:
2153 HPRINTK("tbrq%d threshold\n", group);
2154 /* fall through */
2155 case ITYPE_TPD_COMPLETE:
2156 he_service_tbrq(he_dev, group);
2157 break;
2158 case ITYPE_RBPL_THRESH:
2159 he_service_rbpl(he_dev, group);
2160 break;
2161 case ITYPE_RBPS_THRESH:
2162 #ifdef USE_RBPS
2163 he_service_rbps(he_dev, group);
2164 #endif /* USE_RBPS */
2165 break;
2166 case ITYPE_PHY:
2167 HPRINTK("phy interrupt\n");
2168 #ifdef CONFIG_ATM_HE_USE_SUNI
2169 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2170 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2171 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2172 spin_lock_irqsave(&he_dev->global_lock, flags);
2173 #endif
2174 break;
2175 case ITYPE_OTHER:
2176 switch (type|group) {
2177 case ITYPE_PARITY:
2178 hprintk("parity error\n");
2179 break;
2180 case ITYPE_ABORT:
2181 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2182 break;
2183 }
2184 break;
2185 case ITYPE_TYPE(ITYPE_INVALID):
2186 /* see 8.1.1 -- check all queues */
2188 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2190 he_service_rbrq(he_dev, 0);
2191 he_service_rbpl(he_dev, 0);
2192 #ifdef USE_RBPS
2193 he_service_rbps(he_dev, 0);
2194 #endif /* USE_RBPS */
2195 he_service_tbrq(he_dev, 0);
2196 break;
2197 default:
2198 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2199 }
2201 he_dev->irq_head->isw = ITYPE_INVALID;
2203 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2204 }
2206 if (updated) {
2207 if (updated > he_dev->irq_peak)
2208 he_dev->irq_peak = updated;
2210 he_writel(he_dev,
2211 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2212 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2213 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2214 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2215 }
2216 #ifdef USE_TASKLET
2217 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2218 #endif
2219 }
2221 static irqreturn_t
2222 he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
2223 {
2224 unsigned long flags;
2225 struct he_dev *he_dev = (struct he_dev * )dev_id;
2226 int handled = 0;
2228 if (he_dev == NULL)
2229 return IRQ_NONE;
2231 spin_lock_irqsave(&he_dev->global_lock, flags);
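/* the adapter posts the current queue tail as a word offset at
 * *irq_tailoffset; fold it back into a pointer into the irq ring */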
2233 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2234 (*he_dev->irq_tailoffset << 2));
2236 if (he_dev->irq_tail == he_dev->irq_head) {
2237 HPRINTK("tailoffset not updated?\n");
2238 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2239 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2240 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2241 }
2243 #ifdef DEBUG
2244 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2245 hprintk("spurious (or shared) interrupt?\n");
2246 #endif
2248 if (he_dev->irq_head != he_dev->irq_tail) {
2249 handled = 1;
2250 #ifdef USE_TASKLET
2251 tasklet_schedule(&he_dev->tasklet);
2252 #else
2253 he_tasklet((unsigned long) he_dev);
2254 #endif
2255 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2256 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2257 }
2258 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2259 return IRQ_RETVAL(handled);
2260 }
2263 static __inline__ void
2264 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2265 {
2266 struct he_tpdrq *new_tail;
2268 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2269 tpd, cid, he_dev->tpdrq_tail);
2271 /* new_tail = he_dev->tpdrq_tail; */
2272 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2273 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2275 /*
2276 * check to see if we are about to set the tail == head
2277 * if true, update the head pointer from the adapter
2278 * to see if this is really the case (reading the queue
2279 * head for every enqueue would be unnecessarily slow)
2280 */
2282 if (new_tail == he_dev->tpdrq_head) {
2283 he_dev->tpdrq_head = (struct he_tpdrq *)
2284 (((unsigned long)he_dev->tpdrq_base) |
2285 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2287 if (new_tail == he_dev->tpdrq_head) {
2288 hprintk("tpdrq full (cid 0x%x)\n", cid);
2289 /*
2290 * FIXME
2291 * push tpd onto a transmit backlog queue
2292 * after service_tbrq, service the backlog
2293 * for now, we just drop the pdu
2294 */
2295 if (tpd->skb) {
2296 if (tpd->vcc->pop)
2297 tpd->vcc->pop(tpd->vcc, tpd->skb);
2298 else
2299 dev_kfree_skb_any(tpd->skb);
2300 atomic_inc(&tpd->vcc->stats->tx_err);
2301 }
2302 #ifdef USE_TPD_POOL
2303 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2304 #else
2305 tpd->inuse = 0;
2306 #endif
2307 return;
2308 }
2309 }
2311 /* 2.1.5 transmit packet descriptor ready queue */
2312 #ifdef USE_TPD_POOL
2313 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2314 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2315 #else
2316 he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
2317 (TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
2318 #endif
2319 he_dev->tpdrq_tail->cid = cid;
2320 wmb();
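/* the tpd address and cid must be visible in host memory before the
 * TPDRQ_T write below tells the adapter a new entry is ready */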
2322 he_dev->tpdrq_tail = new_tail;
2324 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2325 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
2326 }
2328 static int
2329 he_open(struct atm_vcc *vcc)
2330 {
2331 unsigned long flags;
2332 struct he_dev *he_dev = HE_DEV(vcc->dev);
2333 struct he_vcc *he_vcc;
2334 int err = 0;
2335 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2336 short vpi = vcc->vpi;
2337 int vci = vcc->vci;
2339 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2340 return 0;
2342 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2344 set_bit(ATM_VF_ADDR, &vcc->flags);
2346 cid = he_mkcid(he_dev, vpi, vci);
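/* the cid is the adapter's connection table index -- he_mkcid()
 * (see he.h) packs vpi and vci into a single index sized by the
 * nvpibits/nvcibits module parameters */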
2348 he_vcc = (struct he_vcc *) kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2349 if (he_vcc == NULL) {
2350 hprintk("unable to allocate he_vcc during open\n");
2351 return -ENOMEM;
2352 }
2354 he_vcc->iov_tail = he_vcc->iov_head;
2355 he_vcc->pdu_len = 0;
2356 he_vcc->rc_index = -1;
2358 init_waitqueue_head(&he_vcc->rx_waitq);
2359 init_waitqueue_head(&he_vcc->tx_waitq);
2361 vcc->dev_data = he_vcc;
2363 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2364 int pcr_goal;
2366 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2367 if (pcr_goal == 0)
2368 pcr_goal = he_dev->atm_dev->link_rate;
2369 if (pcr_goal < 0) /* means round down, technically */
2370 pcr_goal = -pcr_goal;
2372 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2374 switch (vcc->qos.aal) {
2375 case ATM_AAL5:
2376 tsr0_aal = TSR0_AAL5;
2377 tsr4 = TSR4_AAL5;
2378 break;
2379 case ATM_AAL0:
2380 tsr0_aal = TSR0_AAL0_SDU;
2381 tsr4 = TSR4_AAL0_SDU;
2382 break;
2383 default:
2384 err = -EINVAL;
2385 goto open_failed;
2386 }
2388 spin_lock_irqsave(&he_dev->global_lock, flags);
2389 tsr0 = he_readl_tsr0(he_dev, cid);
2390 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2392 if (TSR0_CONN_STATE(tsr0) != 0) {
2393 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2394 err = -EBUSY;
2395 goto open_failed;
2396 }
2398 switch (vcc->qos.txtp.traffic_class) {
2399 case ATM_UBR:
2400 /* 2.3.3.1 open connection ubr */
2402 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2403 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2404 break;
2406 case ATM_CBR:
2407 /* 2.3.3.2 open connection cbr */
2409 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2410 if ((he_dev->total_bw + pcr_goal)
2411 > (he_dev->atm_dev->link_rate * 9 / 10))
2412 {
2413 err = -EBUSY;
2414 goto open_failed;
2415 }
2417 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2419 /* find an unused cs_stper register */
2420 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2421 if (he_dev->cs_stper[reg].inuse == 0 ||
2422 he_dev->cs_stper[reg].pcr == pcr_goal)
2423 break;
2425 if (reg == HE_NUM_CS_STPER) {
2426 err = -EBUSY;
2427 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2428 goto open_failed;
2429 }
2431 he_dev->total_bw += pcr_goal;
2433 he_vcc->rc_index = reg;
2434 ++he_dev->cs_stper[reg].inuse;
2435 he_dev->cs_stper[reg].pcr = pcr_goal;
2437 clock = he_is622(he_dev) ? 66667000 : 50000000;
2438 period = clock / pcr_goal;
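/*
 * period is the number of core clock ticks per cell at the requested
 * rate, e.g. on a 155Mbps card (50MHz clock) a pcr_goal of 100000
 * cells/sec gives 50000000 / 100000 = 500 ticks between cells
 */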
2440 HPRINTK("rc_index = %d period = %d\n",
2441 reg, period);
2443 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2444 CS_STPER0 + reg);
2445 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2447 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2448 TSR0_RC_INDEX(reg);
2450 break;
2451 default:
2452 err = -EINVAL;
2453 goto open_failed;
2454 }
2456 spin_lock_irqsave(&he_dev->global_lock, flags);
2458 he_writel_tsr0(he_dev, tsr0, cid);
2459 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2460 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2461 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2462 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2463 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2465 he_writel_tsr3(he_dev, 0x0, cid);
2466 he_writel_tsr5(he_dev, 0x0, cid);
2467 he_writel_tsr6(he_dev, 0x0, cid);
2468 he_writel_tsr7(he_dev, 0x0, cid);
2469 he_writel_tsr8(he_dev, 0x0, cid);
2470 he_writel_tsr10(he_dev, 0x0, cid);
2471 he_writel_tsr11(he_dev, 0x0, cid);
2472 he_writel_tsr12(he_dev, 0x0, cid);
2473 he_writel_tsr13(he_dev, 0x0, cid);
2474 he_writel_tsr14(he_dev, 0x0, cid);
2475 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2476 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2477 }
2479 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2480 unsigned aal;
2482 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2483 &HE_VCC(vcc)->rx_waitq);
2485 switch (vcc->qos.aal) {
2486 case ATM_AAL5:
2487 aal = RSR0_AAL5;
2488 break;
2489 case ATM_AAL0:
2490 aal = RSR0_RAWCELL;
2491 break;
2492 default:
2493 err = -EINVAL;
2494 goto open_failed;
2495 }
2497 spin_lock_irqsave(&he_dev->global_lock, flags);
2499 rsr0 = he_readl_rsr0(he_dev, cid);
2500 if (rsr0 & RSR0_OPEN_CONN) {
2501 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2503 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2504 err = -EBUSY;
2505 goto open_failed;
2506 }
2508 #ifdef USE_RBPS
2509 rsr1 = RSR1_GROUP(0);
2510 rsr4 = RSR4_GROUP(0);
2511 #else /* !USE_RBPS */
2512 rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2513 rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2514 #endif /* USE_RBPS */
2515 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2516 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2518 #ifdef USE_CHECKSUM_HW
2519 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2520 rsr0 |= RSR0_TCP_CKSUM;
2521 #endif
2523 he_writel_rsr4(he_dev, rsr4, cid);
2524 he_writel_rsr1(he_dev, rsr1, cid);
2525 /* 5.1.11 last parameter initialized should be
2526 the open/closed indication in rsr0 */
2527 he_writel_rsr0(he_dev,
2528 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2529 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2531 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2532 }
2534 open_failed:
2536 if (err) {
2537 if (he_vcc)
2538 kfree(he_vcc);
2539 clear_bit(ATM_VF_ADDR, &vcc->flags);
2540 }
2541 else
2542 set_bit(ATM_VF_READY, &vcc->flags);
2544 return err;
2545 }
2547 static void
2548 he_close(struct atm_vcc *vcc)
2549 {
2550 unsigned long flags;
2551 DECLARE_WAITQUEUE(wait, current);
2552 struct he_dev *he_dev = HE_DEV(vcc->dev);
2553 struct he_tpd *tpd;
2554 unsigned cid;
2555 struct he_vcc *he_vcc = HE_VCC(vcc);
2556 #define MAX_RETRY 30
2557 int retry = 0, sleep = 1, tx_inuse;
2559 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2561 clear_bit(ATM_VF_READY, &vcc->flags);
2562 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2564 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2565 int timeout;
2567 HPRINTK("close rx cid 0x%x\n", cid);
2569 /* 2.7.2.2 close receive operation */
2571 /* wait for previous close (if any) to finish */
2573 spin_lock_irqsave(&he_dev->global_lock, flags);
2574 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2575 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2576 udelay(250);
2577 }
2579 set_current_state(TASK_UNINTERRUPTIBLE);
2580 add_wait_queue(&he_vcc->rx_waitq, &wait);
2582 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2583 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2584 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2585 spin_unlock_irqrestore(&he_dev->global_lock, flags);
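/* he_service_rbrq() wakes us when it sees the CON_CLOSED rbrq entry
 * for this connection; give up after 30 seconds */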
2587 timeout = schedule_timeout(30*HZ);
2589 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2590 set_current_state(TASK_RUNNING);
2592 if (timeout == 0)
2593 hprintk("close rx timeout cid 0x%x\n", cid);
2595 HPRINTK("close rx cid 0x%x complete\n", cid);
2596 }
2599 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2600 volatile unsigned tsr4, tsr0;
2601 int timeout;
2603 HPRINTK("close tx cid 0x%x\n", cid);
2605 /* 2.1.2
2607 * ... the host must first stop queueing packets to the TPDRQ
2608 * on the connection to be closed, then wait for all outstanding
2609 * packets to be transmitted and their buffers returned to the
2610 * TBRQ. When the last packet on the connection arrives in the
2611 * TBRQ, the host issues the close command to the adapter.
2612 */
2614 while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
2615 (retry < MAX_RETRY)) {
2616 msleep(sleep);
2617 if (sleep < 250)
2618 sleep = sleep * 2;
2620 ++retry;
2621 }
2623 if (tx_inuse)
2624 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2626 /* 2.3.1.1 generic close operations with flush */
2628 spin_lock_irqsave(&he_dev->global_lock, flags);
2629 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2630 /* also clears TSR4_SESSION_ENDED */
2632 switch (vcc->qos.txtp.traffic_class) {
2633 case ATM_UBR:
2634 he_writel_tsr1(he_dev,
2635 TSR1_MCR(rate_to_atmf(200000))
2636 | TSR1_PCR(0), cid);
2637 break;
2638 case ATM_CBR:
2639 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2640 break;
2641 }
2642 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2644 tpd = __alloc_tpd(he_dev);
2645 if (tpd == NULL) {
2646 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2647 goto close_tx_incomplete;
2648 }
2649 tpd->status |= TPD_EOS | TPD_INT;
2650 tpd->skb = NULL;
2651 tpd->vcc = vcc;
2652 wmb();
2654 set_current_state(TASK_UNINTERRUPTIBLE);
2655 add_wait_queue(&he_vcc->tx_waitq, &wait);
2656 __enqueue_tpd(he_dev, tpd, cid);
2657 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2659 timeout = schedule_timeout(30*HZ);
2661 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2662 set_current_state(TASK_RUNNING);
2664 spin_lock_irqsave(&he_dev->global_lock, flags);
2666 if (timeout == 0) {
2667 hprintk("close tx timeout cid 0x%x\n", cid);
2668 goto close_tx_incomplete;
2669 }
2671 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2672 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2673 udelay(250);
2674 }
2676 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2677 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2678 udelay(250);
2679 }
2681 close_tx_incomplete:
2683 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2684 int reg = he_vcc->rc_index;
2686 HPRINTK("cs_stper reg = %d\n", reg);
2688 if (he_dev->cs_stper[reg].inuse == 0)
2689 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2690 else
2691 --he_dev->cs_stper[reg].inuse;
2693 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2694 }
2695 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2697 HPRINTK("close tx cid 0x%x complete\n", cid);
2698 }
2700 kfree(he_vcc);
2702 clear_bit(ATM_VF_ADDR, &vcc->flags);
2703 }
2705 static int
2706 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2707 {
2708 unsigned long flags;
2709 struct he_dev *he_dev = HE_DEV(vcc->dev);
2710 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2711 struct he_tpd *tpd;
2712 #ifdef USE_SCATTERGATHER
2713 int i, slot = 0;
2714 #endif
2716 #define HE_TPD_BUFSIZE 0xffff
2718 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2720 if ((skb->len > HE_TPD_BUFSIZE) ||
2721 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2722 hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2723 if (vcc->pop)
2724 vcc->pop(vcc, skb);
2725 else
2726 dev_kfree_skb_any(skb);
2727 atomic_inc(&vcc->stats->tx_err);
2728 return -EINVAL;
2729 }
2731 #ifndef USE_SCATTERGATHER
2732 if (skb_shinfo(skb)->nr_frags) {
2733 hprintk("no scatter/gather support\n");
2734 if (vcc->pop)
2735 vcc->pop(vcc, skb);
2736 else
2737 dev_kfree_skb_any(skb);
2738 atomic_inc(&vcc->stats->tx_err);
2739 return -EINVAL;
2740 }
2741 #endif
2742 spin_lock_irqsave(&he_dev->global_lock, flags);
2744 tpd = __alloc_tpd(he_dev);
2745 if (tpd == NULL) {
2746 if (vcc->pop)
2747 vcc->pop(vcc, skb);
2748 else
2749 dev_kfree_skb_any(skb);
2750 atomic_inc(&vcc->stats->tx_err);
2751 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2752 return -ENOMEM;
2753 }
2755 if (vcc->qos.aal == ATM_AAL5)
2756 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2757 else {
2758 char *pti_clp = (void *) (skb->data + 3);
2759 int clp, pti;
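/*
 * aal0 sdus are whole 52 byte cells (header without HEC): recover
 * PTI and CLP from the last header byte, then strip the 4 byte
 * header so only the 48 byte payload is handed to the adapter
 */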
2761 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2762 clp = (*pti_clp & ATM_HDR_CLP);
2763 tpd->status |= TPD_CELLTYPE(pti);
2764 if (clp)
2765 tpd->status |= TPD_CLP;
2767 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2768 }
2770 #ifdef USE_SCATTERGATHER
2771 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2772 skb->len - skb->data_len, PCI_DMA_TODEVICE);
2773 tpd->iovec[slot].len = skb->len - skb->data_len;
2774 ++slot;
2776 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2777 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2779 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2780 tpd->vcc = vcc;
2781 tpd->skb = NULL; /* not the last fragment
2782 so don't ->push() yet */
2783 wmb();
2785 __enqueue_tpd(he_dev, tpd, cid);
2786 tpd = __alloc_tpd(he_dev);
2787 if (tpd == NULL) {
2788 if (vcc->pop)
2789 vcc->pop(vcc, skb);
2790 else
2791 dev_kfree_skb_any(skb);
2792 atomic_inc(&vcc->stats->tx_err);
2793 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2794 return -ENOMEM;
2795 }
2796 tpd->status |= TPD_USERCELL;
2797 slot = 0;
2798 }
2800 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2801 (void *) page_address(frag->page) + frag->page_offset,
2802 frag->size, PCI_DMA_TODEVICE);
2803 tpd->iovec[slot].len = frag->size;
2804 ++slot;
2805 }
2808 tpd->iovec[slot - 1].len |= TPD_LST;
2809 #else
2810 tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2811 tpd->length0 = skb->len | TPD_LST;
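/* TPD_LST marks the last buffer of the pdu -- the lone buffer here,
 * or the final iovec entry in the scatter/gather path above */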
2812 #endif
2813 tpd->status |= TPD_INT;
2815 tpd->vcc = vcc;
2816 tpd->skb = skb;
2817 wmb();
2818 ATM_SKB(skb)->vcc = vcc;
2820 __enqueue_tpd(he_dev, tpd, cid);
2821 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2823 atomic_inc(&vcc->stats->tx);
2825 return 0;
2826 }
2828 static int
2829 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2830 {
2831 unsigned long flags;
2832 struct he_dev *he_dev = HE_DEV(atm_dev);
2833 struct he_ioctl_reg reg;
2834 int err = 0;
2836 switch (cmd) {
2837 case HE_GET_REG:
2838 if (!capable(CAP_NET_ADMIN))
2839 return -EPERM;
2841 if (copy_from_user(&reg, arg,
2842 sizeof(struct he_ioctl_reg)))
2843 return -EFAULT;
2845 spin_lock_irqsave(&he_dev->global_lock, flags);
2846 switch (reg.type) {
2847 case HE_REGTYPE_PCI:
2848 reg.val = he_readl(he_dev, reg.addr);
2849 break;
2850 case HE_REGTYPE_RCM:
2851 reg.val =
2852 he_readl_rcm(he_dev, reg.addr);
2853 break;
2854 case HE_REGTYPE_TCM:
2855 reg.val =
2856 he_readl_tcm(he_dev, reg.addr);
2857 break;
2858 case HE_REGTYPE_MBOX:
2859 reg.val =
2860 he_readl_mbox(he_dev, reg.addr);
2861 break;
2862 default:
2863 err = -EINVAL;
2864 break;
2865 }
2866 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2867 if (err == 0)
2868 if (copy_to_user(arg, &reg,
2869 sizeof(struct he_ioctl_reg)))
2870 return -EFAULT;
2871 break;
2872 default:
2873 #ifdef CONFIG_ATM_HE_USE_SUNI
2874 if (atm_dev->phy && atm_dev->phy->ioctl)
2875 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2876 #else /* CONFIG_ATM_HE_USE_SUNI */
2877 err = -EINVAL;
2878 #endif /* CONFIG_ATM_HE_USE_SUNI */
2879 break;
2880 }
2882 return err;
2883 }
2885 static void
2886 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2887 {
2888 unsigned long flags;
2889 struct he_dev *he_dev = HE_DEV(atm_dev);
2891 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2893 spin_lock_irqsave(&he_dev->global_lock, flags);
2894 he_writel(he_dev, val, FRAMER + (addr*4));
2895 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2896 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2897 }
2900 static unsigned char
2901 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2902 {
2903 unsigned long flags;
2904 struct he_dev *he_dev = HE_DEV(atm_dev);
2905 unsigned reg;
2907 spin_lock_irqsave(&he_dev->global_lock, flags);
2908 reg = he_readl(he_dev, FRAMER + (addr*4));
2909 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2911 HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);
2912 return reg;
2913 }
2915 static int
2916 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2917 {
2918 unsigned long flags;
2919 struct he_dev *he_dev = HE_DEV(dev);
2920 int left, i;
2921 #ifdef notdef
2922 struct he_rbrq *rbrq_tail;
2923 struct he_tpdrq *tpdrq_head;
2924 int rbpl_head, rbpl_tail, inuse; /* inuse is used by the notdef block below */
2925 #endif
2926 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
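/*
 * one line of output is generated per call, selected by *pos; the
 * hardware counters are accumulated into statics since they
 * apparently clear on read
 */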
2929 left = *pos;
2930 if (!left--)
2931 return sprintf(page, "%s\n", version);
2933 if (!left--)
2934 return sprintf(page, "%s%s\n\n",
2935 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2937 if (!left--)
2938 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2940 spin_lock_irqsave(&he_dev->global_lock, flags);
2941 mcc += he_readl(he_dev, MCC);
2942 oec += he_readl(he_dev, OEC);
2943 dcc += he_readl(he_dev, DCC);
2944 cec += he_readl(he_dev, CEC);
2945 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2947 if (!left--)
2948 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2949 mcc, oec, dcc, cec);
2951 if (!left--)
2952 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2953 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2955 if (!left--)
2956 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2957 CONFIG_TPDRQ_SIZE);
2959 if (!left--)
2960 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2961 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2963 if (!left--)
2964 return sprintf(page, "tbrq_size = %d peak = %d\n",
2965 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2968 #ifdef notdef
2969 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2970 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2972 inuse = rbpl_head - rbpl_tail;
2973 if (inuse < 0)
2974 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2975 inuse /= sizeof(struct he_rbp);
2977 if (!left--)
2978 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2979 CONFIG_RBPL_SIZE, inuse);
2980 #endif
2982 if (!left--)
2983 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2985 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2986 if (!left--)
2987 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2988 he_dev->cs_stper[i].pcr,
2989 he_dev->cs_stper[i].inuse);
2991 if (!left--)
2992 return sprintf(page, "total bw (cbr): %d (limit %d)\n",
2993 he_dev->total_bw, he_dev->atm_dev->link_rate * 10 / 9);
2995 return 0;
2996 }
2998 /* eeprom routines -- see 4.7 */
3000 u8
3001 read_prom_byte(struct he_dev *he_dev, int addr)
3002 {
3003 u32 val = 0, tmp_read = 0;
3004 int i, j = 0;
3005 u8 byte_read = 0;
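/*
 * bit-banged serial eeprom: the readtab/clocktab patterns drive the
 * chip select, clock and data-in bits of HOST_CNTL, and ID_DOUT is
 * sampled to assemble each byte, msb first
 */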
3007 val = readl(he_dev->membase + HOST_CNTL);
3008 val &= 0xFFFFE0FF;
3010 /* Turn on write enable */
3011 val |= 0x800;
3012 he_writel(he_dev, val, HOST_CNTL);
3014 /* Send READ instruction */
3015 for (i = 0; i < sizeof(readtab)/sizeof(readtab[0]); i++) {
3016 he_writel(he_dev, val | readtab[i], HOST_CNTL);
3017 udelay(EEPROM_DELAY);
3018 }
3020 /* Next, we need to send the byte address to read from */
3021 for (i = 7; i >= 0; i--) {
3022 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3023 udelay(EEPROM_DELAY);
3024 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3025 udelay(EEPROM_DELAY);
3026 }
3028 j = 0;
3030 val &= 0xFFFFF7FF; /* Turn off write enable */
3031 he_writel(he_dev, val, HOST_CNTL);
3033 /* Now, we can read data from the EEPROM by clocking it in */
3034 for (i = 7; i >= 0; i--) {
3035 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3036 udelay(EEPROM_DELAY);
3037 tmp_read = he_readl(he_dev, HOST_CNTL);
3038 byte_read |= (unsigned char)
3039 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
3040 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3041 udelay(EEPROM_DELAY);
3042 }
3044 he_writel(he_dev, val | ID_CS, HOST_CNTL);
3045 udelay(EEPROM_DELAY);
3047 return byte_read;
3048 }
3050 MODULE_LICENSE("GPL");
3051 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
3052 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
3053 module_param(disable64, bool, 0);
3054 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
3055 module_param(nvpibits, short, 0);
3056 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
3057 module_param(nvcibits, short, 0);
3058 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
3059 module_param(rx_skb_reserve, short, 0);
3060 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
3061 module_param(irq_coalesce, bool, 0);
3062 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
3063 module_param(sdh, bool, 0);
3064 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
3066 static struct pci_device_id he_pci_tbl[] = {
3067 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
3068 0, 0, 0 },
3069 { 0, }
3070 };
3072 MODULE_DEVICE_TABLE(pci, he_pci_tbl);
3074 static struct pci_driver he_driver = {
3075 .name = "he",
3076 .probe = he_init_one,
3077 .remove = __devexit_p(he_remove_one),
3078 .id_table = he_pci_tbl,
3079 };
3081 static int __init he_init(void)
3082 {
3083 return pci_register_driver(&he_driver);
3084 }
3086 static void __exit he_cleanup(void)
3087 {
3088 pci_unregister_driver(&he_driver);
3089 }
3091 module_init(he_init);
3092 module_exit(he_cleanup);