5 ForeRunnerHE ATM Adapter driver for ATM on Linux
6 Copyright (C) 1999-2001 Naval Research Laboratory
8 This library is free software; you can redistribute it and/or
9 modify it under the terms of the GNU Lesser General Public
10 License as published by the Free Software Foundation; either
11 version 2.1 of the License, or (at your option) any later version.
13 This library is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public
19 License along with this library; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 ForeRunnerHE ATM Adapter driver for ATM on Linux
29 Copyright (C) 1999-2001 Naval Research Laboratory
31 Permission to use, copy, modify and distribute this software and its
32 documentation is hereby granted, provided that both the copyright
33 notice and this permission notice appear in all copies of the software,
34 derivative works or modified versions, and any portions thereof, and
35 that both notices appear in supporting documentation.
37 NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38 DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39 RESULTING FROM THE USE OF THIS SOFTWARE.
41 This driver was written using the "Programmer's Reference Manual for
42 ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
45 chas williams <chas@cmf.nrl.navy.mil>
46 eric kinzie <ekinzie@cmf.nrl.navy.mil>
49 4096 supported 'connections'
50 group 0 is used for all traffic
51 interrupt queue 0 is used for all interrupts
52 aal0 support (based on work from ulrich.u.muller@nokia.com)
56 #include <linux/module.h>
57 #include <linux/kernel.h>
58 #include <linux/skbuff.h>
59 #include <linux/pci.h>
60 #include <linux/errno.h>
61 #include <linux/types.h>
62 #include <linux/string.h>
63 #include <linux/delay.h>
64 #include <linux/init.h>
66 #include <linux/sched.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/dma-mapping.h>
70 #include <linux/bitmap.h>
71 #include <linux/slab.h>
73 #include <asm/byteorder.h>
74 #include <asm/uaccess.h>
76 #include <linux/atmdev.h>
77 #include <linux/atm.h>
78 #include <linux/sonet.h>
80 #undef USE_SCATTERGATHER
81 #undef USE_CHECKSUM_HW /* still confused about this */
86 #include <linux/atm_he.h>
/* Always-on error logging, tagged with the device number. */
#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

/* Debug logging: compiled out unless HE_DEBUG is defined.
 * NOTE(review): the #ifdef scaffolding was lost in extraction and has been
 * restored from the visible KERN_DEBUG and empty-statement variants. */
#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */
98 static int he_open(struct atm_vcc
*vcc
);
99 static void he_close(struct atm_vcc
*vcc
);
100 static int he_send(struct atm_vcc
*vcc
, struct sk_buff
*skb
);
101 static int he_ioctl(struct atm_dev
*dev
, unsigned int cmd
, void __user
*arg
);
102 static irqreturn_t
he_irq_handler(int irq
, void *dev_id
);
103 static void he_tasklet(unsigned long data
);
104 static int he_proc_read(struct atm_dev
*dev
,loff_t
*pos
,char *page
);
105 static int he_start(struct atm_dev
*dev
);
106 static void he_stop(struct he_dev
*dev
);
107 static void he_phy_put(struct atm_dev
*, unsigned char, unsigned long);
108 static unsigned char he_phy_get(struct atm_dev
*, unsigned long);
110 static u8
read_prom_byte(struct he_dev
*he_dev
, int addr
);
/* singly-linked list of all probed adapters (prepended in he_init_one) */
static struct he_dev *he_devs;

/* module parameters */
static int disable64;			/* force 32-bit PCI transfers when set */
static short nvpibits = -1;		/* -1 = use CONFIG_DEFAULT_VPIBITS;
					   nvpibits + nvcibits must equal HE_MAXCIDBITS */
static short nvcibits = -1;		/* -1 = use CONFIG_DEFAULT_VCIBITS */
static short rx_skb_reserve = 16;	/* headroom reserved at the front of rx skbs */
static int irq_coalesce = 1;		/* coalesce rx-done interrupts (see he_init_group) */
122 /* Read from EEPROM = 0000 0011b */
123 static unsigned int readtab
[] = {
138 CLK_HIGH
| SI_HIGH
, /* 1 */
140 CLK_HIGH
| SI_HIGH
/* 1 */
143 /* Clock to read from/write to the EEPROM */
144 static unsigned int clocktab
[] = {
164 static struct atmdev_ops he_ops
=
170 .phy_put
= he_phy_put
,
171 .phy_get
= he_phy_get
,
172 .proc_read
= he_proc_read
,
/* Raw register access.  Each write is followed by a write memory barrier
 * so register updates reach the adapter in program order. */
#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))
179 /* section 2.12 connection memory access */
181 static __inline__
void
182 he_writel_internal(struct he_dev
*he_dev
, unsigned val
, unsigned addr
,
185 he_writel(he_dev
, val
, CON_DAT
);
186 (void) he_readl(he_dev
, CON_DAT
); /* flush posted writes */
187 he_writel(he_dev
, flags
| CON_CTL_WRITE
| CON_CTL_ADDR(addr
), CON_CTL
);
188 while (he_readl(he_dev
, CON_CTL
) & CON_CTL_BUSY
);
/* Connection-memory writes by address space: receive connection memory,
 * transmit connection memory, and the cs-block mailbox. */
#define he_writel_rcm(dev, val, reg)				\
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg)				\
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg)				\
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)
201 he_readl_internal(struct he_dev
*he_dev
, unsigned addr
, unsigned flags
)
203 he_writel(he_dev
, flags
| CON_CTL_READ
| CON_CTL_ADDR(addr
), CON_CTL
);
204 while (he_readl(he_dev
, CON_CTL
) & CON_CTL_BUSY
);
205 return he_readl(he_dev
, CON_DAT
);
/* Connection-memory reads, one macro per address space. */
#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)
218 /* figure 2.2 connection id */
220 #define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff)
/* 2.5.1 per connection transmit state registers
 *
 * TSRs live in transmit connection memory.  TSR0-7 are indexed per-cid in
 * bank A (8 words/cid), TSR8-11 in bank B (4 words/cid), TSR12-13 in bank C
 * (2 words/cid) and TSR14 in bank D (1 word/cid). */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)

#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)

#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)

#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

/* only the most significant byte may be written while active (cf. TSR4) */
#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers
 *
 * RSR0-7 live at the base of receive connection memory, 8 words per cid. */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
327 static __inline__
struct atm_vcc
*
328 __find_vcc(struct he_dev
*he_dev
, unsigned cid
)
330 struct hlist_head
*head
;
332 struct hlist_node
*node
;
337 vpi
= cid
>> he_dev
->vcibits
;
338 vci
= cid
& ((1 << he_dev
->vcibits
) - 1);
339 head
= &vcc_hash
[vci
& (VCC_HTABLE_SIZE
-1)];
341 sk_for_each(s
, node
, head
) {
343 if (vcc
->dev
== he_dev
->atm_dev
&&
344 vcc
->vci
== vci
&& vcc
->vpi
== vpi
&&
345 vcc
->qos
.rxtp
.traffic_class
!= ATM_NONE
) {
353 he_init_one(struct pci_dev
*pci_dev
, const struct pci_device_id
*pci_ent
)
355 struct atm_dev
*atm_dev
= NULL
;
356 struct he_dev
*he_dev
= NULL
;
359 printk(KERN_INFO
"ATM he driver\n");
361 if (pci_enable_device(pci_dev
))
363 if (pci_set_dma_mask(pci_dev
, DMA_BIT_MASK(32)) != 0) {
364 printk(KERN_WARNING
"he: no suitable dma available\n");
366 goto init_one_failure
;
369 atm_dev
= atm_dev_register(DEV_LABEL
, &he_ops
, -1, NULL
);
372 goto init_one_failure
;
374 pci_set_drvdata(pci_dev
, atm_dev
);
376 he_dev
= kzalloc(sizeof(struct he_dev
),
380 goto init_one_failure
;
382 he_dev
->pci_dev
= pci_dev
;
383 he_dev
->atm_dev
= atm_dev
;
384 he_dev
->atm_dev
->dev_data
= he_dev
;
385 atm_dev
->dev_data
= he_dev
;
386 he_dev
->number
= atm_dev
->number
;
387 tasklet_init(&he_dev
->tasklet
, he_tasklet
, (unsigned long) he_dev
);
388 spin_lock_init(&he_dev
->global_lock
);
390 if (he_start(atm_dev
)) {
393 goto init_one_failure
;
397 he_dev
->next
= he_devs
;
403 atm_dev_deregister(atm_dev
);
405 pci_disable_device(pci_dev
);
409 static void __devexit
410 he_remove_one (struct pci_dev
*pci_dev
)
412 struct atm_dev
*atm_dev
;
413 struct he_dev
*he_dev
;
415 atm_dev
= pci_get_drvdata(pci_dev
);
416 he_dev
= HE_DEV(atm_dev
);
418 /* need to remove from he_devs */
421 atm_dev_deregister(atm_dev
);
424 pci_set_drvdata(pci_dev
, NULL
);
425 pci_disable_device(pci_dev
);
/*
 * Convert a cell rate in cells-per-second to the ATM Forum floating-point
 * encoding used by the rate registers: bit 14 = non-zero flag, bits 13..9 =
 * 5-bit exponent, bits 8..0 = 9-bit mantissa (implied leading one).
 * Returns 0 for a zero rate.
 */
static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;			/* make room for the 9-bit mantissa */
	while (rate > 0x3ff) {		/* normalize into (0x1ff, 0x3ff] */
		++exp;
		rate >>= 1;
	}

	/* mask drops the implied leading one of the mantissa */
	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
448 static void __devinit
449 he_init_rx_lbfp0(struct he_dev
*he_dev
)
451 unsigned i
, lbm_offset
, lbufd_index
, lbuf_addr
, lbuf_count
;
452 unsigned lbufs_per_row
= he_dev
->cells_per_row
/ he_dev
->cells_per_lbuf
;
453 unsigned lbuf_bufsize
= he_dev
->cells_per_lbuf
* ATM_CELL_PAYLOAD
;
454 unsigned row_offset
= he_dev
->r0_startrow
* he_dev
->bytes_per_row
;
457 lbm_offset
= he_readl(he_dev
, RCMLBM_BA
);
459 he_writel(he_dev
, lbufd_index
, RLBF0_H
);
461 for (i
= 0, lbuf_count
= 0; i
< he_dev
->r0_numbuffs
; ++i
) {
463 lbuf_addr
= (row_offset
+ (lbuf_count
* lbuf_bufsize
)) / 32;
465 he_writel_rcm(he_dev
, lbuf_addr
, lbm_offset
);
466 he_writel_rcm(he_dev
, lbufd_index
, lbm_offset
+ 1);
468 if (++lbuf_count
== lbufs_per_row
) {
470 row_offset
+= he_dev
->bytes_per_row
;
475 he_writel(he_dev
, lbufd_index
- 2, RLBF0_T
);
476 he_writel(he_dev
, he_dev
->r0_numbuffs
, RLBF0_C
);
479 static void __devinit
480 he_init_rx_lbfp1(struct he_dev
*he_dev
)
482 unsigned i
, lbm_offset
, lbufd_index
, lbuf_addr
, lbuf_count
;
483 unsigned lbufs_per_row
= he_dev
->cells_per_row
/ he_dev
->cells_per_lbuf
;
484 unsigned lbuf_bufsize
= he_dev
->cells_per_lbuf
* ATM_CELL_PAYLOAD
;
485 unsigned row_offset
= he_dev
->r1_startrow
* he_dev
->bytes_per_row
;
488 lbm_offset
= he_readl(he_dev
, RCMLBM_BA
) + (2 * lbufd_index
);
490 he_writel(he_dev
, lbufd_index
, RLBF1_H
);
492 for (i
= 0, lbuf_count
= 0; i
< he_dev
->r1_numbuffs
; ++i
) {
494 lbuf_addr
= (row_offset
+ (lbuf_count
* lbuf_bufsize
)) / 32;
496 he_writel_rcm(he_dev
, lbuf_addr
, lbm_offset
);
497 he_writel_rcm(he_dev
, lbufd_index
, lbm_offset
+ 1);
499 if (++lbuf_count
== lbufs_per_row
) {
501 row_offset
+= he_dev
->bytes_per_row
;
506 he_writel(he_dev
, lbufd_index
- 2, RLBF1_T
);
507 he_writel(he_dev
, he_dev
->r1_numbuffs
, RLBF1_C
);
510 static void __devinit
511 he_init_tx_lbfp(struct he_dev
*he_dev
)
513 unsigned i
, lbm_offset
, lbufd_index
, lbuf_addr
, lbuf_count
;
514 unsigned lbufs_per_row
= he_dev
->cells_per_row
/ he_dev
->cells_per_lbuf
;
515 unsigned lbuf_bufsize
= he_dev
->cells_per_lbuf
* ATM_CELL_PAYLOAD
;
516 unsigned row_offset
= he_dev
->tx_startrow
* he_dev
->bytes_per_row
;
518 lbufd_index
= he_dev
->r0_numbuffs
+ he_dev
->r1_numbuffs
;
519 lbm_offset
= he_readl(he_dev
, RCMLBM_BA
) + (2 * lbufd_index
);
521 he_writel(he_dev
, lbufd_index
, TLBF_H
);
523 for (i
= 0, lbuf_count
= 0; i
< he_dev
->tx_numbuffs
; ++i
) {
525 lbuf_addr
= (row_offset
+ (lbuf_count
* lbuf_bufsize
)) / 32;
527 he_writel_rcm(he_dev
, lbuf_addr
, lbm_offset
);
528 he_writel_rcm(he_dev
, lbufd_index
, lbm_offset
+ 1);
530 if (++lbuf_count
== lbufs_per_row
) {
532 row_offset
+= he_dev
->bytes_per_row
;
537 he_writel(he_dev
, lbufd_index
- 1, TLBF_T
);
541 he_init_tpdrq(struct he_dev
*he_dev
)
543 he_dev
->tpdrq_base
= pci_alloc_consistent(he_dev
->pci_dev
,
544 CONFIG_TPDRQ_SIZE
* sizeof(struct he_tpdrq
), &he_dev
->tpdrq_phys
);
545 if (he_dev
->tpdrq_base
== NULL
) {
546 hprintk("failed to alloc tpdrq\n");
549 memset(he_dev
->tpdrq_base
, 0,
550 CONFIG_TPDRQ_SIZE
* sizeof(struct he_tpdrq
));
552 he_dev
->tpdrq_tail
= he_dev
->tpdrq_base
;
553 he_dev
->tpdrq_head
= he_dev
->tpdrq_base
;
555 he_writel(he_dev
, he_dev
->tpdrq_phys
, TPDRQ_B_H
);
556 he_writel(he_dev
, 0, TPDRQ_T
);
557 he_writel(he_dev
, CONFIG_TPDRQ_SIZE
- 1, TPDRQ_S
);
562 static void __devinit
563 he_init_cs_block(struct he_dev
*he_dev
)
565 unsigned clock
, rate
, delta
;
568 /* 5.1.7 cs block initialization */
570 for (reg
= 0; reg
< 0x20; ++reg
)
571 he_writel_mbox(he_dev
, 0x0, CS_STTIM0
+ reg
);
573 /* rate grid timer reload values */
575 clock
= he_is622(he_dev
) ? 66667000 : 50000000;
576 rate
= he_dev
->atm_dev
->link_rate
;
577 delta
= rate
/ 16 / 2;
579 for (reg
= 0; reg
< 0x10; ++reg
) {
580 /* 2.4 internal transmit function
582 * we initialize the first row in the rate grid.
583 * values are period (in clock cycles) of timer
585 unsigned period
= clock
/ rate
;
587 he_writel_mbox(he_dev
, period
, CS_TGRLD0
+ reg
);
591 if (he_is622(he_dev
)) {
592 /* table 5.2 (4 cells per lbuf) */
593 he_writel_mbox(he_dev
, 0x000800fa, CS_ERTHR0
);
594 he_writel_mbox(he_dev
, 0x000c33cb, CS_ERTHR1
);
595 he_writel_mbox(he_dev
, 0x0010101b, CS_ERTHR2
);
596 he_writel_mbox(he_dev
, 0x00181dac, CS_ERTHR3
);
597 he_writel_mbox(he_dev
, 0x00280600, CS_ERTHR4
);
599 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
600 he_writel_mbox(he_dev
, 0x023de8b3, CS_ERCTL0
);
601 he_writel_mbox(he_dev
, 0x1801, CS_ERCTL1
);
602 he_writel_mbox(he_dev
, 0x68b3, CS_ERCTL2
);
603 he_writel_mbox(he_dev
, 0x1280, CS_ERSTAT0
);
604 he_writel_mbox(he_dev
, 0x68b3, CS_ERSTAT1
);
605 he_writel_mbox(he_dev
, 0x14585, CS_RTFWR
);
607 he_writel_mbox(he_dev
, 0x4680, CS_RTATR
);
610 he_writel_mbox(he_dev
, 0x00159ece, CS_TFBSET
);
611 he_writel_mbox(he_dev
, 0x68b3, CS_WCRMAX
);
612 he_writel_mbox(he_dev
, 0x5eb3, CS_WCRMIN
);
613 he_writel_mbox(he_dev
, 0xe8b3, CS_WCRINC
);
614 he_writel_mbox(he_dev
, 0xdeb3, CS_WCRDEC
);
615 he_writel_mbox(he_dev
, 0x68b3, CS_WCRCEIL
);
618 he_writel_mbox(he_dev
, 0x5, CS_OTPPER
);
619 he_writel_mbox(he_dev
, 0x14, CS_OTWPER
);
621 /* table 5.1 (4 cells per lbuf) */
622 he_writel_mbox(he_dev
, 0x000400ea, CS_ERTHR0
);
623 he_writel_mbox(he_dev
, 0x00063388, CS_ERTHR1
);
624 he_writel_mbox(he_dev
, 0x00081018, CS_ERTHR2
);
625 he_writel_mbox(he_dev
, 0x000c1dac, CS_ERTHR3
);
626 he_writel_mbox(he_dev
, 0x0014051a, CS_ERTHR4
);
628 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
629 he_writel_mbox(he_dev
, 0x0235e4b1, CS_ERCTL0
);
630 he_writel_mbox(he_dev
, 0x4701, CS_ERCTL1
);
631 he_writel_mbox(he_dev
, 0x64b1, CS_ERCTL2
);
632 he_writel_mbox(he_dev
, 0x1280, CS_ERSTAT0
);
633 he_writel_mbox(he_dev
, 0x64b1, CS_ERSTAT1
);
634 he_writel_mbox(he_dev
, 0xf424, CS_RTFWR
);
636 he_writel_mbox(he_dev
, 0x4680, CS_RTATR
);
639 he_writel_mbox(he_dev
, 0x000563b7, CS_TFBSET
);
640 he_writel_mbox(he_dev
, 0x64b1, CS_WCRMAX
);
641 he_writel_mbox(he_dev
, 0x5ab1, CS_WCRMIN
);
642 he_writel_mbox(he_dev
, 0xe4b1, CS_WCRINC
);
643 he_writel_mbox(he_dev
, 0xdab1, CS_WCRDEC
);
644 he_writel_mbox(he_dev
, 0x64b1, CS_WCRCEIL
);
647 he_writel_mbox(he_dev
, 0x6, CS_OTPPER
);
648 he_writel_mbox(he_dev
, 0x1e, CS_OTWPER
);
651 he_writel_mbox(he_dev
, 0x8, CS_OTTLIM
);
653 for (reg
= 0; reg
< 0x8; ++reg
)
654 he_writel_mbox(he_dev
, 0x0, CS_HGRRT0
+ reg
);
659 he_init_cs_block_rcm(struct he_dev
*he_dev
)
661 unsigned (*rategrid
)[16][16];
662 unsigned rate
, delta
;
665 unsigned rate_atmf
, exp
, man
;
666 unsigned long long rate_cps
;
667 int mult
, buf
, buf_limit
= 4;
669 rategrid
= kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL
);
673 /* initialize rate grid group table */
675 for (reg
= 0x0; reg
< 0xff; ++reg
)
676 he_writel_rcm(he_dev
, 0x0, CONFIG_RCMABR
+ reg
);
678 /* initialize rate controller groups */
680 for (reg
= 0x100; reg
< 0x1ff; ++reg
)
681 he_writel_rcm(he_dev
, 0x0, CONFIG_RCMABR
+ reg
);
683 /* initialize tNrm lookup table */
685 /* the manual makes reference to a routine in a sample driver
686 for proper configuration; fortunately, we only need this
687 in order to support abr connection */
689 /* initialize rate to group table */
691 rate
= he_dev
->atm_dev
->link_rate
;
695 * 2.4 transmit internal functions
697 * we construct a copy of the rate grid used by the scheduler
698 * in order to construct the rate to group table below
701 for (j
= 0; j
< 16; j
++) {
702 (*rategrid
)[0][j
] = rate
;
706 for (i
= 1; i
< 16; i
++)
707 for (j
= 0; j
< 16; j
++)
709 (*rategrid
)[i
][j
] = (*rategrid
)[i
- 1][j
] / 4;
711 (*rategrid
)[i
][j
] = (*rategrid
)[i
- 1][j
] / 2;
714 * 2.4 transmit internal function
716 * this table maps the upper 5 bits of exponent and mantissa
717 * of the atm forum representation of the rate into an index
722 while (rate_atmf
< 0x400) {
723 man
= (rate_atmf
& 0x1f) << 4;
724 exp
= rate_atmf
>> 5;
727 instead of '/ 512', use '>> 9' to prevent a call
728 to divdu3 on x86 platforms
730 rate_cps
= (unsigned long long) (1 << exp
) * (man
+ 512) >> 9;
733 rate_cps
= 10; /* 2.2.1 minimum payload rate is 10 cps */
735 for (i
= 255; i
> 0; i
--)
736 if ((*rategrid
)[i
/16][i
%16] >= rate_cps
)
737 break; /* pick nearest rate instead? */
740 * each table entry is 16 bits: (rate grid index (8 bits)
741 * and a buffer limit (8 bits)
742 * there are two table entries in each 32-bit register
746 buf
= rate_cps
* he_dev
->tx_numbuffs
/
747 (he_dev
->atm_dev
->link_rate
* 2);
749 /* this is pretty, but avoids _divdu3 and is mostly correct */
750 mult
= he_dev
->atm_dev
->link_rate
/ ATM_OC3_PCR
;
751 if (rate_cps
> (272 * mult
))
753 else if (rate_cps
> (204 * mult
))
755 else if (rate_cps
> (136 * mult
))
757 else if (rate_cps
> (68 * mult
))
764 reg
= (reg
<< 16) | ((i
<< 8) | buf
);
766 #define RTGTBL_OFFSET 0x400
769 he_writel_rcm(he_dev
, reg
,
770 CONFIG_RCMABR
+ RTGTBL_OFFSET
+ (rate_atmf
>> 1));
780 he_init_group(struct he_dev
*he_dev
, int group
)
782 struct he_buff
*heb
, *next
;
786 he_writel(he_dev
, 0x0, G0_RBPS_S
+ (group
* 32));
787 he_writel(he_dev
, 0x0, G0_RBPS_T
+ (group
* 32));
788 he_writel(he_dev
, 0x0, G0_RBPS_QI
+ (group
* 32));
789 he_writel(he_dev
, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
790 G0_RBPS_BS
+ (group
* 32));
793 he_dev
->rbpl_table
= kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE
)
794 * sizeof(unsigned long), GFP_KERNEL
);
795 if (!he_dev
->rbpl_table
) {
796 hprintk("unable to allocate rbpl bitmap table\n");
799 bitmap_zero(he_dev
->rbpl_table
, RBPL_TABLE_SIZE
);
801 /* rbpl_virt 64-bit pointers */
802 he_dev
->rbpl_virt
= kmalloc(RBPL_TABLE_SIZE
803 * sizeof(struct he_buff
*), GFP_KERNEL
);
804 if (!he_dev
->rbpl_virt
) {
805 hprintk("unable to allocate rbpl virt table\n");
806 goto out_free_rbpl_table
;
809 /* large buffer pool */
810 he_dev
->rbpl_pool
= pci_pool_create("rbpl", he_dev
->pci_dev
,
811 CONFIG_RBPL_BUFSIZE
, 64, 0);
812 if (he_dev
->rbpl_pool
== NULL
) {
813 hprintk("unable to create rbpl pool\n");
814 goto out_free_rbpl_virt
;
817 he_dev
->rbpl_base
= pci_alloc_consistent(he_dev
->pci_dev
,
818 CONFIG_RBPL_SIZE
* sizeof(struct he_rbp
), &he_dev
->rbpl_phys
);
819 if (he_dev
->rbpl_base
== NULL
) {
820 hprintk("failed to alloc rbpl_base\n");
821 goto out_destroy_rbpl_pool
;
823 memset(he_dev
->rbpl_base
, 0, CONFIG_RBPL_SIZE
* sizeof(struct he_rbp
));
825 INIT_LIST_HEAD(&he_dev
->rbpl_outstanding
);
827 for (i
= 0; i
< CONFIG_RBPL_SIZE
; ++i
) {
829 heb
= pci_pool_alloc(he_dev
->rbpl_pool
, GFP_KERNEL
|GFP_DMA
, &mapping
);
832 heb
->mapping
= mapping
;
833 list_add(&heb
->entry
, &he_dev
->rbpl_outstanding
);
835 set_bit(i
, he_dev
->rbpl_table
);
836 he_dev
->rbpl_virt
[i
] = heb
;
837 he_dev
->rbpl_hint
= i
+ 1;
838 he_dev
->rbpl_base
[i
].idx
= i
<< RBP_IDX_OFFSET
;
839 he_dev
->rbpl_base
[i
].phys
= mapping
+ offsetof(struct he_buff
, data
);
841 he_dev
->rbpl_tail
= &he_dev
->rbpl_base
[CONFIG_RBPL_SIZE
- 1];
843 he_writel(he_dev
, he_dev
->rbpl_phys
, G0_RBPL_S
+ (group
* 32));
844 he_writel(he_dev
, RBPL_MASK(he_dev
->rbpl_tail
),
845 G0_RBPL_T
+ (group
* 32));
846 he_writel(he_dev
, (CONFIG_RBPL_BUFSIZE
- sizeof(struct he_buff
))/4,
847 G0_RBPL_BS
+ (group
* 32));
849 RBP_THRESH(CONFIG_RBPL_THRESH
) |
850 RBP_QSIZE(CONFIG_RBPL_SIZE
- 1) |
852 G0_RBPL_QI
+ (group
* 32));
854 /* rx buffer ready queue */
856 he_dev
->rbrq_base
= pci_alloc_consistent(he_dev
->pci_dev
,
857 CONFIG_RBRQ_SIZE
* sizeof(struct he_rbrq
), &he_dev
->rbrq_phys
);
858 if (he_dev
->rbrq_base
== NULL
) {
859 hprintk("failed to allocate rbrq\n");
862 memset(he_dev
->rbrq_base
, 0, CONFIG_RBRQ_SIZE
* sizeof(struct he_rbrq
));
864 he_dev
->rbrq_head
= he_dev
->rbrq_base
;
865 he_writel(he_dev
, he_dev
->rbrq_phys
, G0_RBRQ_ST
+ (group
* 16));
866 he_writel(he_dev
, 0, G0_RBRQ_H
+ (group
* 16));
868 RBRQ_THRESH(CONFIG_RBRQ_THRESH
) | RBRQ_SIZE(CONFIG_RBRQ_SIZE
- 1),
869 G0_RBRQ_Q
+ (group
* 16));
871 hprintk("coalescing interrupts\n");
872 he_writel(he_dev
, RBRQ_TIME(768) | RBRQ_COUNT(7),
873 G0_RBRQ_I
+ (group
* 16));
875 he_writel(he_dev
, RBRQ_TIME(0) | RBRQ_COUNT(1),
876 G0_RBRQ_I
+ (group
* 16));
878 /* tx buffer ready queue */
880 he_dev
->tbrq_base
= pci_alloc_consistent(he_dev
->pci_dev
,
881 CONFIG_TBRQ_SIZE
* sizeof(struct he_tbrq
), &he_dev
->tbrq_phys
);
882 if (he_dev
->tbrq_base
== NULL
) {
883 hprintk("failed to allocate tbrq\n");
884 goto out_free_rbpq_base
;
886 memset(he_dev
->tbrq_base
, 0, CONFIG_TBRQ_SIZE
* sizeof(struct he_tbrq
));
888 he_dev
->tbrq_head
= he_dev
->tbrq_base
;
890 he_writel(he_dev
, he_dev
->tbrq_phys
, G0_TBRQ_B_T
+ (group
* 16));
891 he_writel(he_dev
, 0, G0_TBRQ_H
+ (group
* 16));
892 he_writel(he_dev
, CONFIG_TBRQ_SIZE
- 1, G0_TBRQ_S
+ (group
* 16));
893 he_writel(he_dev
, CONFIG_TBRQ_THRESH
, G0_TBRQ_THRESH
+ (group
* 16));
898 pci_free_consistent(he_dev
->pci_dev
, CONFIG_RBRQ_SIZE
*
899 sizeof(struct he_rbrq
), he_dev
->rbrq_base
,
902 list_for_each_entry_safe(heb
, next
, &he_dev
->rbpl_outstanding
, entry
)
903 pci_pool_free(he_dev
->rbpl_pool
, heb
, heb
->mapping
);
905 pci_free_consistent(he_dev
->pci_dev
, CONFIG_RBPL_SIZE
*
906 sizeof(struct he_rbp
), he_dev
->rbpl_base
,
908 out_destroy_rbpl_pool
:
909 pci_pool_destroy(he_dev
->rbpl_pool
);
911 kfree(he_dev
->rbpl_virt
);
913 kfree(he_dev
->rbpl_table
);
919 he_init_irq(struct he_dev
*he_dev
)
923 /* 2.9.3.5 tail offset for each interrupt queue is located after the
924 end of the interrupt queue */
926 he_dev
->irq_base
= pci_alloc_consistent(he_dev
->pci_dev
,
927 (CONFIG_IRQ_SIZE
+1) * sizeof(struct he_irq
), &he_dev
->irq_phys
);
928 if (he_dev
->irq_base
== NULL
) {
929 hprintk("failed to allocate irq\n");
932 he_dev
->irq_tailoffset
= (unsigned *)
933 &he_dev
->irq_base
[CONFIG_IRQ_SIZE
];
934 *he_dev
->irq_tailoffset
= 0;
935 he_dev
->irq_head
= he_dev
->irq_base
;
936 he_dev
->irq_tail
= he_dev
->irq_base
;
938 for (i
= 0; i
< CONFIG_IRQ_SIZE
; ++i
)
939 he_dev
->irq_base
[i
].isw
= ITYPE_INVALID
;
941 he_writel(he_dev
, he_dev
->irq_phys
, IRQ0_BASE
);
943 IRQ_SIZE(CONFIG_IRQ_SIZE
) | IRQ_THRESH(CONFIG_IRQ_THRESH
),
945 he_writel(he_dev
, IRQ_INT_A
| IRQ_TYPE_LINE
, IRQ0_CNTL
);
946 he_writel(he_dev
, 0x0, IRQ0_DATA
);
948 he_writel(he_dev
, 0x0, IRQ1_BASE
);
949 he_writel(he_dev
, 0x0, IRQ1_HEAD
);
950 he_writel(he_dev
, 0x0, IRQ1_CNTL
);
951 he_writel(he_dev
, 0x0, IRQ1_DATA
);
953 he_writel(he_dev
, 0x0, IRQ2_BASE
);
954 he_writel(he_dev
, 0x0, IRQ2_HEAD
);
955 he_writel(he_dev
, 0x0, IRQ2_CNTL
);
956 he_writel(he_dev
, 0x0, IRQ2_DATA
);
958 he_writel(he_dev
, 0x0, IRQ3_BASE
);
959 he_writel(he_dev
, 0x0, IRQ3_HEAD
);
960 he_writel(he_dev
, 0x0, IRQ3_CNTL
);
961 he_writel(he_dev
, 0x0, IRQ3_DATA
);
963 /* 2.9.3.2 interrupt queue mapping registers */
965 he_writel(he_dev
, 0x0, GRP_10_MAP
);
966 he_writel(he_dev
, 0x0, GRP_32_MAP
);
967 he_writel(he_dev
, 0x0, GRP_54_MAP
);
968 he_writel(he_dev
, 0x0, GRP_76_MAP
);
970 if (request_irq(he_dev
->pci_dev
->irq
, he_irq_handler
, IRQF_DISABLED
|IRQF_SHARED
, DEV_LABEL
, he_dev
)) {
971 hprintk("irq %d already in use\n", he_dev
->pci_dev
->irq
);
975 he_dev
->irq
= he_dev
->pci_dev
->irq
;
981 he_start(struct atm_dev
*dev
)
983 struct he_dev
*he_dev
;
984 struct pci_dev
*pci_dev
;
985 unsigned long membase
;
988 u32 gen_cntl_0
, host_cntl
, lb_swap
;
989 u8 cache_size
, timer
;
992 unsigned int status
, reg
;
995 he_dev
= HE_DEV(dev
);
996 pci_dev
= he_dev
->pci_dev
;
998 membase
= pci_resource_start(pci_dev
, 0);
999 HPRINTK("membase = 0x%lx irq = %d.\n", membase
, pci_dev
->irq
);
1002 * pci bus controller initialization
1005 /* 4.3 pci bus controller-specific initialization */
1006 if (pci_read_config_dword(pci_dev
, GEN_CNTL_0
, &gen_cntl_0
) != 0) {
1007 hprintk("can't read GEN_CNTL_0\n");
1010 gen_cntl_0
|= (MRL_ENB
| MRM_ENB
| IGNORE_TIMEOUT
);
1011 if (pci_write_config_dword(pci_dev
, GEN_CNTL_0
, gen_cntl_0
) != 0) {
1012 hprintk("can't write GEN_CNTL_0.\n");
1016 if (pci_read_config_word(pci_dev
, PCI_COMMAND
, &command
) != 0) {
1017 hprintk("can't read PCI_COMMAND.\n");
1021 command
|= (PCI_COMMAND_MEMORY
| PCI_COMMAND_MASTER
| PCI_COMMAND_INVALIDATE
);
1022 if (pci_write_config_word(pci_dev
, PCI_COMMAND
, command
) != 0) {
1023 hprintk("can't enable memory.\n");
1027 if (pci_read_config_byte(pci_dev
, PCI_CACHE_LINE_SIZE
, &cache_size
)) {
1028 hprintk("can't read cache line size?\n");
1032 if (cache_size
< 16) {
1034 if (pci_write_config_byte(pci_dev
, PCI_CACHE_LINE_SIZE
, cache_size
))
1035 hprintk("can't set cache line size to %d\n", cache_size
);
1038 if (pci_read_config_byte(pci_dev
, PCI_LATENCY_TIMER
, &timer
)) {
1039 hprintk("can't read latency timer?\n");
1045 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1047 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1048 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1051 #define LAT_TIMER 209
1052 if (timer
< LAT_TIMER
) {
1053 HPRINTK("latency timer was %d, setting to %d\n", timer
, LAT_TIMER
);
1055 if (pci_write_config_byte(pci_dev
, PCI_LATENCY_TIMER
, timer
))
1056 hprintk("can't set latency timer to %d\n", timer
);
1059 if (!(he_dev
->membase
= ioremap(membase
, HE_REGMAP_SIZE
))) {
1060 hprintk("can't set up page mapping\n");
1064 /* 4.4 card reset */
1065 he_writel(he_dev
, 0x0, RESET_CNTL
);
1066 he_writel(he_dev
, 0xff, RESET_CNTL
);
1068 udelay(16*1000); /* 16 ms */
1069 status
= he_readl(he_dev
, RESET_CNTL
);
1070 if ((status
& BOARD_RST_STATUS
) == 0) {
1071 hprintk("reset failed\n");
1075 /* 4.5 set bus width */
1076 host_cntl
= he_readl(he_dev
, HOST_CNTL
);
1077 if (host_cntl
& PCI_BUS_SIZE64
)
1078 gen_cntl_0
|= ENBL_64
;
1080 gen_cntl_0
&= ~ENBL_64
;
1082 if (disable64
== 1) {
1083 hprintk("disabling 64-bit pci bus transfers\n");
1084 gen_cntl_0
&= ~ENBL_64
;
1087 if (gen_cntl_0
& ENBL_64
)
1088 hprintk("64-bit transfers enabled\n");
1090 pci_write_config_dword(pci_dev
, GEN_CNTL_0
, gen_cntl_0
);
1092 /* 4.7 read prom contents */
1093 for (i
= 0; i
< PROD_ID_LEN
; ++i
)
1094 he_dev
->prod_id
[i
] = read_prom_byte(he_dev
, PROD_ID
+ i
);
1096 he_dev
->media
= read_prom_byte(he_dev
, MEDIA
);
1098 for (i
= 0; i
< 6; ++i
)
1099 dev
->esi
[i
] = read_prom_byte(he_dev
, MAC_ADDR
+ i
);
1101 hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
1103 he_dev
->media
& 0x40 ? "SM" : "MM",
1110 he_dev
->atm_dev
->link_rate
= he_is622(he_dev
) ?
1111 ATM_OC12_PCR
: ATM_OC3_PCR
;
1113 /* 4.6 set host endianess */
1114 lb_swap
= he_readl(he_dev
, LB_SWAP
);
1115 if (he_is622(he_dev
))
1116 lb_swap
&= ~XFER_SIZE
; /* 4 cells */
1118 lb_swap
|= XFER_SIZE
; /* 8 cells */
1120 lb_swap
|= DESC_WR_SWAP
| INTR_SWAP
| BIG_ENDIAN_HOST
;
1122 lb_swap
&= ~(DESC_WR_SWAP
| INTR_SWAP
| BIG_ENDIAN_HOST
|
1123 DATA_WR_SWAP
| DATA_RD_SWAP
| DESC_RD_SWAP
);
1124 #endif /* __BIG_ENDIAN */
1125 he_writel(he_dev
, lb_swap
, LB_SWAP
);
1127 /* 4.8 sdram controller initialization */
1128 he_writel(he_dev
, he_is622(he_dev
) ? LB_64_ENB
: 0x0, SDRAM_CTL
);
1130 /* 4.9 initialize rnum value */
1131 lb_swap
|= SWAP_RNUM_MAX(0xf);
1132 he_writel(he_dev
, lb_swap
, LB_SWAP
);
1134 /* 4.10 initialize the interrupt queues */
1135 if ((err
= he_init_irq(he_dev
)) != 0)
1138 /* 4.11 enable pci bus controller state machines */
1139 host_cntl
|= (OUTFF_ENB
| CMDFF_ENB
|
1140 QUICK_RD_RETRY
| QUICK_WR_RETRY
| PERR_INT_ENB
);
1141 he_writel(he_dev
, host_cntl
, HOST_CNTL
);
1143 gen_cntl_0
|= INT_PROC_ENBL
|INIT_ENB
;
1144 pci_write_config_dword(pci_dev
, GEN_CNTL_0
, gen_cntl_0
);
1147 * atm network controller initialization
1150 /* 5.1.1 generic configuration state */
1153 * local (cell) buffer memory map
1157 * 0 ____________1023 bytes 0 _______________________2047 bytes
1159 * | utility | | rx0 | |
1160 * 5|____________| 255|___________________| u |
1163 * | rx0 | row | tx | l |
1165 * | | 767|___________________| t |
1166 * 517|____________| 768| | y |
1167 * row 518| | | rx1 | |
1168 * | | 1023|___________________|___|
1173 * 1535|____________|
1176 * 2047|____________|
1180 /* total 4096 connections */
1181 he_dev
->vcibits
= CONFIG_DEFAULT_VCIBITS
;
1182 he_dev
->vpibits
= CONFIG_DEFAULT_VPIBITS
;
1184 if (nvpibits
!= -1 && nvcibits
!= -1 && nvpibits
+nvcibits
!= HE_MAXCIDBITS
) {
1185 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS
);
1189 if (nvpibits
!= -1) {
1190 he_dev
->vpibits
= nvpibits
;
1191 he_dev
->vcibits
= HE_MAXCIDBITS
- nvpibits
;
1194 if (nvcibits
!= -1) {
1195 he_dev
->vcibits
= nvcibits
;
1196 he_dev
->vpibits
= HE_MAXCIDBITS
- nvcibits
;
1200 if (he_is622(he_dev
)) {
1201 he_dev
->cells_per_row
= 40;
1202 he_dev
->bytes_per_row
= 2048;
1203 he_dev
->r0_numrows
= 256;
1204 he_dev
->tx_numrows
= 512;
1205 he_dev
->r1_numrows
= 256;
1206 he_dev
->r0_startrow
= 0;
1207 he_dev
->tx_startrow
= 256;
1208 he_dev
->r1_startrow
= 768;
1210 he_dev
->cells_per_row
= 20;
1211 he_dev
->bytes_per_row
= 1024;
1212 he_dev
->r0_numrows
= 512;
1213 he_dev
->tx_numrows
= 1018;
1214 he_dev
->r1_numrows
= 512;
1215 he_dev
->r0_startrow
= 6;
1216 he_dev
->tx_startrow
= 518;
1217 he_dev
->r1_startrow
= 1536;
1220 he_dev
->cells_per_lbuf
= 4;
1221 he_dev
->buffer_limit
= 4;
1222 he_dev
->r0_numbuffs
= he_dev
->r0_numrows
*
1223 he_dev
->cells_per_row
/ he_dev
->cells_per_lbuf
;
1224 if (he_dev
->r0_numbuffs
> 2560)
1225 he_dev
->r0_numbuffs
= 2560;
1227 he_dev
->r1_numbuffs
= he_dev
->r1_numrows
*
1228 he_dev
->cells_per_row
/ he_dev
->cells_per_lbuf
;
1229 if (he_dev
->r1_numbuffs
> 2560)
1230 he_dev
->r1_numbuffs
= 2560;
1232 he_dev
->tx_numbuffs
= he_dev
->tx_numrows
*
1233 he_dev
->cells_per_row
/ he_dev
->cells_per_lbuf
;
1234 if (he_dev
->tx_numbuffs
> 5120)
1235 he_dev
->tx_numbuffs
= 5120;
1237 /* 5.1.2 configure hardware dependent registers */
1240 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1241 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1242 (he_is622(he_dev
) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1243 (he_is622(he_dev
) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1246 he_writel(he_dev
, BANK_ON
|
1247 (he_is622(he_dev
) ? (REF_RATE(0x384) | WIDE_DATA
) : REF_RATE(0x150)),
1251 (he_is622(he_dev
) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1252 RM_RW_WAIT(1), RCMCONFIG
);
1254 (he_is622(he_dev
) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1255 TM_RW_WAIT(1), TCMCONFIG
);
1257 he_writel(he_dev
, he_dev
->cells_per_lbuf
* ATM_CELL_PAYLOAD
, LB_CONFIG
);
1260 (he_is622(he_dev
) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1261 (he_is622(he_dev
) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1262 RX_VALVP(he_dev
->vpibits
) |
1263 RX_VALVC(he_dev
->vcibits
), RC_CONFIG
);
1265 he_writel(he_dev
, DRF_THRESH(0x20) |
1266 (he_is622(he_dev
) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1267 TX_VCI_MASK(he_dev
->vcibits
) |
1268 LBFREE_CNT(he_dev
->tx_numbuffs
), TX_CONFIG
);
1270 he_writel(he_dev
, 0x0, TXAAL5_PROTO
);
1272 he_writel(he_dev
, PHY_INT_ENB
|
1273 (he_is622(he_dev
) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1276 /* 5.1.3 initialize connection memory */
1278 for (i
= 0; i
< TCM_MEM_SIZE
; ++i
)
1279 he_writel_tcm(he_dev
, 0, i
);
1281 for (i
= 0; i
< RCM_MEM_SIZE
; ++i
)
1282 he_writel_rcm(he_dev
, 0, i
);
1285 * transmit connection memory map
1288 * 0x0 ___________________
1294 * 0x8000|___________________|
1297 * 0xc000|___________________|
1300 * 0xe000|___________________|
1302 * 0xf000|___________________|
1304 * 0x10000|___________________|
1307 * |___________________|
1310 * 0x1ffff|___________________|
1315 he_writel(he_dev
, CONFIG_TSRB
, TSRB_BA
);
1316 he_writel(he_dev
, CONFIG_TSRC
, TSRC_BA
);
1317 he_writel(he_dev
, CONFIG_TSRD
, TSRD_BA
);
1318 he_writel(he_dev
, CONFIG_TMABR
, TMABR_BA
);
1319 he_writel(he_dev
, CONFIG_TPDBA
, TPD_BA
);
1323 * receive connection memory map
1325 * 0x0 ___________________
1331 * 0x8000|___________________|
1334 * | LBM | link lists of local
1335 * | tx | buffer memory
1337 * 0xd000|___________________|
1340 * 0xe000|___________________|
1343 * |___________________|
1346 * 0xffff|___________________|
1349 he_writel(he_dev
, 0x08000, RCMLBM_BA
);
1350 he_writel(he_dev
, 0x0e000, RCMRSRB_BA
);
1351 he_writel(he_dev
, 0x0d800, RCMABR_BA
);
1353 /* 5.1.4 initialize local buffer free pools linked lists */
1355 he_init_rx_lbfp0(he_dev
);
1356 he_init_rx_lbfp1(he_dev
);
1358 he_writel(he_dev
, 0x0, RLBC_H
);
1359 he_writel(he_dev
, 0x0, RLBC_T
);
1360 he_writel(he_dev
, 0x0, RLBC_H2
);
1362 he_writel(he_dev
, 512, RXTHRSH
); /* 10% of r0+r1 buffers */
1363 he_writel(he_dev
, 256, LITHRSH
); /* 5% of r0+r1 buffers */
1365 he_init_tx_lbfp(he_dev
);
1367 he_writel(he_dev
, he_is622(he_dev
) ? 0x104780 : 0x800, UBUFF_BA
);
1369 /* 5.1.5 initialize intermediate receive queues */
1371 if (he_is622(he_dev
)) {
1372 he_writel(he_dev
, 0x000f, G0_INMQ_S
);
1373 he_writel(he_dev
, 0x200f, G0_INMQ_L
);
1375 he_writel(he_dev
, 0x001f, G1_INMQ_S
);
1376 he_writel(he_dev
, 0x201f, G1_INMQ_L
);
1378 he_writel(he_dev
, 0x002f, G2_INMQ_S
);
1379 he_writel(he_dev
, 0x202f, G2_INMQ_L
);
1381 he_writel(he_dev
, 0x003f, G3_INMQ_S
);
1382 he_writel(he_dev
, 0x203f, G3_INMQ_L
);
1384 he_writel(he_dev
, 0x004f, G4_INMQ_S
);
1385 he_writel(he_dev
, 0x204f, G4_INMQ_L
);
1387 he_writel(he_dev
, 0x005f, G5_INMQ_S
);
1388 he_writel(he_dev
, 0x205f, G5_INMQ_L
);
1390 he_writel(he_dev
, 0x006f, G6_INMQ_S
);
1391 he_writel(he_dev
, 0x206f, G6_INMQ_L
);
1393 he_writel(he_dev
, 0x007f, G7_INMQ_S
);
1394 he_writel(he_dev
, 0x207f, G7_INMQ_L
);
1396 he_writel(he_dev
, 0x0000, G0_INMQ_S
);
1397 he_writel(he_dev
, 0x0008, G0_INMQ_L
);
1399 he_writel(he_dev
, 0x0001, G1_INMQ_S
);
1400 he_writel(he_dev
, 0x0009, G1_INMQ_L
);
1402 he_writel(he_dev
, 0x0002, G2_INMQ_S
);
1403 he_writel(he_dev
, 0x000a, G2_INMQ_L
);
1405 he_writel(he_dev
, 0x0003, G3_INMQ_S
);
1406 he_writel(he_dev
, 0x000b, G3_INMQ_L
);
1408 he_writel(he_dev
, 0x0004, G4_INMQ_S
);
1409 he_writel(he_dev
, 0x000c, G4_INMQ_L
);
1411 he_writel(he_dev
, 0x0005, G5_INMQ_S
);
1412 he_writel(he_dev
, 0x000d, G5_INMQ_L
);
1414 he_writel(he_dev
, 0x0006, G6_INMQ_S
);
1415 he_writel(he_dev
, 0x000e, G6_INMQ_L
);
1417 he_writel(he_dev
, 0x0007, G7_INMQ_S
);
1418 he_writel(he_dev
, 0x000f, G7_INMQ_L
);
1421 /* 5.1.6 application tunable parameters */
1423 he_writel(he_dev
, 0x0, MCC
);
1424 he_writel(he_dev
, 0x0, OEC
);
1425 he_writel(he_dev
, 0x0, DCC
);
1426 he_writel(he_dev
, 0x0, CEC
);
1428 /* 5.1.7 cs block initialization */
1430 he_init_cs_block(he_dev
);
1432 /* 5.1.8 cs block connection memory initialization */
1434 if (he_init_cs_block_rcm(he_dev
) < 0)
1437 /* 5.1.10 initialize host structures */
1439 he_init_tpdrq(he_dev
);
1441 he_dev
->tpd_pool
= pci_pool_create("tpd", he_dev
->pci_dev
,
1442 sizeof(struct he_tpd
), TPD_ALIGNMENT
, 0);
1443 if (he_dev
->tpd_pool
== NULL
) {
1444 hprintk("unable to create tpd pci_pool\n");
1448 INIT_LIST_HEAD(&he_dev
->outstanding_tpds
);
1450 if (he_init_group(he_dev
, 0) != 0)
1453 for (group
= 1; group
< HE_NUM_GROUPS
; ++group
) {
1454 he_writel(he_dev
, 0x0, G0_RBPS_S
+ (group
* 32));
1455 he_writel(he_dev
, 0x0, G0_RBPS_T
+ (group
* 32));
1456 he_writel(he_dev
, 0x0, G0_RBPS_QI
+ (group
* 32));
1457 he_writel(he_dev
, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1458 G0_RBPS_BS
+ (group
* 32));
1460 he_writel(he_dev
, 0x0, G0_RBPL_S
+ (group
* 32));
1461 he_writel(he_dev
, 0x0, G0_RBPL_T
+ (group
* 32));
1462 he_writel(he_dev
, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1463 G0_RBPL_QI
+ (group
* 32));
1464 he_writel(he_dev
, 0x0, G0_RBPL_BS
+ (group
* 32));
1466 he_writel(he_dev
, 0x0, G0_RBRQ_ST
+ (group
* 16));
1467 he_writel(he_dev
, 0x0, G0_RBRQ_H
+ (group
* 16));
1468 he_writel(he_dev
, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1469 G0_RBRQ_Q
+ (group
* 16));
1470 he_writel(he_dev
, 0x0, G0_RBRQ_I
+ (group
* 16));
1472 he_writel(he_dev
, 0x0, G0_TBRQ_B_T
+ (group
* 16));
1473 he_writel(he_dev
, 0x0, G0_TBRQ_H
+ (group
* 16));
1474 he_writel(he_dev
, TBRQ_THRESH(0x1),
1475 G0_TBRQ_THRESH
+ (group
* 16));
1476 he_writel(he_dev
, 0x0, G0_TBRQ_S
+ (group
* 16));
1479 /* host status page */
1481 he_dev
->hsp
= pci_alloc_consistent(he_dev
->pci_dev
,
1482 sizeof(struct he_hsp
), &he_dev
->hsp_phys
);
1483 if (he_dev
->hsp
== NULL
) {
1484 hprintk("failed to allocate host status page\n");
1487 memset(he_dev
->hsp
, 0, sizeof(struct he_hsp
));
1488 he_writel(he_dev
, he_dev
->hsp_phys
, HSP_BA
);
1490 /* initialize framer */
1492 #ifdef CONFIG_ATM_HE_USE_SUNI
1493 if (he_isMM(he_dev
))
1494 suni_init(he_dev
->atm_dev
);
1495 if (he_dev
->atm_dev
->phy
&& he_dev
->atm_dev
->phy
->start
)
1496 he_dev
->atm_dev
->phy
->start(he_dev
->atm_dev
);
1497 #endif /* CONFIG_ATM_HE_USE_SUNI */
1500 /* this really should be in suni.c but for now... */
1503 val
= he_phy_get(he_dev
->atm_dev
, SUNI_TPOP_APM
);
1504 val
= (val
& ~SUNI_TPOP_APM_S
) | (SUNI_TPOP_S_SDH
<< SUNI_TPOP_APM_S_SHIFT
);
1505 he_phy_put(he_dev
->atm_dev
, val
, SUNI_TPOP_APM
);
1506 he_phy_put(he_dev
->atm_dev
, SUNI_TACP_IUCHP_CLP
, SUNI_TACP_IUCHP
);
1509 /* 5.1.12 enable transmit and receive */
1511 reg
= he_readl_mbox(he_dev
, CS_ERCTL0
);
1512 reg
|= TX_ENABLE
|ER_ENABLE
;
1513 he_writel_mbox(he_dev
, reg
, CS_ERCTL0
);
1515 reg
= he_readl(he_dev
, RC_CONFIG
);
1517 he_writel(he_dev
, reg
, RC_CONFIG
);
1519 for (i
= 0; i
< HE_NUM_CS_STPER
; ++i
) {
1520 he_dev
->cs_stper
[i
].inuse
= 0;
1521 he_dev
->cs_stper
[i
].pcr
= -1;
1523 he_dev
->total_bw
= 0;
1526 /* atm linux initialization */
1528 he_dev
->atm_dev
->ci_range
.vpi_bits
= he_dev
->vpibits
;
1529 he_dev
->atm_dev
->ci_range
.vci_bits
= he_dev
->vcibits
;
1531 he_dev
->irq_peak
= 0;
1532 he_dev
->rbrq_peak
= 0;
1533 he_dev
->rbpl_peak
= 0;
1534 he_dev
->tbrq_peak
= 0;
1536 HPRINTK("hell bent for leather!\n");
1542 he_stop(struct he_dev
*he_dev
)
1544 struct he_buff
*heb
, *next
;
1545 struct pci_dev
*pci_dev
;
1546 u32 gen_cntl_0
, reg
;
1549 pci_dev
= he_dev
->pci_dev
;
1551 /* disable interrupts */
1553 if (he_dev
->membase
) {
1554 pci_read_config_dword(pci_dev
, GEN_CNTL_0
, &gen_cntl_0
);
1555 gen_cntl_0
&= ~(INT_PROC_ENBL
| INIT_ENB
);
1556 pci_write_config_dword(pci_dev
, GEN_CNTL_0
, gen_cntl_0
);
1558 tasklet_disable(&he_dev
->tasklet
);
1560 /* disable recv and transmit */
1562 reg
= he_readl_mbox(he_dev
, CS_ERCTL0
);
1563 reg
&= ~(TX_ENABLE
|ER_ENABLE
);
1564 he_writel_mbox(he_dev
, reg
, CS_ERCTL0
);
1566 reg
= he_readl(he_dev
, RC_CONFIG
);
1567 reg
&= ~(RX_ENABLE
);
1568 he_writel(he_dev
, reg
, RC_CONFIG
);
1571 #ifdef CONFIG_ATM_HE_USE_SUNI
1572 if (he_dev
->atm_dev
->phy
&& he_dev
->atm_dev
->phy
->stop
)
1573 he_dev
->atm_dev
->phy
->stop(he_dev
->atm_dev
);
1574 #endif /* CONFIG_ATM_HE_USE_SUNI */
1577 free_irq(he_dev
->irq
, he_dev
);
1579 if (he_dev
->irq_base
)
1580 pci_free_consistent(he_dev
->pci_dev
, (CONFIG_IRQ_SIZE
+1)
1581 * sizeof(struct he_irq
), he_dev
->irq_base
, he_dev
->irq_phys
);
1584 pci_free_consistent(he_dev
->pci_dev
, sizeof(struct he_hsp
),
1585 he_dev
->hsp
, he_dev
->hsp_phys
);
1587 if (he_dev
->rbpl_base
) {
1588 list_for_each_entry_safe(heb
, next
, &he_dev
->rbpl_outstanding
, entry
)
1589 pci_pool_free(he_dev
->rbpl_pool
, heb
, heb
->mapping
);
1591 pci_free_consistent(he_dev
->pci_dev
, CONFIG_RBPL_SIZE
1592 * sizeof(struct he_rbp
), he_dev
->rbpl_base
, he_dev
->rbpl_phys
);
1595 kfree(he_dev
->rbpl_virt
);
1596 kfree(he_dev
->rbpl_table
);
1598 if (he_dev
->rbpl_pool
)
1599 pci_pool_destroy(he_dev
->rbpl_pool
);
1601 if (he_dev
->rbrq_base
)
1602 pci_free_consistent(he_dev
->pci_dev
, CONFIG_RBRQ_SIZE
* sizeof(struct he_rbrq
),
1603 he_dev
->rbrq_base
, he_dev
->rbrq_phys
);
1605 if (he_dev
->tbrq_base
)
1606 pci_free_consistent(he_dev
->pci_dev
, CONFIG_TBRQ_SIZE
* sizeof(struct he_tbrq
),
1607 he_dev
->tbrq_base
, he_dev
->tbrq_phys
);
1609 if (he_dev
->tpdrq_base
)
1610 pci_free_consistent(he_dev
->pci_dev
, CONFIG_TBRQ_SIZE
* sizeof(struct he_tbrq
),
1611 he_dev
->tpdrq_base
, he_dev
->tpdrq_phys
);
1613 if (he_dev
->tpd_pool
)
1614 pci_pool_destroy(he_dev
->tpd_pool
);
1616 if (he_dev
->pci_dev
) {
1617 pci_read_config_word(he_dev
->pci_dev
, PCI_COMMAND
, &command
);
1618 command
&= ~(PCI_COMMAND_MEMORY
| PCI_COMMAND_MASTER
);
1619 pci_write_config_word(he_dev
->pci_dev
, PCI_COMMAND
, command
);
1622 if (he_dev
->membase
)
1623 iounmap(he_dev
->membase
);
1626 static struct he_tpd
*
1627 __alloc_tpd(struct he_dev
*he_dev
)
1632 tpd
= pci_pool_alloc(he_dev
->tpd_pool
, GFP_ATOMIC
|GFP_DMA
, &mapping
);
1636 tpd
->status
= TPD_ADDR(mapping
);
1638 tpd
->iovec
[0].addr
= 0; tpd
->iovec
[0].len
= 0;
1639 tpd
->iovec
[1].addr
= 0; tpd
->iovec
[1].len
= 0;
1640 tpd
->iovec
[2].addr
= 0; tpd
->iovec
[2].len
= 0;
/*
 * AAL5_LEN: pull the 16-bit pdu length out of the aal5 trailer --
 * bytes len-6 (high) and len-5 (low) of the reassembled buffer.
 */
#define AAL5_LEN(buf,len) 						\
			((((unsigned char *)(buf))[(len)-6] << 8) |	\
				(((unsigned char *)(buf))[(len)-5]))

/*
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM): bytes len-2 (high) and len-1 (low).
 */
#define TCP_CKSUM(buf,len) 						\
			((((unsigned char *)(buf))[(len)-2] << 8) |	\
				(((unsigned char *)(buf))[(len-1)]))
1660 he_service_rbrq(struct he_dev
*he_dev
, int group
)
1662 struct he_rbrq
*rbrq_tail
= (struct he_rbrq
*)
1663 ((unsigned long)he_dev
->rbrq_base
|
1664 he_dev
->hsp
->group
[group
].rbrq_tail
);
1665 unsigned cid
, lastcid
= -1;
1666 struct sk_buff
*skb
;
1667 struct atm_vcc
*vcc
= NULL
;
1668 struct he_vcc
*he_vcc
;
1669 struct he_buff
*heb
, *next
;
1671 int pdus_assembled
= 0;
1674 read_lock(&vcc_sklist_lock
);
1675 while (he_dev
->rbrq_head
!= rbrq_tail
) {
1678 HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1679 he_dev
->rbrq_head
, group
,
1680 RBRQ_ADDR(he_dev
->rbrq_head
),
1681 RBRQ_BUFLEN(he_dev
->rbrq_head
),
1682 RBRQ_CID(he_dev
->rbrq_head
),
1683 RBRQ_CRC_ERR(he_dev
->rbrq_head
) ? " CRC_ERR" : "",
1684 RBRQ_LEN_ERR(he_dev
->rbrq_head
) ? " LEN_ERR" : "",
1685 RBRQ_END_PDU(he_dev
->rbrq_head
) ? " END_PDU" : "",
1686 RBRQ_AAL5_PROT(he_dev
->rbrq_head
) ? " AAL5_PROT" : "",
1687 RBRQ_CON_CLOSED(he_dev
->rbrq_head
) ? " CON_CLOSED" : "",
1688 RBRQ_HBUF_ERR(he_dev
->rbrq_head
) ? " HBUF_ERR" : "");
1690 i
= RBRQ_ADDR(he_dev
->rbrq_head
) >> RBP_IDX_OFFSET
;
1691 heb
= he_dev
->rbpl_virt
[i
];
1693 cid
= RBRQ_CID(he_dev
->rbrq_head
);
1695 vcc
= __find_vcc(he_dev
, cid
);
1698 if (vcc
== NULL
|| (he_vcc
= HE_VCC(vcc
)) == NULL
) {
1699 hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid
);
1700 if (!RBRQ_HBUF_ERR(he_dev
->rbrq_head
)) {
1701 clear_bit(i
, he_dev
->rbpl_table
);
1702 list_del(&heb
->entry
);
1703 pci_pool_free(he_dev
->rbpl_pool
, heb
, heb
->mapping
);
1706 goto next_rbrq_entry
;
1709 if (RBRQ_HBUF_ERR(he_dev
->rbrq_head
)) {
1710 hprintk("HBUF_ERR! (cid 0x%x)\n", cid
);
1711 atomic_inc(&vcc
->stats
->rx_drop
);
1712 goto return_host_buffers
;
1715 heb
->len
= RBRQ_BUFLEN(he_dev
->rbrq_head
) * 4;
1716 clear_bit(i
, he_dev
->rbpl_table
);
1717 list_move_tail(&heb
->entry
, &he_vcc
->buffers
);
1718 he_vcc
->pdu_len
+= heb
->len
;
1720 if (RBRQ_CON_CLOSED(he_dev
->rbrq_head
)) {
1722 HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid
);
1723 wake_up(&he_vcc
->rx_waitq
);
1724 goto return_host_buffers
;
1727 if (!RBRQ_END_PDU(he_dev
->rbrq_head
))
1728 goto next_rbrq_entry
;
1730 if (RBRQ_LEN_ERR(he_dev
->rbrq_head
)
1731 || RBRQ_CRC_ERR(he_dev
->rbrq_head
)) {
1732 HPRINTK("%s%s (%d.%d)\n",
1733 RBRQ_CRC_ERR(he_dev
->rbrq_head
)
1735 RBRQ_LEN_ERR(he_dev
->rbrq_head
)
1737 vcc
->vpi
, vcc
->vci
);
1738 atomic_inc(&vcc
->stats
->rx_err
);
1739 goto return_host_buffers
;
1742 skb
= atm_alloc_charge(vcc
, he_vcc
->pdu_len
+ rx_skb_reserve
,
1745 HPRINTK("charge failed (%d.%d)\n", vcc
->vpi
, vcc
->vci
);
1746 goto return_host_buffers
;
1749 if (rx_skb_reserve
> 0)
1750 skb_reserve(skb
, rx_skb_reserve
);
1752 __net_timestamp(skb
);
1754 list_for_each_entry(heb
, &he_vcc
->buffers
, entry
)
1755 memcpy(skb_put(skb
, heb
->len
), &heb
->data
, heb
->len
);
1757 switch (vcc
->qos
.aal
) {
1759 /* 2.10.1.5 raw cell receive */
1760 skb
->len
= ATM_AAL0_SDU
;
1761 skb_set_tail_pointer(skb
, skb
->len
);
1764 /* 2.10.1.2 aal5 receive */
1766 skb
->len
= AAL5_LEN(skb
->data
, he_vcc
->pdu_len
);
1767 skb_set_tail_pointer(skb
, skb
->len
);
1768 #ifdef USE_CHECKSUM_HW
1769 if (vcc
->vpi
== 0 && vcc
->vci
>= ATM_NOT_RSV_VCI
) {
1770 skb
->ip_summed
= CHECKSUM_COMPLETE
;
1771 skb
->csum
= TCP_CKSUM(skb
->data
,
1778 #ifdef should_never_happen
1779 if (skb
->len
> vcc
->qos
.rxtp
.max_sdu
)
1780 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb
->len
, vcc
->qos
.rxtp
.max_sdu
, cid
);
1784 ATM_SKB(skb
)->vcc
= vcc
;
1786 spin_unlock(&he_dev
->global_lock
);
1787 vcc
->push(vcc
, skb
);
1788 spin_lock(&he_dev
->global_lock
);
1790 atomic_inc(&vcc
->stats
->rx
);
1792 return_host_buffers
:
1795 list_for_each_entry_safe(heb
, next
, &he_vcc
->buffers
, entry
)
1796 pci_pool_free(he_dev
->rbpl_pool
, heb
, heb
->mapping
);
1797 INIT_LIST_HEAD(&he_vcc
->buffers
);
1798 he_vcc
->pdu_len
= 0;
1801 he_dev
->rbrq_head
= (struct he_rbrq
*)
1802 ((unsigned long) he_dev
->rbrq_base
|
1803 RBRQ_MASK(++he_dev
->rbrq_head
));
1806 read_unlock(&vcc_sklist_lock
);
1809 if (updated
> he_dev
->rbrq_peak
)
1810 he_dev
->rbrq_peak
= updated
;
1812 he_writel(he_dev
, RBRQ_MASK(he_dev
->rbrq_head
),
1813 G0_RBRQ_H
+ (group
* 16));
1816 return pdus_assembled
;
1820 he_service_tbrq(struct he_dev
*he_dev
, int group
)
1822 struct he_tbrq
*tbrq_tail
= (struct he_tbrq
*)
1823 ((unsigned long)he_dev
->tbrq_base
|
1824 he_dev
->hsp
->group
[group
].tbrq_tail
);
1826 int slot
, updated
= 0;
1827 struct he_tpd
*__tpd
;
1829 /* 2.1.6 transmit buffer return queue */
1831 while (he_dev
->tbrq_head
!= tbrq_tail
) {
1834 HPRINTK("tbrq%d 0x%x%s%s\n",
1836 TBRQ_TPD(he_dev
->tbrq_head
),
1837 TBRQ_EOS(he_dev
->tbrq_head
) ? " EOS" : "",
1838 TBRQ_MULTIPLE(he_dev
->tbrq_head
) ? " MULTIPLE" : "");
1840 list_for_each_entry(__tpd
, &he_dev
->outstanding_tpds
, entry
) {
1841 if (TPD_ADDR(__tpd
->status
) == TBRQ_TPD(he_dev
->tbrq_head
)) {
1843 list_del(&__tpd
->entry
);
1849 hprintk("unable to locate tpd for dma buffer %x\n",
1850 TBRQ_TPD(he_dev
->tbrq_head
));
1851 goto next_tbrq_entry
;
1854 if (TBRQ_EOS(he_dev
->tbrq_head
)) {
1855 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1856 he_mkcid(he_dev
, tpd
->vcc
->vpi
, tpd
->vcc
->vci
));
1858 wake_up(&HE_VCC(tpd
->vcc
)->tx_waitq
);
1860 goto next_tbrq_entry
;
1863 for (slot
= 0; slot
< TPD_MAXIOV
; ++slot
) {
1864 if (tpd
->iovec
[slot
].addr
)
1865 pci_unmap_single(he_dev
->pci_dev
,
1866 tpd
->iovec
[slot
].addr
,
1867 tpd
->iovec
[slot
].len
& TPD_LEN_MASK
,
1869 if (tpd
->iovec
[slot
].len
& TPD_LST
)
1874 if (tpd
->skb
) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1875 if (tpd
->vcc
&& tpd
->vcc
->pop
)
1876 tpd
->vcc
->pop(tpd
->vcc
, tpd
->skb
);
1878 dev_kfree_skb_any(tpd
->skb
);
1883 pci_pool_free(he_dev
->tpd_pool
, tpd
, TPD_ADDR(tpd
->status
));
1884 he_dev
->tbrq_head
= (struct he_tbrq
*)
1885 ((unsigned long) he_dev
->tbrq_base
|
1886 TBRQ_MASK(++he_dev
->tbrq_head
));
1890 if (updated
> he_dev
->tbrq_peak
)
1891 he_dev
->tbrq_peak
= updated
;
1893 he_writel(he_dev
, TBRQ_MASK(he_dev
->tbrq_head
),
1894 G0_TBRQ_H
+ (group
* 16));
1899 he_service_rbpl(struct he_dev
*he_dev
, int group
)
1901 struct he_rbp
*new_tail
;
1902 struct he_rbp
*rbpl_head
;
1903 struct he_buff
*heb
;
1908 rbpl_head
= (struct he_rbp
*) ((unsigned long)he_dev
->rbpl_base
|
1909 RBPL_MASK(he_readl(he_dev
, G0_RBPL_S
)));
1912 new_tail
= (struct he_rbp
*) ((unsigned long)he_dev
->rbpl_base
|
1913 RBPL_MASK(he_dev
->rbpl_tail
+1));
1915 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1916 if (new_tail
== rbpl_head
)
1919 i
= find_next_zero_bit(he_dev
->rbpl_table
, RBPL_TABLE_SIZE
, he_dev
->rbpl_hint
);
1920 if (i
> (RBPL_TABLE_SIZE
- 1)) {
1921 i
= find_first_zero_bit(he_dev
->rbpl_table
, RBPL_TABLE_SIZE
);
1922 if (i
> (RBPL_TABLE_SIZE
- 1))
1925 he_dev
->rbpl_hint
= i
+ 1;
1927 heb
= pci_pool_alloc(he_dev
->rbpl_pool
, GFP_ATOMIC
|GFP_DMA
, &mapping
);
1930 heb
->mapping
= mapping
;
1931 list_add(&heb
->entry
, &he_dev
->rbpl_outstanding
);
1932 he_dev
->rbpl_virt
[i
] = heb
;
1933 set_bit(i
, he_dev
->rbpl_table
);
1934 new_tail
->idx
= i
<< RBP_IDX_OFFSET
;
1935 new_tail
->phys
= mapping
+ offsetof(struct he_buff
, data
);
1937 he_dev
->rbpl_tail
= new_tail
;
1942 he_writel(he_dev
, RBPL_MASK(he_dev
->rbpl_tail
), G0_RBPL_T
);
1946 he_tasklet(unsigned long data
)
1948 unsigned long flags
;
1949 struct he_dev
*he_dev
= (struct he_dev
*) data
;
1953 HPRINTK("tasklet (0x%lx)\n", data
);
1954 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
1956 while (he_dev
->irq_head
!= he_dev
->irq_tail
) {
1959 type
= ITYPE_TYPE(he_dev
->irq_head
->isw
);
1960 group
= ITYPE_GROUP(he_dev
->irq_head
->isw
);
1963 case ITYPE_RBRQ_THRESH
:
1964 HPRINTK("rbrq%d threshold\n", group
);
1966 case ITYPE_RBRQ_TIMER
:
1967 if (he_service_rbrq(he_dev
, group
))
1968 he_service_rbpl(he_dev
, group
);
1970 case ITYPE_TBRQ_THRESH
:
1971 HPRINTK("tbrq%d threshold\n", group
);
1973 case ITYPE_TPD_COMPLETE
:
1974 he_service_tbrq(he_dev
, group
);
1976 case ITYPE_RBPL_THRESH
:
1977 he_service_rbpl(he_dev
, group
);
1979 case ITYPE_RBPS_THRESH
:
1980 /* shouldn't happen unless small buffers enabled */
1983 HPRINTK("phy interrupt\n");
1984 #ifdef CONFIG_ATM_HE_USE_SUNI
1985 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
1986 if (he_dev
->atm_dev
->phy
&& he_dev
->atm_dev
->phy
->interrupt
)
1987 he_dev
->atm_dev
->phy
->interrupt(he_dev
->atm_dev
);
1988 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
1992 switch (type
|group
) {
1994 hprintk("parity error\n");
1997 hprintk("abort 0x%x\n", he_readl(he_dev
, ABORT_ADDR
));
2001 case ITYPE_TYPE(ITYPE_INVALID
):
2002 /* see 8.1.1 -- check all queues */
2004 HPRINTK("isw not updated 0x%x\n", he_dev
->irq_head
->isw
);
2006 he_service_rbrq(he_dev
, 0);
2007 he_service_rbpl(he_dev
, 0);
2008 he_service_tbrq(he_dev
, 0);
2011 hprintk("bad isw 0x%x?\n", he_dev
->irq_head
->isw
);
2014 he_dev
->irq_head
->isw
= ITYPE_INVALID
;
2016 he_dev
->irq_head
= (struct he_irq
*) NEXT_ENTRY(he_dev
->irq_base
, he_dev
->irq_head
, IRQ_MASK
);
2020 if (updated
> he_dev
->irq_peak
)
2021 he_dev
->irq_peak
= updated
;
2024 IRQ_SIZE(CONFIG_IRQ_SIZE
) |
2025 IRQ_THRESH(CONFIG_IRQ_THRESH
) |
2026 IRQ_TAIL(he_dev
->irq_tail
), IRQ0_HEAD
);
2027 (void) he_readl(he_dev
, INT_FIFO
); /* 8.1.2 controller errata; flush posted writes */
2029 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2033 he_irq_handler(int irq
, void *dev_id
)
2035 unsigned long flags
;
2036 struct he_dev
*he_dev
= (struct he_dev
* )dev_id
;
2042 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2044 he_dev
->irq_tail
= (struct he_irq
*) (((unsigned long)he_dev
->irq_base
) |
2045 (*he_dev
->irq_tailoffset
<< 2));
2047 if (he_dev
->irq_tail
== he_dev
->irq_head
) {
2048 HPRINTK("tailoffset not updated?\n");
2049 he_dev
->irq_tail
= (struct he_irq
*) ((unsigned long)he_dev
->irq_base
|
2050 ((he_readl(he_dev
, IRQ0_BASE
) & IRQ_MASK
) << 2));
2051 (void) he_readl(he_dev
, INT_FIFO
); /* 8.1.2 controller errata */
2055 if (he_dev
->irq_head
== he_dev
->irq_tail
/* && !IRQ_PENDING */)
2056 hprintk("spurious (or shared) interrupt?\n");
2059 if (he_dev
->irq_head
!= he_dev
->irq_tail
) {
2061 tasklet_schedule(&he_dev
->tasklet
);
2062 he_writel(he_dev
, INT_CLEAR_A
, INT_FIFO
); /* clear interrupt */
2063 (void) he_readl(he_dev
, INT_FIFO
); /* flush posted writes */
2065 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2066 return IRQ_RETVAL(handled
);
2070 static __inline__
void
2071 __enqueue_tpd(struct he_dev
*he_dev
, struct he_tpd
*tpd
, unsigned cid
)
2073 struct he_tpdrq
*new_tail
;
2075 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2076 tpd
, cid
, he_dev
->tpdrq_tail
);
2078 /* new_tail = he_dev->tpdrq_tail; */
2079 new_tail
= (struct he_tpdrq
*) ((unsigned long) he_dev
->tpdrq_base
|
2080 TPDRQ_MASK(he_dev
->tpdrq_tail
+1));
2083 * check to see if we are about to set the tail == head
2084 * if true, update the head pointer from the adapter
2085 * to see if this is really the case (reading the queue
2086 * head for every enqueue would be unnecessarily slow)
2089 if (new_tail
== he_dev
->tpdrq_head
) {
2090 he_dev
->tpdrq_head
= (struct he_tpdrq
*)
2091 (((unsigned long)he_dev
->tpdrq_base
) |
2092 TPDRQ_MASK(he_readl(he_dev
, TPDRQ_B_H
)));
2094 if (new_tail
== he_dev
->tpdrq_head
) {
2097 hprintk("tpdrq full (cid 0x%x)\n", cid
);
2100 * push tpd onto a transmit backlog queue
2101 * after service_tbrq, service the backlog
2102 * for now, we just drop the pdu
2104 for (slot
= 0; slot
< TPD_MAXIOV
; ++slot
) {
2105 if (tpd
->iovec
[slot
].addr
)
2106 pci_unmap_single(he_dev
->pci_dev
,
2107 tpd
->iovec
[slot
].addr
,
2108 tpd
->iovec
[slot
].len
& TPD_LEN_MASK
,
2113 tpd
->vcc
->pop(tpd
->vcc
, tpd
->skb
);
2115 dev_kfree_skb_any(tpd
->skb
);
2116 atomic_inc(&tpd
->vcc
->stats
->tx_err
);
2118 pci_pool_free(he_dev
->tpd_pool
, tpd
, TPD_ADDR(tpd
->status
));
2123 /* 2.1.5 transmit packet descriptor ready queue */
2124 list_add_tail(&tpd
->entry
, &he_dev
->outstanding_tpds
);
2125 he_dev
->tpdrq_tail
->tpd
= TPD_ADDR(tpd
->status
);
2126 he_dev
->tpdrq_tail
->cid
= cid
;
2129 he_dev
->tpdrq_tail
= new_tail
;
2131 he_writel(he_dev
, TPDRQ_MASK(he_dev
->tpdrq_tail
), TPDRQ_T
);
2132 (void) he_readl(he_dev
, TPDRQ_T
); /* flush posted writes */
2136 he_open(struct atm_vcc
*vcc
)
2138 unsigned long flags
;
2139 struct he_dev
*he_dev
= HE_DEV(vcc
->dev
);
2140 struct he_vcc
*he_vcc
;
2142 unsigned cid
, rsr0
, rsr1
, rsr4
, tsr0
, tsr0_aal
, tsr4
, period
, reg
, clock
;
2143 short vpi
= vcc
->vpi
;
2146 if (vci
== ATM_VCI_UNSPEC
|| vpi
== ATM_VPI_UNSPEC
)
2149 HPRINTK("open vcc %p %d.%d\n", vcc
, vpi
, vci
);
2151 set_bit(ATM_VF_ADDR
, &vcc
->flags
);
2153 cid
= he_mkcid(he_dev
, vpi
, vci
);
2155 he_vcc
= kmalloc(sizeof(struct he_vcc
), GFP_ATOMIC
);
2156 if (he_vcc
== NULL
) {
2157 hprintk("unable to allocate he_vcc during open\n");
2161 INIT_LIST_HEAD(&he_vcc
->buffers
);
2162 he_vcc
->pdu_len
= 0;
2163 he_vcc
->rc_index
= -1;
2165 init_waitqueue_head(&he_vcc
->rx_waitq
);
2166 init_waitqueue_head(&he_vcc
->tx_waitq
);
2168 vcc
->dev_data
= he_vcc
;
2170 if (vcc
->qos
.txtp
.traffic_class
!= ATM_NONE
) {
2173 pcr_goal
= atm_pcr_goal(&vcc
->qos
.txtp
);
2175 pcr_goal
= he_dev
->atm_dev
->link_rate
;
2176 if (pcr_goal
< 0) /* means round down, technically */
2177 pcr_goal
= -pcr_goal
;
2179 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid
, pcr_goal
);
2181 switch (vcc
->qos
.aal
) {
2183 tsr0_aal
= TSR0_AAL5
;
2187 tsr0_aal
= TSR0_AAL0_SDU
;
2188 tsr4
= TSR4_AAL0_SDU
;
2195 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2196 tsr0
= he_readl_tsr0(he_dev
, cid
);
2197 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2199 if (TSR0_CONN_STATE(tsr0
) != 0) {
2200 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid
, tsr0
);
2205 switch (vcc
->qos
.txtp
.traffic_class
) {
2207 /* 2.3.3.1 open connection ubr */
2209 tsr0
= TSR0_UBR
| TSR0_GROUP(0) | tsr0_aal
|
2210 TSR0_USE_WMIN
| TSR0_UPDATE_GER
;
2214 /* 2.3.3.2 open connection cbr */
2216 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2217 if ((he_dev
->total_bw
+ pcr_goal
)
2218 > (he_dev
->atm_dev
->link_rate
* 9 / 10))
2224 spin_lock_irqsave(&he_dev
->global_lock
, flags
); /* also protects he_dev->cs_stper[] */
2226 /* find an unused cs_stper register */
2227 for (reg
= 0; reg
< HE_NUM_CS_STPER
; ++reg
)
2228 if (he_dev
->cs_stper
[reg
].inuse
== 0 ||
2229 he_dev
->cs_stper
[reg
].pcr
== pcr_goal
)
2232 if (reg
== HE_NUM_CS_STPER
) {
2234 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2238 he_dev
->total_bw
+= pcr_goal
;
2240 he_vcc
->rc_index
= reg
;
2241 ++he_dev
->cs_stper
[reg
].inuse
;
2242 he_dev
->cs_stper
[reg
].pcr
= pcr_goal
;
2244 clock
= he_is622(he_dev
) ? 66667000 : 50000000;
2245 period
= clock
/ pcr_goal
;
2247 HPRINTK("rc_index = %d period = %d\n",
2250 he_writel_mbox(he_dev
, rate_to_atmf(period
/2),
2252 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2254 tsr0
= TSR0_CBR
| TSR0_GROUP(0) | tsr0_aal
|
2263 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2265 he_writel_tsr0(he_dev
, tsr0
, cid
);
2266 he_writel_tsr4(he_dev
, tsr4
| 1, cid
);
2267 he_writel_tsr1(he_dev
, TSR1_MCR(rate_to_atmf(0)) |
2268 TSR1_PCR(rate_to_atmf(pcr_goal
)), cid
);
2269 he_writel_tsr2(he_dev
, TSR2_ACR(rate_to_atmf(pcr_goal
)), cid
);
2270 he_writel_tsr9(he_dev
, TSR9_OPEN_CONN
, cid
);
2272 he_writel_tsr3(he_dev
, 0x0, cid
);
2273 he_writel_tsr5(he_dev
, 0x0, cid
);
2274 he_writel_tsr6(he_dev
, 0x0, cid
);
2275 he_writel_tsr7(he_dev
, 0x0, cid
);
2276 he_writel_tsr8(he_dev
, 0x0, cid
);
2277 he_writel_tsr10(he_dev
, 0x0, cid
);
2278 he_writel_tsr11(he_dev
, 0x0, cid
);
2279 he_writel_tsr12(he_dev
, 0x0, cid
);
2280 he_writel_tsr13(he_dev
, 0x0, cid
);
2281 he_writel_tsr14(he_dev
, 0x0, cid
);
2282 (void) he_readl_tsr0(he_dev
, cid
); /* flush posted writes */
2283 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2286 if (vcc
->qos
.rxtp
.traffic_class
!= ATM_NONE
) {
2289 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid
,
2290 &HE_VCC(vcc
)->rx_waitq
);
2292 switch (vcc
->qos
.aal
) {
2304 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2306 rsr0
= he_readl_rsr0(he_dev
, cid
);
2307 if (rsr0
& RSR0_OPEN_CONN
) {
2308 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2310 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid
, rsr0
);
2315 rsr1
= RSR1_GROUP(0) | RSR1_RBPL_ONLY
;
2316 rsr4
= RSR4_GROUP(0) | RSR4_RBPL_ONLY
;
2317 rsr0
= vcc
->qos
.rxtp
.traffic_class
== ATM_UBR
?
2318 (RSR0_EPD_ENABLE
|RSR0_PPD_ENABLE
) : 0;
2320 #ifdef USE_CHECKSUM_HW
2321 if (vpi
== 0 && vci
>= ATM_NOT_RSV_VCI
)
2322 rsr0
|= RSR0_TCP_CKSUM
;
2325 he_writel_rsr4(he_dev
, rsr4
, cid
);
2326 he_writel_rsr1(he_dev
, rsr1
, cid
);
2327 /* 5.1.11 last parameter initialized should be
2328 the open/closed indication in rsr0 */
2329 he_writel_rsr0(he_dev
,
2330 rsr0
| RSR0_START_PDU
| RSR0_OPEN_CONN
| aal
, cid
);
2331 (void) he_readl_rsr0(he_dev
, cid
); /* flush posted writes */
2333 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2340 clear_bit(ATM_VF_ADDR
, &vcc
->flags
);
2343 set_bit(ATM_VF_READY
, &vcc
->flags
);
2349 he_close(struct atm_vcc
*vcc
)
2351 unsigned long flags
;
2352 DECLARE_WAITQUEUE(wait
, current
);
2353 struct he_dev
*he_dev
= HE_DEV(vcc
->dev
);
2356 struct he_vcc
*he_vcc
= HE_VCC(vcc
);
2357 #define MAX_RETRY 30
2358 int retry
= 0, sleep
= 1, tx_inuse
;
2360 HPRINTK("close vcc %p %d.%d\n", vcc
, vcc
->vpi
, vcc
->vci
);
2362 clear_bit(ATM_VF_READY
, &vcc
->flags
);
2363 cid
= he_mkcid(he_dev
, vcc
->vpi
, vcc
->vci
);
2365 if (vcc
->qos
.rxtp
.traffic_class
!= ATM_NONE
) {
2368 HPRINTK("close rx cid 0x%x\n", cid
);
2370 /* 2.7.2.2 close receive operation */
2372 /* wait for previous close (if any) to finish */
2374 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2375 while (he_readl(he_dev
, RCC_STAT
) & RCC_BUSY
) {
2376 HPRINTK("close cid 0x%x RCC_BUSY\n", cid
);
2380 set_current_state(TASK_UNINTERRUPTIBLE
);
2381 add_wait_queue(&he_vcc
->rx_waitq
, &wait
);
2383 he_writel_rsr0(he_dev
, RSR0_CLOSE_CONN
, cid
);
2384 (void) he_readl_rsr0(he_dev
, cid
); /* flush posted writes */
2385 he_writel_mbox(he_dev
, cid
, RXCON_CLOSE
);
2386 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2388 timeout
= schedule_timeout(30*HZ
);
2390 remove_wait_queue(&he_vcc
->rx_waitq
, &wait
);
2391 set_current_state(TASK_RUNNING
);
2394 hprintk("close rx timeout cid 0x%x\n", cid
);
2396 HPRINTK("close rx cid 0x%x complete\n", cid
);
2400 if (vcc
->qos
.txtp
.traffic_class
!= ATM_NONE
) {
2401 volatile unsigned tsr4
, tsr0
;
2404 HPRINTK("close tx cid 0x%x\n", cid
);
2408 * ... the host must first stop queueing packets to the TPDRQ
2409 * on the connection to be closed, then wait for all outstanding
2410 * packets to be transmitted and their buffers returned to the
2411 * TBRQ. When the last packet on the connection arrives in the
2412 * TBRQ, the host issues the close command to the adapter.
2415 while (((tx_inuse
= atomic_read(&sk_atm(vcc
)->sk_wmem_alloc
)) > 1) &&
2416 (retry
< MAX_RETRY
)) {
2425 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid
, tx_inuse
);
2427 /* 2.3.1.1 generic close operations with flush */
2429 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2430 he_writel_tsr4_upper(he_dev
, TSR4_FLUSH_CONN
, cid
);
2431 /* also clears TSR4_SESSION_ENDED */
2433 switch (vcc
->qos
.txtp
.traffic_class
) {
2435 he_writel_tsr1(he_dev
,
2436 TSR1_MCR(rate_to_atmf(200000))
2437 | TSR1_PCR(0), cid
);
2440 he_writel_tsr14_upper(he_dev
, TSR14_DELETE
, cid
);
2443 (void) he_readl_tsr4(he_dev
, cid
); /* flush posted writes */
2445 tpd
= __alloc_tpd(he_dev
);
2447 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid
);
2448 goto close_tx_incomplete
;
2450 tpd
->status
|= TPD_EOS
| TPD_INT
;
2455 set_current_state(TASK_UNINTERRUPTIBLE
);
2456 add_wait_queue(&he_vcc
->tx_waitq
, &wait
);
2457 __enqueue_tpd(he_dev
, tpd
, cid
);
2458 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2460 timeout
= schedule_timeout(30*HZ
);
2462 remove_wait_queue(&he_vcc
->tx_waitq
, &wait
);
2463 set_current_state(TASK_RUNNING
);
2465 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2468 hprintk("close tx timeout cid 0x%x\n", cid
);
2469 goto close_tx_incomplete
;
2472 while (!((tsr4
= he_readl_tsr4(he_dev
, cid
)) & TSR4_SESSION_ENDED
)) {
2473 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid
, tsr4
);
2477 while (TSR0_CONN_STATE(tsr0
= he_readl_tsr0(he_dev
, cid
)) != 0) {
2478 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid
, tsr0
);
2482 close_tx_incomplete
:
2484 if (vcc
->qos
.txtp
.traffic_class
== ATM_CBR
) {
2485 int reg
= he_vcc
->rc_index
;
2487 HPRINTK("cs_stper reg = %d\n", reg
);
2489 if (he_dev
->cs_stper
[reg
].inuse
== 0)
2490 hprintk("cs_stper[%d].inuse = 0!\n", reg
);
2492 --he_dev
->cs_stper
[reg
].inuse
;
2494 he_dev
->total_bw
-= he_dev
->cs_stper
[reg
].pcr
;
2496 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2498 HPRINTK("close tx cid 0x%x complete\n", cid
);
2503 clear_bit(ATM_VF_ADDR
, &vcc
->flags
);
/*
 * he_send() -- atmdev_ops ->send() hook: queue one sk_buff for
 * transmission on the connection identified by (vcc->vpi, vcc->vci).
 *
 * Builds a TPD (transmit packet descriptor), DMA-maps the skb data
 * (per-fragment when USE_SCATTERGATHER is defined), and enqueues the
 * TPD on the adapter's transmit queue under he_dev->global_lock.
 *
 * NOTE(review): this extraction is missing several original lines
 * (return statements, the NULL check after __alloc_tpd, closing
 * braces); the surviving tokens are kept verbatim below.
 */
2507 he_send(struct atm_vcc
*vcc
, struct sk_buff
*skb
)
2509 unsigned long flags
;
2510 struct he_dev
*he_dev
= HE_DEV(vcc
->dev
);
/* cid: adapter connection id derived from the vcc's vpi/vci pair */
2511 unsigned cid
= he_mkcid(he_dev
, vcc
->vpi
, vcc
->vci
);
2513 #ifdef USE_SCATTERGATHER
/* upper bound on the payload a single TPD can describe */
2517 #define HE_TPD_BUFSIZE 0xffff
2519 HPRINTK("send %d.%d\n", vcc
->vpi
, vcc
->vci
);
/* reject oversized buffers; AAL0 requires exactly ATM_AAL0_SDU bytes */
2521 if ((skb
->len
> HE_TPD_BUFSIZE
) ||
2522 ((vcc
->qos
.aal
== ATM_AAL0
) && (skb
->len
!= ATM_AAL0_SDU
))) {
2523 hprintk("buffer too large (or small) -- %d bytes\n", skb
->len
);
2527 dev_kfree_skb_any(skb
);
2528 atomic_inc(&vcc
->stats
->tx_err
);
/* without scatter/gather support a fragmented skb cannot be sent */
2532 #ifndef USE_SCATTERGATHER
2533 if (skb_shinfo(skb
)->nr_frags
) {
2534 hprintk("no scatter/gather support\n");
2538 dev_kfree_skb_any(skb
);
2539 atomic_inc(&vcc
->stats
->tx_err
);
/* everything below runs with the device-global lock held */
2543 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2545 tpd
= __alloc_tpd(he_dev
);
/* (allocation-failure path: drop the skb, count tx_err, unlock) */
2550 dev_kfree_skb_any(skb
);
2551 atomic_inc(&vcc
->stats
->tx_err
);
2552 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2556 if (vcc
->qos
.aal
== ATM_AAL5
)
2557 tpd
->status
|= TPD_CELLTYPE(TPD_USERCELL
);
/* AAL0: PTI and CLP live in the 4th byte of the raw cell header */
2559 char *pti_clp
= (void *) (skb
->data
+ 3);
2562 pti
= (*pti_clp
& ATM_HDR_PTI_MASK
) >> ATM_HDR_PTI_SHIFT
;
2563 clp
= (*pti_clp
& ATM_HDR_CLP
);
2564 tpd
->status
|= TPD_CELLTYPE(pti
);
2566 tpd
->status
|= TPD_CLP
;
/* strip the cell-header bytes so only the payload is mapped */
2568 skb_pull(skb
, ATM_AAL0_SDU
- ATM_CELL_PAYLOAD
);
2571 #ifdef USE_SCATTERGATHER
/* iovec slot 0 covers the linear part of the skb */
2572 tpd
->iovec
[slot
].addr
= pci_map_single(he_dev
->pci_dev
, skb
->data
,
2573 skb_headlen(skb
), PCI_DMA_TODEVICE
);
2574 tpd
->iovec
[slot
].len
= skb_headlen(skb
);
/* map each page fragment into successive iovec slots */
2577 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
2578 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
2580 if (slot
== TPD_MAXIOV
) { /* queue tpd; start new tpd */
2582 tpd
->skb
= NULL
; /* not the last fragment
2583 so dont ->push() yet */
2586 __enqueue_tpd(he_dev
, tpd
, cid
);
2587 tpd
= __alloc_tpd(he_dev
);
/* (allocation-failure path for the continuation TPD) */
2592 dev_kfree_skb_any(skb
);
2593 atomic_inc(&vcc
->stats
->tx_err
);
2594 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2597 tpd
->status
|= TPD_USERCELL
;
2601 tpd
->iovec
[slot
].addr
= pci_map_single(he_dev
->pci_dev
,
2602 (void *) page_address(frag
->page
) + frag
->page_offset
,
2603 frag
->size
, PCI_DMA_TODEVICE
);
2604 tpd
->iovec
[slot
].len
= frag
->size
;
/* mark the final iovec entry as the last one in this TPD */
2609 tpd
->iovec
[slot
- 1].len
|= TPD_LST
;
/* non-scatter/gather path: one DMA mapping covers the whole skb */
2611 tpd
->address0
= pci_map_single(he_dev
->pci_dev
, skb
->data
, skb
->len
, PCI_DMA_TODEVICE
);
2612 tpd
->length0
= skb
->len
| TPD_LST
;
/* TPD_INT: request a completion interrupt for this (last) TPD */
2614 tpd
->status
|= TPD_INT
;
2619 ATM_SKB(skb
)->vcc
= vcc
;
2621 __enqueue_tpd(he_dev
, tpd
, cid
);
2622 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
/* account the successfully queued packet */
2624 atomic_inc(&vcc
->stats
->tx
);
2630 he_ioctl(struct atm_dev
*atm_dev
, unsigned int cmd
, void __user
*arg
)
2632 unsigned long flags
;
2633 struct he_dev
*he_dev
= HE_DEV(atm_dev
);
2634 struct he_ioctl_reg reg
;
2639 if (!capable(CAP_NET_ADMIN
))
2642 if (copy_from_user(®
, arg
,
2643 sizeof(struct he_ioctl_reg
)))
2646 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2648 case HE_REGTYPE_PCI
:
2649 if (reg
.addr
>= HE_REGMAP_SIZE
) {
2654 reg
.val
= he_readl(he_dev
, reg
.addr
);
2656 case HE_REGTYPE_RCM
:
2658 he_readl_rcm(he_dev
, reg
.addr
);
2660 case HE_REGTYPE_TCM
:
2662 he_readl_tcm(he_dev
, reg
.addr
);
2664 case HE_REGTYPE_MBOX
:
2666 he_readl_mbox(he_dev
, reg
.addr
);
2672 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2674 if (copy_to_user(arg
, ®
,
2675 sizeof(struct he_ioctl_reg
)))
2679 #ifdef CONFIG_ATM_HE_USE_SUNI
2680 if (atm_dev
->phy
&& atm_dev
->phy
->ioctl
)
2681 err
= atm_dev
->phy
->ioctl(atm_dev
, cmd
, arg
);
2682 #else /* CONFIG_ATM_HE_USE_SUNI */
2684 #endif /* CONFIG_ATM_HE_USE_SUNI */
2692 he_phy_put(struct atm_dev
*atm_dev
, unsigned char val
, unsigned long addr
)
2694 unsigned long flags
;
2695 struct he_dev
*he_dev
= HE_DEV(atm_dev
);
2697 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val
, addr
);
2699 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2700 he_writel(he_dev
, val
, FRAMER
+ (addr
*4));
2701 (void) he_readl(he_dev
, FRAMER
+ (addr
*4)); /* flush posted writes */
2702 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2706 static unsigned char
2707 he_phy_get(struct atm_dev
*atm_dev
, unsigned long addr
)
2709 unsigned long flags
;
2710 struct he_dev
*he_dev
= HE_DEV(atm_dev
);
2713 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2714 reg
= he_readl(he_dev
, FRAMER
+ (addr
*4));
2715 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2717 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr
, reg
);
/*
 * he_proc_read() -- atmdev_ops ->proc_read() hook: returns one line
 * of driver/adapter statistics per call, selected by the position
 * cursor *pos.
 *
 * NOTE(review): the extraction lost the "left = *pos" bookkeeping
 * and the "if (!left--)" guards that select which line to print,
 * plus several local declarations; surviving tokens kept verbatim.
 */
2722 he_proc_read(struct atm_dev
*dev
, loff_t
*pos
, char *page
)
2724 unsigned long flags
;
2725 struct he_dev
*he_dev
= HE_DEV(dev
);
2728 struct he_rbrq
*rbrq_tail
;
2729 struct he_tpdrq
*tpdrq_head
;
2730 int rbpl_head
, rbpl_tail
;
/* dropped-cell counters accumulate across reads (static storage) */
2732 static long mcc
= 0, oec
= 0, dcc
= 0, cec
= 0;
2737 return sprintf(page
, "ATM he driver\n");
/* product id plus media flag: bit 0x40 selects the "SM" label, else "MM" */
2740 return sprintf(page
, "%s%s\n\n",
2741 he_dev
->prod_id
, he_dev
->media
& 0x40 ? "SM" : "MM");
2744 return sprintf(page
, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
/* read-and-accumulate the hardware cell counters under the global lock */
2746 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2747 mcc
+= he_readl(he_dev
, MCC
);
2748 oec
+= he_readl(he_dev
, OEC
);
2749 dcc
+= he_readl(he_dev
, DCC
);
2750 cec
+= he_readl(he_dev
, CEC
);
2751 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2754 return sprintf(page
, "%16ld %16ld %13ld %17ld\n\n",
2755 mcc
, oec
, dcc
, cec
);
/* per-queue sizes and recorded high-water marks */
2758 return sprintf(page
, "irq_size = %d inuse = ? peak = %d\n",
2759 CONFIG_IRQ_SIZE
, he_dev
->irq_peak
);
2762 return sprintf(page
, "tpdrq_size = %d inuse = ?\n",
2766 return sprintf(page
, "rbrq_size = %d inuse = ? peak = %d\n",
2767 CONFIG_RBRQ_SIZE
, he_dev
->rbrq_peak
);
2770 return sprintf(page
, "tbrq_size = %d peak = %d\n",
2771 CONFIG_TBRQ_SIZE
, he_dev
->tbrq_peak
);
/* rbpl occupancy = head - tail, normalized modulo the pool size */
2775 rbpl_head
= RBPL_MASK(he_readl(he_dev
, G0_RBPL_S
));
2776 rbpl_tail
= RBPL_MASK(he_readl(he_dev
, G0_RBPL_T
));
2778 inuse
= rbpl_head
- rbpl_tail
;
2780 inuse
+= CONFIG_RBPL_SIZE
* sizeof(struct he_rbp
);
2781 inuse
/= sizeof(struct he_rbp
);
2784 return sprintf(page
, "rbpl_size = %d inuse = %d\n\n",
2785 CONFIG_RBPL_SIZE
, inuse
);
/* one output line per CBR rate-controller period */
2789 return sprintf(page
, "rate controller periods (cbr)\n pcr #vc\n");
2791 for (i
= 0; i
< HE_NUM_CS_STPER
; ++i
)
2793 return sprintf(page
, "cs_stper%-2d %8ld %3d\n", i
,
2794 he_dev
->cs_stper
[i
].pcr
,
2795 he_dev
->cs_stper
[i
].inuse
);
/* limit = link_rate * 10/9 -- presumably header-overhead scaling; TODO confirm */
2798 return sprintf(page
, "total bw (cbr): %d (limit %d)\n",
2799 he_dev
->total_bw
, he_dev
->atm_dev
->link_rate
* 10 / 9);
2804 /* eeprom routines -- see 4.7 */
/*
 * read_prom_byte() -- bit-bang one byte out of the serial EEPROM
 * (the "eeprom routines -- see 4.7" section): drives the EEPROM
 * control lines through writes to HOST_CNTL, pacing every clock
 * edge with udelay(EEPROM_DELAY).
 *
 * NOTE(review): the declarations of i, j and byte_read, the
 * chip-select setup and a few framing lines were lost in this
 * extraction; surviving tokens are kept verbatim.
 */
2806 static u8
read_prom_byte(struct he_dev
*he_dev
, int addr
)
2808 u32 val
= 0, tmp_read
= 0;
/* start from the current host-control register contents */
2812 val
= readl(he_dev
->membase
+ HOST_CNTL
);
2815 /* Turn on write enable */
2817 he_writel(he_dev
, val
, HOST_CNTL
);
/* walk the canned READ-opcode waveform from readtab[] */
2819 /* Send READ instruction */
2820 for (i
= 0; i
< ARRAY_SIZE(readtab
); i
++) {
2821 he_writel(he_dev
, val
| readtab
[i
], HOST_CNTL
);
2822 udelay(EEPROM_DELAY
);
/* clock out the 8 address bits MSB-first, two clock edges per bit */
2825 /* Next, we need to send the byte address to read from */
2826 for (i
= 7; i
>= 0; i
--) {
2827 he_writel(he_dev
, val
| clocktab
[j
++] | (((addr
>> i
) & 1) << 9), HOST_CNTL
);
2828 udelay(EEPROM_DELAY
);
2829 he_writel(he_dev
, val
| clocktab
[j
++] | (((addr
>> i
) & 1) << 9), HOST_CNTL
);
2830 udelay(EEPROM_DELAY
);
2835 val
&= 0xFFFFF7FF; /* Turn off write enable */
2836 he_writel(he_dev
, val
, HOST_CNTL
);
2838 /* Now, we can read data from the EEPROM by clocking it in */
/* assemble the result MSB-first from the ID_DOUT bit of HOST_CNTL */
2839 for (i
= 7; i
>= 0; i
--) {
2840 he_writel(he_dev
, val
| clocktab
[j
++], HOST_CNTL
);
2841 udelay(EEPROM_DELAY
);
2842 tmp_read
= he_readl(he_dev
, HOST_CNTL
);
2843 byte_read
|= (unsigned char)
2844 ((tmp_read
& ID_DOUT
) >> ID_DOFFSET
<< i
);
2845 he_writel(he_dev
, val
| clocktab
[j
++], HOST_CNTL
);
2846 udelay(EEPROM_DELAY
);
/* raise ID_CS when done -- presumably deasserts the EEPROM; TODO confirm polarity */
2849 he_writel(he_dev
, val
| ID_CS
, HOST_CNTL
);
2850 udelay(EEPROM_DELAY
);
2855 MODULE_LICENSE("GPL");
2856 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2857 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
2858 module_param(disable64
, bool, 0);
2859 MODULE_PARM_DESC(disable64
, "disable 64-bit pci bus transfers");
2860 module_param(nvpibits
, short, 0);
2861 MODULE_PARM_DESC(nvpibits
, "numbers of bits for vpi (default 0)");
2862 module_param(nvcibits
, short, 0);
2863 MODULE_PARM_DESC(nvcibits
, "numbers of bits for vci (default 12)");
2864 module_param(rx_skb_reserve
, short, 0);
2865 MODULE_PARM_DESC(rx_skb_reserve
, "padding for receive skb (default 16)");
2866 module_param(irq_coalesce
, bool, 0);
2867 MODULE_PARM_DESC(irq_coalesce
, "use interrupt coalescing (default 1)");
2868 module_param(sdh
, bool, 0);
2869 MODULE_PARM_DESC(sdh
, "use SDH framing (default 0)");
/* PCI IDs this driver binds to: FORE Systems ForeRunnerHE boards,
 * any subsystem vendor/device.
 * NOTE(review): the table's all-zero terminating entry was lost in
 * extraction; surviving tokens kept verbatim. */
2871 static struct pci_device_id he_pci_tbl
[] = {
2872 { PCI_VENDOR_ID_FORE
, PCI_DEVICE_ID_FORE_HE
, PCI_ANY_ID
, PCI_ANY_ID
,
/* export the table so userspace module tools can map device -> module */
2877 MODULE_DEVICE_TABLE(pci
, he_pci_tbl
);
/* PCI driver glue: he_init_one()/he_remove_one() handle per-device
 * setup and teardown; he_pci_tbl (above) selects matching hardware.
 * NOTE(review): the .name initializer and closing brace were lost in
 * extraction; surviving tokens kept verbatim. */
2879 static struct pci_driver he_driver
= {
2881 .probe
= he_init_one
,
/* __devexit_p(): becomes NULL when hotplug-removal support is compiled out */
2882 .remove
= __devexit_p(he_remove_one
),
2883 .id_table
= he_pci_tbl
,
2886 static int __init
he_init(void)
2888 return pci_register_driver(&he_driver
);
2891 static void __exit
he_cleanup(void)
2893 pci_unregister_driver(&he_driver
);
/* Register the module's init/exit hooks with the kernel. */
module_init(he_init);
module_exit(he_cleanup);