5 ForeRunnerHE ATM Adapter driver for ATM on Linux
6 Copyright (C) 1999-2001 Naval Research Laboratory
8 This library is free software; you can redistribute it and/or
9 modify it under the terms of the GNU Lesser General Public
10 License as published by the Free Software Foundation; either
11 version 2.1 of the License, or (at your option) any later version.
13 This library is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 Lesser General Public License for more details.
18 You should have received a copy of the GNU Lesser General Public
19 License along with this library; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 ForeRunnerHE ATM Adapter driver for ATM on Linux
29 Copyright (C) 1999-2001 Naval Research Laboratory
31 Permission to use, copy, modify and distribute this software and its
32 documentation is hereby granted, provided that both the copyright
33 notice and this permission notice appear in all copies of the software,
34 derivative works or modified versions, and any portions thereof, and
35 that both notices appear in supporting documentation.
37 NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
38 DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
39 RESULTING FROM THE USE OF THIS SOFTWARE.
41 This driver was written using the "Programmer's Reference Manual for
42 ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
45 chas williams <chas@cmf.nrl.navy.mil>
46 eric kinzie <ekinzie@cmf.nrl.navy.mil>
49 4096 supported 'connections'
50 group 0 is used for all traffic
51 interrupt queue 0 is used for all interrupts
52 aal0 support (based on work from ulrich.u.muller@nokia.com)
56 #include <linux/module.h>
57 #include <linux/kernel.h>
58 #include <linux/skbuff.h>
59 #include <linux/pci.h>
60 #include <linux/errno.h>
61 #include <linux/types.h>
62 #include <linux/string.h>
63 #include <linux/delay.h>
64 #include <linux/init.h>
66 #include <linux/sched.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/dma-mapping.h>
70 #include <linux/bitmap.h>
71 #include <linux/slab.h>
73 #include <asm/byteorder.h>
74 #include <asm/uaccess.h>
76 #include <linux/atmdev.h>
77 #include <linux/atm.h>
78 #include <linux/sonet.h>
80 #undef USE_SCATTERGATHER
81 #undef USE_CHECKSUM_HW /* still confused about this */
86 #include <linux/atm_he.h>
88 #define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
91 #define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
93 #define HPRINTK(fmt,args...) do { } while (0)
98 static int he_open(struct atm_vcc
*vcc
);
99 static void he_close(struct atm_vcc
*vcc
);
100 static int he_send(struct atm_vcc
*vcc
, struct sk_buff
*skb
);
101 static int he_ioctl(struct atm_dev
*dev
, unsigned int cmd
, void __user
*arg
);
102 static irqreturn_t
he_irq_handler(int irq
, void *dev_id
);
103 static void he_tasklet(unsigned long data
);
104 static int he_proc_read(struct atm_dev
*dev
,loff_t
*pos
,char *page
);
105 static int he_start(struct atm_dev
*dev
);
106 static void he_stop(struct he_dev
*dev
);
107 static void he_phy_put(struct atm_dev
*, unsigned char, unsigned long);
108 static unsigned char he_phy_get(struct atm_dev
*, unsigned long);
110 static u8
read_prom_byte(struct he_dev
*he_dev
, int addr
);
114 static struct he_dev
*he_devs
;
115 static bool disable64
;
116 static short nvpibits
= -1;
117 static short nvcibits
= -1;
118 static short rx_skb_reserve
= 16;
119 static bool irq_coalesce
= 1;
122 /* Read from EEPROM = 0000 0011b */
123 static unsigned int readtab
[] = {
138 CLK_HIGH
| SI_HIGH
, /* 1 */
140 CLK_HIGH
| SI_HIGH
/* 1 */
143 /* Clock to read from/write to the EEPROM */
144 static unsigned int clocktab
[] = {
164 static struct atmdev_ops he_ops
=
170 .phy_put
= he_phy_put
,
171 .phy_get
= he_phy_get
,
172 .proc_read
= he_proc_read
,
176 #define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
177 #define he_readl(dev, reg) readl((dev)->membase + (reg))
179 /* section 2.12 connection memory access */
181 static __inline__
void
182 he_writel_internal(struct he_dev
*he_dev
, unsigned val
, unsigned addr
,
185 he_writel(he_dev
, val
, CON_DAT
);
186 (void) he_readl(he_dev
, CON_DAT
); /* flush posted writes */
187 he_writel(he_dev
, flags
| CON_CTL_WRITE
| CON_CTL_ADDR(addr
), CON_CTL
);
188 while (he_readl(he_dev
, CON_CTL
) & CON_CTL_BUSY
);
191 #define he_writel_rcm(dev, val, reg) \
192 he_writel_internal(dev, val, reg, CON_CTL_RCM)
194 #define he_writel_tcm(dev, val, reg) \
195 he_writel_internal(dev, val, reg, CON_CTL_TCM)
197 #define he_writel_mbox(dev, val, reg) \
198 he_writel_internal(dev, val, reg, CON_CTL_MBOX)
201 he_readl_internal(struct he_dev
*he_dev
, unsigned addr
, unsigned flags
)
203 he_writel(he_dev
, flags
| CON_CTL_READ
| CON_CTL_ADDR(addr
), CON_CTL
);
204 while (he_readl(he_dev
, CON_CTL
) & CON_CTL_BUSY
);
205 return he_readl(he_dev
, CON_DAT
);
208 #define he_readl_rcm(dev, reg) \
209 he_readl_internal(dev, reg, CON_CTL_RCM)
211 #define he_readl_tcm(dev, reg) \
212 he_readl_internal(dev, reg, CON_CTL_TCM)
214 #define he_readl_mbox(dev, reg) \
215 he_readl_internal(dev, reg, CON_CTL_MBOX)
218 /* figure 2.2 connection id */
220 #define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff)
222 /* 2.5.1 per connection transmit state registers */
224 #define he_writel_tsr0(dev, val, cid) \
225 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
226 #define he_readl_tsr0(dev, cid) \
227 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
229 #define he_writel_tsr1(dev, val, cid) \
230 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
232 #define he_writel_tsr2(dev, val, cid) \
233 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
235 #define he_writel_tsr3(dev, val, cid) \
236 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
238 #define he_writel_tsr4(dev, val, cid) \
239 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
243 * NOTE While the transmit connection is active, bits 23 through 0
244 * of this register must not be written by the host. Byte
245 * enables should be used during normal operation when writing
246 * the most significant byte.
249 #define he_writel_tsr4_upper(dev, val, cid) \
250 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
252 | CON_BYTE_DISABLE_2 \
253 | CON_BYTE_DISABLE_1 \
254 | CON_BYTE_DISABLE_0)
256 #define he_readl_tsr4(dev, cid) \
257 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
259 #define he_writel_tsr5(dev, val, cid) \
260 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
262 #define he_writel_tsr6(dev, val, cid) \
263 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
265 #define he_writel_tsr7(dev, val, cid) \
266 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
269 #define he_writel_tsr8(dev, val, cid) \
270 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
272 #define he_writel_tsr9(dev, val, cid) \
273 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
275 #define he_writel_tsr10(dev, val, cid) \
276 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
278 #define he_writel_tsr11(dev, val, cid) \
279 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
282 #define he_writel_tsr12(dev, val, cid) \
283 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
285 #define he_writel_tsr13(dev, val, cid) \
286 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
289 #define he_writel_tsr14(dev, val, cid) \
290 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
292 #define he_writel_tsr14_upper(dev, val, cid) \
293 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
295 | CON_BYTE_DISABLE_2 \
296 | CON_BYTE_DISABLE_1 \
297 | CON_BYTE_DISABLE_0)
299 /* 2.7.1 per connection receive state registers */
301 #define he_writel_rsr0(dev, val, cid) \
302 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
303 #define he_readl_rsr0(dev, cid) \
304 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
306 #define he_writel_rsr1(dev, val, cid) \
307 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
309 #define he_writel_rsr2(dev, val, cid) \
310 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
312 #define he_writel_rsr3(dev, val, cid) \
313 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
315 #define he_writel_rsr4(dev, val, cid) \
316 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
318 #define he_writel_rsr5(dev, val, cid) \
319 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
321 #define he_writel_rsr6(dev, val, cid) \
322 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
324 #define he_writel_rsr7(dev, val, cid) \
325 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
327 static __inline__
struct atm_vcc
*
328 __find_vcc(struct he_dev
*he_dev
, unsigned cid
)
330 struct hlist_head
*head
;
336 vpi
= cid
>> he_dev
->vcibits
;
337 vci
= cid
& ((1 << he_dev
->vcibits
) - 1);
338 head
= &vcc_hash
[vci
& (VCC_HTABLE_SIZE
-1)];
340 sk_for_each(s
, head
) {
342 if (vcc
->dev
== he_dev
->atm_dev
&&
343 vcc
->vci
== vci
&& vcc
->vpi
== vpi
&&
344 vcc
->qos
.rxtp
.traffic_class
!= ATM_NONE
) {
351 static int he_init_one(struct pci_dev
*pci_dev
,
352 const struct pci_device_id
*pci_ent
)
354 struct atm_dev
*atm_dev
= NULL
;
355 struct he_dev
*he_dev
= NULL
;
358 printk(KERN_INFO
"ATM he driver\n");
360 if (pci_enable_device(pci_dev
))
362 if (pci_set_dma_mask(pci_dev
, DMA_BIT_MASK(32)) != 0) {
363 printk(KERN_WARNING
"he: no suitable dma available\n");
365 goto init_one_failure
;
368 atm_dev
= atm_dev_register(DEV_LABEL
, &pci_dev
->dev
, &he_ops
, -1, NULL
);
371 goto init_one_failure
;
373 pci_set_drvdata(pci_dev
, atm_dev
);
375 he_dev
= kzalloc(sizeof(struct he_dev
),
379 goto init_one_failure
;
381 he_dev
->pci_dev
= pci_dev
;
382 he_dev
->atm_dev
= atm_dev
;
383 he_dev
->atm_dev
->dev_data
= he_dev
;
384 atm_dev
->dev_data
= he_dev
;
385 he_dev
->number
= atm_dev
->number
;
386 tasklet_init(&he_dev
->tasklet
, he_tasklet
, (unsigned long) he_dev
);
387 spin_lock_init(&he_dev
->global_lock
);
389 if (he_start(atm_dev
)) {
392 goto init_one_failure
;
396 he_dev
->next
= he_devs
;
402 atm_dev_deregister(atm_dev
);
404 pci_disable_device(pci_dev
);
408 static void he_remove_one(struct pci_dev
*pci_dev
)
410 struct atm_dev
*atm_dev
;
411 struct he_dev
*he_dev
;
413 atm_dev
= pci_get_drvdata(pci_dev
);
414 he_dev
= HE_DEV(atm_dev
);
416 /* need to remove from he_devs */
419 atm_dev_deregister(atm_dev
);
422 pci_set_drvdata(pci_dev
, NULL
);
423 pci_disable_device(pci_dev
);
/* Convert a rate in cells per second into the ATM Forum floating-point
 * encoding: bit 14 = non-zero flag, bits 13..9 = exponent, bits 8..0 =
 * 9-bit mantissa with an implicit leading one (value = 2^exp * (512+m)/512).
 * A rate of zero encodes as zero (non-zero flag clear).
 */
static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;			/* pre-scale so the mantissa has 9 fraction bits */
	while (rate > 0x3ff) {		/* normalize into (0x1ff, 0x3ff] */
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
446 static void he_init_rx_lbfp0(struct he_dev
*he_dev
)
448 unsigned i
, lbm_offset
, lbufd_index
, lbuf_addr
, lbuf_count
;
449 unsigned lbufs_per_row
= he_dev
->cells_per_row
/ he_dev
->cells_per_lbuf
;
450 unsigned lbuf_bufsize
= he_dev
->cells_per_lbuf
* ATM_CELL_PAYLOAD
;
451 unsigned row_offset
= he_dev
->r0_startrow
* he_dev
->bytes_per_row
;
454 lbm_offset
= he_readl(he_dev
, RCMLBM_BA
);
456 he_writel(he_dev
, lbufd_index
, RLBF0_H
);
458 for (i
= 0, lbuf_count
= 0; i
< he_dev
->r0_numbuffs
; ++i
) {
460 lbuf_addr
= (row_offset
+ (lbuf_count
* lbuf_bufsize
)) / 32;
462 he_writel_rcm(he_dev
, lbuf_addr
, lbm_offset
);
463 he_writel_rcm(he_dev
, lbufd_index
, lbm_offset
+ 1);
465 if (++lbuf_count
== lbufs_per_row
) {
467 row_offset
+= he_dev
->bytes_per_row
;
472 he_writel(he_dev
, lbufd_index
- 2, RLBF0_T
);
473 he_writel(he_dev
, he_dev
->r0_numbuffs
, RLBF0_C
);
476 static void he_init_rx_lbfp1(struct he_dev
*he_dev
)
478 unsigned i
, lbm_offset
, lbufd_index
, lbuf_addr
, lbuf_count
;
479 unsigned lbufs_per_row
= he_dev
->cells_per_row
/ he_dev
->cells_per_lbuf
;
480 unsigned lbuf_bufsize
= he_dev
->cells_per_lbuf
* ATM_CELL_PAYLOAD
;
481 unsigned row_offset
= he_dev
->r1_startrow
* he_dev
->bytes_per_row
;
484 lbm_offset
= he_readl(he_dev
, RCMLBM_BA
) + (2 * lbufd_index
);
486 he_writel(he_dev
, lbufd_index
, RLBF1_H
);
488 for (i
= 0, lbuf_count
= 0; i
< he_dev
->r1_numbuffs
; ++i
) {
490 lbuf_addr
= (row_offset
+ (lbuf_count
* lbuf_bufsize
)) / 32;
492 he_writel_rcm(he_dev
, lbuf_addr
, lbm_offset
);
493 he_writel_rcm(he_dev
, lbufd_index
, lbm_offset
+ 1);
495 if (++lbuf_count
== lbufs_per_row
) {
497 row_offset
+= he_dev
->bytes_per_row
;
502 he_writel(he_dev
, lbufd_index
- 2, RLBF1_T
);
503 he_writel(he_dev
, he_dev
->r1_numbuffs
, RLBF1_C
);
506 static void he_init_tx_lbfp(struct he_dev
*he_dev
)
508 unsigned i
, lbm_offset
, lbufd_index
, lbuf_addr
, lbuf_count
;
509 unsigned lbufs_per_row
= he_dev
->cells_per_row
/ he_dev
->cells_per_lbuf
;
510 unsigned lbuf_bufsize
= he_dev
->cells_per_lbuf
* ATM_CELL_PAYLOAD
;
511 unsigned row_offset
= he_dev
->tx_startrow
* he_dev
->bytes_per_row
;
513 lbufd_index
= he_dev
->r0_numbuffs
+ he_dev
->r1_numbuffs
;
514 lbm_offset
= he_readl(he_dev
, RCMLBM_BA
) + (2 * lbufd_index
);
516 he_writel(he_dev
, lbufd_index
, TLBF_H
);
518 for (i
= 0, lbuf_count
= 0; i
< he_dev
->tx_numbuffs
; ++i
) {
520 lbuf_addr
= (row_offset
+ (lbuf_count
* lbuf_bufsize
)) / 32;
522 he_writel_rcm(he_dev
, lbuf_addr
, lbm_offset
);
523 he_writel_rcm(he_dev
, lbufd_index
, lbm_offset
+ 1);
525 if (++lbuf_count
== lbufs_per_row
) {
527 row_offset
+= he_dev
->bytes_per_row
;
532 he_writel(he_dev
, lbufd_index
- 1, TLBF_T
);
535 static int he_init_tpdrq(struct he_dev
*he_dev
)
537 he_dev
->tpdrq_base
= pci_alloc_consistent(he_dev
->pci_dev
,
538 CONFIG_TPDRQ_SIZE
* sizeof(struct he_tpdrq
), &he_dev
->tpdrq_phys
);
539 if (he_dev
->tpdrq_base
== NULL
) {
540 hprintk("failed to alloc tpdrq\n");
543 memset(he_dev
->tpdrq_base
, 0,
544 CONFIG_TPDRQ_SIZE
* sizeof(struct he_tpdrq
));
546 he_dev
->tpdrq_tail
= he_dev
->tpdrq_base
;
547 he_dev
->tpdrq_head
= he_dev
->tpdrq_base
;
549 he_writel(he_dev
, he_dev
->tpdrq_phys
, TPDRQ_B_H
);
550 he_writel(he_dev
, 0, TPDRQ_T
);
551 he_writel(he_dev
, CONFIG_TPDRQ_SIZE
- 1, TPDRQ_S
);
556 static void he_init_cs_block(struct he_dev
*he_dev
)
558 unsigned clock
, rate
, delta
;
561 /* 5.1.7 cs block initialization */
563 for (reg
= 0; reg
< 0x20; ++reg
)
564 he_writel_mbox(he_dev
, 0x0, CS_STTIM0
+ reg
);
566 /* rate grid timer reload values */
568 clock
= he_is622(he_dev
) ? 66667000 : 50000000;
569 rate
= he_dev
->atm_dev
->link_rate
;
570 delta
= rate
/ 16 / 2;
572 for (reg
= 0; reg
< 0x10; ++reg
) {
573 /* 2.4 internal transmit function
575 * we initialize the first row in the rate grid.
576 * values are period (in clock cycles) of timer
578 unsigned period
= clock
/ rate
;
580 he_writel_mbox(he_dev
, period
, CS_TGRLD0
+ reg
);
584 if (he_is622(he_dev
)) {
585 /* table 5.2 (4 cells per lbuf) */
586 he_writel_mbox(he_dev
, 0x000800fa, CS_ERTHR0
);
587 he_writel_mbox(he_dev
, 0x000c33cb, CS_ERTHR1
);
588 he_writel_mbox(he_dev
, 0x0010101b, CS_ERTHR2
);
589 he_writel_mbox(he_dev
, 0x00181dac, CS_ERTHR3
);
590 he_writel_mbox(he_dev
, 0x00280600, CS_ERTHR4
);
592 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
593 he_writel_mbox(he_dev
, 0x023de8b3, CS_ERCTL0
);
594 he_writel_mbox(he_dev
, 0x1801, CS_ERCTL1
);
595 he_writel_mbox(he_dev
, 0x68b3, CS_ERCTL2
);
596 he_writel_mbox(he_dev
, 0x1280, CS_ERSTAT0
);
597 he_writel_mbox(he_dev
, 0x68b3, CS_ERSTAT1
);
598 he_writel_mbox(he_dev
, 0x14585, CS_RTFWR
);
600 he_writel_mbox(he_dev
, 0x4680, CS_RTATR
);
603 he_writel_mbox(he_dev
, 0x00159ece, CS_TFBSET
);
604 he_writel_mbox(he_dev
, 0x68b3, CS_WCRMAX
);
605 he_writel_mbox(he_dev
, 0x5eb3, CS_WCRMIN
);
606 he_writel_mbox(he_dev
, 0xe8b3, CS_WCRINC
);
607 he_writel_mbox(he_dev
, 0xdeb3, CS_WCRDEC
);
608 he_writel_mbox(he_dev
, 0x68b3, CS_WCRCEIL
);
611 he_writel_mbox(he_dev
, 0x5, CS_OTPPER
);
612 he_writel_mbox(he_dev
, 0x14, CS_OTWPER
);
614 /* table 5.1 (4 cells per lbuf) */
615 he_writel_mbox(he_dev
, 0x000400ea, CS_ERTHR0
);
616 he_writel_mbox(he_dev
, 0x00063388, CS_ERTHR1
);
617 he_writel_mbox(he_dev
, 0x00081018, CS_ERTHR2
);
618 he_writel_mbox(he_dev
, 0x000c1dac, CS_ERTHR3
);
619 he_writel_mbox(he_dev
, 0x0014051a, CS_ERTHR4
);
621 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
622 he_writel_mbox(he_dev
, 0x0235e4b1, CS_ERCTL0
);
623 he_writel_mbox(he_dev
, 0x4701, CS_ERCTL1
);
624 he_writel_mbox(he_dev
, 0x64b1, CS_ERCTL2
);
625 he_writel_mbox(he_dev
, 0x1280, CS_ERSTAT0
);
626 he_writel_mbox(he_dev
, 0x64b1, CS_ERSTAT1
);
627 he_writel_mbox(he_dev
, 0xf424, CS_RTFWR
);
629 he_writel_mbox(he_dev
, 0x4680, CS_RTATR
);
632 he_writel_mbox(he_dev
, 0x000563b7, CS_TFBSET
);
633 he_writel_mbox(he_dev
, 0x64b1, CS_WCRMAX
);
634 he_writel_mbox(he_dev
, 0x5ab1, CS_WCRMIN
);
635 he_writel_mbox(he_dev
, 0xe4b1, CS_WCRINC
);
636 he_writel_mbox(he_dev
, 0xdab1, CS_WCRDEC
);
637 he_writel_mbox(he_dev
, 0x64b1, CS_WCRCEIL
);
640 he_writel_mbox(he_dev
, 0x6, CS_OTPPER
);
641 he_writel_mbox(he_dev
, 0x1e, CS_OTWPER
);
644 he_writel_mbox(he_dev
, 0x8, CS_OTTLIM
);
646 for (reg
= 0; reg
< 0x8; ++reg
)
647 he_writel_mbox(he_dev
, 0x0, CS_HGRRT0
+ reg
);
651 static int he_init_cs_block_rcm(struct he_dev
*he_dev
)
653 unsigned (*rategrid
)[16][16];
654 unsigned rate
, delta
;
657 unsigned rate_atmf
, exp
, man
;
658 unsigned long long rate_cps
;
659 int mult
, buf
, buf_limit
= 4;
661 rategrid
= kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL
);
665 /* initialize rate grid group table */
667 for (reg
= 0x0; reg
< 0xff; ++reg
)
668 he_writel_rcm(he_dev
, 0x0, CONFIG_RCMABR
+ reg
);
670 /* initialize rate controller groups */
672 for (reg
= 0x100; reg
< 0x1ff; ++reg
)
673 he_writel_rcm(he_dev
, 0x0, CONFIG_RCMABR
+ reg
);
675 /* initialize tNrm lookup table */
677 /* the manual makes reference to a routine in a sample driver
678 for proper configuration; fortunately, we only need this
679 in order to support abr connection */
681 /* initialize rate to group table */
683 rate
= he_dev
->atm_dev
->link_rate
;
687 * 2.4 transmit internal functions
689 * we construct a copy of the rate grid used by the scheduler
690 * in order to construct the rate to group table below
693 for (j
= 0; j
< 16; j
++) {
694 (*rategrid
)[0][j
] = rate
;
698 for (i
= 1; i
< 16; i
++)
699 for (j
= 0; j
< 16; j
++)
701 (*rategrid
)[i
][j
] = (*rategrid
)[i
- 1][j
] / 4;
703 (*rategrid
)[i
][j
] = (*rategrid
)[i
- 1][j
] / 2;
706 * 2.4 transmit internal function
708 * this table maps the upper 5 bits of exponent and mantissa
709 * of the atm forum representation of the rate into an index
714 while (rate_atmf
< 0x400) {
715 man
= (rate_atmf
& 0x1f) << 4;
716 exp
= rate_atmf
>> 5;
719 instead of '/ 512', use '>> 9' to prevent a call
720 to divdu3 on x86 platforms
722 rate_cps
= (unsigned long long) (1 << exp
) * (man
+ 512) >> 9;
725 rate_cps
= 10; /* 2.2.1 minimum payload rate is 10 cps */
727 for (i
= 255; i
> 0; i
--)
728 if ((*rategrid
)[i
/16][i
%16] >= rate_cps
)
729 break; /* pick nearest rate instead? */
732 * each table entry is 16 bits: (rate grid index (8 bits)
733 * and a buffer limit (8 bits)
734 * there are two table entries in each 32-bit register
738 buf
= rate_cps
* he_dev
->tx_numbuffs
/
739 (he_dev
->atm_dev
->link_rate
* 2);
741 /* this is pretty, but avoids _divdu3 and is mostly correct */
742 mult
= he_dev
->atm_dev
->link_rate
/ ATM_OC3_PCR
;
743 if (rate_cps
> (272 * mult
))
745 else if (rate_cps
> (204 * mult
))
747 else if (rate_cps
> (136 * mult
))
749 else if (rate_cps
> (68 * mult
))
756 reg
= (reg
<< 16) | ((i
<< 8) | buf
);
758 #define RTGTBL_OFFSET 0x400
761 he_writel_rcm(he_dev
, reg
,
762 CONFIG_RCMABR
+ RTGTBL_OFFSET
+ (rate_atmf
>> 1));
771 static int he_init_group(struct he_dev
*he_dev
, int group
)
773 struct he_buff
*heb
, *next
;
777 he_writel(he_dev
, 0x0, G0_RBPS_S
+ (group
* 32));
778 he_writel(he_dev
, 0x0, G0_RBPS_T
+ (group
* 32));
779 he_writel(he_dev
, 0x0, G0_RBPS_QI
+ (group
* 32));
780 he_writel(he_dev
, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
781 G0_RBPS_BS
+ (group
* 32));
784 he_dev
->rbpl_table
= kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE
)
785 * sizeof(unsigned long), GFP_KERNEL
);
786 if (!he_dev
->rbpl_table
) {
787 hprintk("unable to allocate rbpl bitmap table\n");
790 bitmap_zero(he_dev
->rbpl_table
, RBPL_TABLE_SIZE
);
792 /* rbpl_virt 64-bit pointers */
793 he_dev
->rbpl_virt
= kmalloc(RBPL_TABLE_SIZE
794 * sizeof(struct he_buff
*), GFP_KERNEL
);
795 if (!he_dev
->rbpl_virt
) {
796 hprintk("unable to allocate rbpl virt table\n");
797 goto out_free_rbpl_table
;
800 /* large buffer pool */
801 he_dev
->rbpl_pool
= pci_pool_create("rbpl", he_dev
->pci_dev
,
802 CONFIG_RBPL_BUFSIZE
, 64, 0);
803 if (he_dev
->rbpl_pool
== NULL
) {
804 hprintk("unable to create rbpl pool\n");
805 goto out_free_rbpl_virt
;
808 he_dev
->rbpl_base
= pci_alloc_consistent(he_dev
->pci_dev
,
809 CONFIG_RBPL_SIZE
* sizeof(struct he_rbp
), &he_dev
->rbpl_phys
);
810 if (he_dev
->rbpl_base
== NULL
) {
811 hprintk("failed to alloc rbpl_base\n");
812 goto out_destroy_rbpl_pool
;
814 memset(he_dev
->rbpl_base
, 0, CONFIG_RBPL_SIZE
* sizeof(struct he_rbp
));
816 INIT_LIST_HEAD(&he_dev
->rbpl_outstanding
);
818 for (i
= 0; i
< CONFIG_RBPL_SIZE
; ++i
) {
820 heb
= pci_pool_alloc(he_dev
->rbpl_pool
, GFP_KERNEL
|GFP_DMA
, &mapping
);
823 heb
->mapping
= mapping
;
824 list_add(&heb
->entry
, &he_dev
->rbpl_outstanding
);
826 set_bit(i
, he_dev
->rbpl_table
);
827 he_dev
->rbpl_virt
[i
] = heb
;
828 he_dev
->rbpl_hint
= i
+ 1;
829 he_dev
->rbpl_base
[i
].idx
= i
<< RBP_IDX_OFFSET
;
830 he_dev
->rbpl_base
[i
].phys
= mapping
+ offsetof(struct he_buff
, data
);
832 he_dev
->rbpl_tail
= &he_dev
->rbpl_base
[CONFIG_RBPL_SIZE
- 1];
834 he_writel(he_dev
, he_dev
->rbpl_phys
, G0_RBPL_S
+ (group
* 32));
835 he_writel(he_dev
, RBPL_MASK(he_dev
->rbpl_tail
),
836 G0_RBPL_T
+ (group
* 32));
837 he_writel(he_dev
, (CONFIG_RBPL_BUFSIZE
- sizeof(struct he_buff
))/4,
838 G0_RBPL_BS
+ (group
* 32));
840 RBP_THRESH(CONFIG_RBPL_THRESH
) |
841 RBP_QSIZE(CONFIG_RBPL_SIZE
- 1) |
843 G0_RBPL_QI
+ (group
* 32));
845 /* rx buffer ready queue */
847 he_dev
->rbrq_base
= pci_alloc_consistent(he_dev
->pci_dev
,
848 CONFIG_RBRQ_SIZE
* sizeof(struct he_rbrq
), &he_dev
->rbrq_phys
);
849 if (he_dev
->rbrq_base
== NULL
) {
850 hprintk("failed to allocate rbrq\n");
853 memset(he_dev
->rbrq_base
, 0, CONFIG_RBRQ_SIZE
* sizeof(struct he_rbrq
));
855 he_dev
->rbrq_head
= he_dev
->rbrq_base
;
856 he_writel(he_dev
, he_dev
->rbrq_phys
, G0_RBRQ_ST
+ (group
* 16));
857 he_writel(he_dev
, 0, G0_RBRQ_H
+ (group
* 16));
859 RBRQ_THRESH(CONFIG_RBRQ_THRESH
) | RBRQ_SIZE(CONFIG_RBRQ_SIZE
- 1),
860 G0_RBRQ_Q
+ (group
* 16));
862 hprintk("coalescing interrupts\n");
863 he_writel(he_dev
, RBRQ_TIME(768) | RBRQ_COUNT(7),
864 G0_RBRQ_I
+ (group
* 16));
866 he_writel(he_dev
, RBRQ_TIME(0) | RBRQ_COUNT(1),
867 G0_RBRQ_I
+ (group
* 16));
869 /* tx buffer ready queue */
871 he_dev
->tbrq_base
= pci_alloc_consistent(he_dev
->pci_dev
,
872 CONFIG_TBRQ_SIZE
* sizeof(struct he_tbrq
), &he_dev
->tbrq_phys
);
873 if (he_dev
->tbrq_base
== NULL
) {
874 hprintk("failed to allocate tbrq\n");
875 goto out_free_rbpq_base
;
877 memset(he_dev
->tbrq_base
, 0, CONFIG_TBRQ_SIZE
* sizeof(struct he_tbrq
));
879 he_dev
->tbrq_head
= he_dev
->tbrq_base
;
881 he_writel(he_dev
, he_dev
->tbrq_phys
, G0_TBRQ_B_T
+ (group
* 16));
882 he_writel(he_dev
, 0, G0_TBRQ_H
+ (group
* 16));
883 he_writel(he_dev
, CONFIG_TBRQ_SIZE
- 1, G0_TBRQ_S
+ (group
* 16));
884 he_writel(he_dev
, CONFIG_TBRQ_THRESH
, G0_TBRQ_THRESH
+ (group
* 16));
889 pci_free_consistent(he_dev
->pci_dev
, CONFIG_RBRQ_SIZE
*
890 sizeof(struct he_rbrq
), he_dev
->rbrq_base
,
893 list_for_each_entry_safe(heb
, next
, &he_dev
->rbpl_outstanding
, entry
)
894 pci_pool_free(he_dev
->rbpl_pool
, heb
, heb
->mapping
);
896 pci_free_consistent(he_dev
->pci_dev
, CONFIG_RBPL_SIZE
*
897 sizeof(struct he_rbp
), he_dev
->rbpl_base
,
899 out_destroy_rbpl_pool
:
900 pci_pool_destroy(he_dev
->rbpl_pool
);
902 kfree(he_dev
->rbpl_virt
);
904 kfree(he_dev
->rbpl_table
);
909 static int he_init_irq(struct he_dev
*he_dev
)
913 /* 2.9.3.5 tail offset for each interrupt queue is located after the
914 end of the interrupt queue */
916 he_dev
->irq_base
= pci_alloc_consistent(he_dev
->pci_dev
,
917 (CONFIG_IRQ_SIZE
+1) * sizeof(struct he_irq
), &he_dev
->irq_phys
);
918 if (he_dev
->irq_base
== NULL
) {
919 hprintk("failed to allocate irq\n");
922 he_dev
->irq_tailoffset
= (unsigned *)
923 &he_dev
->irq_base
[CONFIG_IRQ_SIZE
];
924 *he_dev
->irq_tailoffset
= 0;
925 he_dev
->irq_head
= he_dev
->irq_base
;
926 he_dev
->irq_tail
= he_dev
->irq_base
;
928 for (i
= 0; i
< CONFIG_IRQ_SIZE
; ++i
)
929 he_dev
->irq_base
[i
].isw
= ITYPE_INVALID
;
931 he_writel(he_dev
, he_dev
->irq_phys
, IRQ0_BASE
);
933 IRQ_SIZE(CONFIG_IRQ_SIZE
) | IRQ_THRESH(CONFIG_IRQ_THRESH
),
935 he_writel(he_dev
, IRQ_INT_A
| IRQ_TYPE_LINE
, IRQ0_CNTL
);
936 he_writel(he_dev
, 0x0, IRQ0_DATA
);
938 he_writel(he_dev
, 0x0, IRQ1_BASE
);
939 he_writel(he_dev
, 0x0, IRQ1_HEAD
);
940 he_writel(he_dev
, 0x0, IRQ1_CNTL
);
941 he_writel(he_dev
, 0x0, IRQ1_DATA
);
943 he_writel(he_dev
, 0x0, IRQ2_BASE
);
944 he_writel(he_dev
, 0x0, IRQ2_HEAD
);
945 he_writel(he_dev
, 0x0, IRQ2_CNTL
);
946 he_writel(he_dev
, 0x0, IRQ2_DATA
);
948 he_writel(he_dev
, 0x0, IRQ3_BASE
);
949 he_writel(he_dev
, 0x0, IRQ3_HEAD
);
950 he_writel(he_dev
, 0x0, IRQ3_CNTL
);
951 he_writel(he_dev
, 0x0, IRQ3_DATA
);
953 /* 2.9.3.2 interrupt queue mapping registers */
955 he_writel(he_dev
, 0x0, GRP_10_MAP
);
956 he_writel(he_dev
, 0x0, GRP_32_MAP
);
957 he_writel(he_dev
, 0x0, GRP_54_MAP
);
958 he_writel(he_dev
, 0x0, GRP_76_MAP
);
960 if (request_irq(he_dev
->pci_dev
->irq
,
961 he_irq_handler
, IRQF_SHARED
, DEV_LABEL
, he_dev
)) {
962 hprintk("irq %d already in use\n", he_dev
->pci_dev
->irq
);
966 he_dev
->irq
= he_dev
->pci_dev
->irq
;
971 static int he_start(struct atm_dev
*dev
)
973 struct he_dev
*he_dev
;
974 struct pci_dev
*pci_dev
;
975 unsigned long membase
;
978 u32 gen_cntl_0
, host_cntl
, lb_swap
;
979 u8 cache_size
, timer
;
982 unsigned int status
, reg
;
985 he_dev
= HE_DEV(dev
);
986 pci_dev
= he_dev
->pci_dev
;
988 membase
= pci_resource_start(pci_dev
, 0);
989 HPRINTK("membase = 0x%lx irq = %d.\n", membase
, pci_dev
->irq
);
992 * pci bus controller initialization
995 /* 4.3 pci bus controller-specific initialization */
996 if (pci_read_config_dword(pci_dev
, GEN_CNTL_0
, &gen_cntl_0
) != 0) {
997 hprintk("can't read GEN_CNTL_0\n");
1000 gen_cntl_0
|= (MRL_ENB
| MRM_ENB
| IGNORE_TIMEOUT
);
1001 if (pci_write_config_dword(pci_dev
, GEN_CNTL_0
, gen_cntl_0
) != 0) {
1002 hprintk("can't write GEN_CNTL_0.\n");
1006 if (pci_read_config_word(pci_dev
, PCI_COMMAND
, &command
) != 0) {
1007 hprintk("can't read PCI_COMMAND.\n");
1011 command
|= (PCI_COMMAND_MEMORY
| PCI_COMMAND_MASTER
| PCI_COMMAND_INVALIDATE
);
1012 if (pci_write_config_word(pci_dev
, PCI_COMMAND
, command
) != 0) {
1013 hprintk("can't enable memory.\n");
1017 if (pci_read_config_byte(pci_dev
, PCI_CACHE_LINE_SIZE
, &cache_size
)) {
1018 hprintk("can't read cache line size?\n");
1022 if (cache_size
< 16) {
1024 if (pci_write_config_byte(pci_dev
, PCI_CACHE_LINE_SIZE
, cache_size
))
1025 hprintk("can't set cache line size to %d\n", cache_size
);
1028 if (pci_read_config_byte(pci_dev
, PCI_LATENCY_TIMER
, &timer
)) {
1029 hprintk("can't read latency timer?\n");
1035 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1037 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1038 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1041 #define LAT_TIMER 209
1042 if (timer
< LAT_TIMER
) {
1043 HPRINTK("latency timer was %d, setting to %d\n", timer
, LAT_TIMER
);
1045 if (pci_write_config_byte(pci_dev
, PCI_LATENCY_TIMER
, timer
))
1046 hprintk("can't set latency timer to %d\n", timer
);
1049 if (!(he_dev
->membase
= ioremap(membase
, HE_REGMAP_SIZE
))) {
1050 hprintk("can't set up page mapping\n");
1054 /* 4.4 card reset */
1055 he_writel(he_dev
, 0x0, RESET_CNTL
);
1056 he_writel(he_dev
, 0xff, RESET_CNTL
);
1058 udelay(16*1000); /* 16 ms */
1059 status
= he_readl(he_dev
, RESET_CNTL
);
1060 if ((status
& BOARD_RST_STATUS
) == 0) {
1061 hprintk("reset failed\n");
1065 /* 4.5 set bus width */
1066 host_cntl
= he_readl(he_dev
, HOST_CNTL
);
1067 if (host_cntl
& PCI_BUS_SIZE64
)
1068 gen_cntl_0
|= ENBL_64
;
1070 gen_cntl_0
&= ~ENBL_64
;
1072 if (disable64
== 1) {
1073 hprintk("disabling 64-bit pci bus transfers\n");
1074 gen_cntl_0
&= ~ENBL_64
;
1077 if (gen_cntl_0
& ENBL_64
)
1078 hprintk("64-bit transfers enabled\n");
1080 pci_write_config_dword(pci_dev
, GEN_CNTL_0
, gen_cntl_0
);
1082 /* 4.7 read prom contents */
1083 for (i
= 0; i
< PROD_ID_LEN
; ++i
)
1084 he_dev
->prod_id
[i
] = read_prom_byte(he_dev
, PROD_ID
+ i
);
1086 he_dev
->media
= read_prom_byte(he_dev
, MEDIA
);
1088 for (i
= 0; i
< 6; ++i
)
1089 dev
->esi
[i
] = read_prom_byte(he_dev
, MAC_ADDR
+ i
);
1091 hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
1093 he_dev
->media
& 0x40 ? "SM" : "MM",
1100 he_dev
->atm_dev
->link_rate
= he_is622(he_dev
) ?
1101 ATM_OC12_PCR
: ATM_OC3_PCR
;
1103 /* 4.6 set host endianess */
1104 lb_swap
= he_readl(he_dev
, LB_SWAP
);
1105 if (he_is622(he_dev
))
1106 lb_swap
&= ~XFER_SIZE
; /* 4 cells */
1108 lb_swap
|= XFER_SIZE
; /* 8 cells */
1110 lb_swap
|= DESC_WR_SWAP
| INTR_SWAP
| BIG_ENDIAN_HOST
;
1112 lb_swap
&= ~(DESC_WR_SWAP
| INTR_SWAP
| BIG_ENDIAN_HOST
|
1113 DATA_WR_SWAP
| DATA_RD_SWAP
| DESC_RD_SWAP
);
1114 #endif /* __BIG_ENDIAN */
1115 he_writel(he_dev
, lb_swap
, LB_SWAP
);
1117 /* 4.8 sdram controller initialization */
1118 he_writel(he_dev
, he_is622(he_dev
) ? LB_64_ENB
: 0x0, SDRAM_CTL
);
1120 /* 4.9 initialize rnum value */
1121 lb_swap
|= SWAP_RNUM_MAX(0xf);
1122 he_writel(he_dev
, lb_swap
, LB_SWAP
);
1124 /* 4.10 initialize the interrupt queues */
1125 if ((err
= he_init_irq(he_dev
)) != 0)
1128 /* 4.11 enable pci bus controller state machines */
1129 host_cntl
|= (OUTFF_ENB
| CMDFF_ENB
|
1130 QUICK_RD_RETRY
| QUICK_WR_RETRY
| PERR_INT_ENB
);
1131 he_writel(he_dev
, host_cntl
, HOST_CNTL
);
1133 gen_cntl_0
|= INT_PROC_ENBL
|INIT_ENB
;
1134 pci_write_config_dword(pci_dev
, GEN_CNTL_0
, gen_cntl_0
);
1137 * atm network controller initialization
1140 /* 5.1.1 generic configuration state */
1143 * local (cell) buffer memory map
1147 * 0 ____________1023 bytes 0 _______________________2047 bytes
1149 * | utility | | rx0 | |
1150 * 5|____________| 255|___________________| u |
1153 * | rx0 | row | tx | l |
1155 * | | 767|___________________| t |
1156 * 517|____________| 768| | y |
1157 * row 518| | | rx1 | |
1158 * | | 1023|___________________|___|
1163 * 1535|____________|
1166 * 2047|____________|
1170 /* total 4096 connections */
1171 he_dev
->vcibits
= CONFIG_DEFAULT_VCIBITS
;
1172 he_dev
->vpibits
= CONFIG_DEFAULT_VPIBITS
;
1174 if (nvpibits
!= -1 && nvcibits
!= -1 && nvpibits
+nvcibits
!= HE_MAXCIDBITS
) {
1175 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS
);
1179 if (nvpibits
!= -1) {
1180 he_dev
->vpibits
= nvpibits
;
1181 he_dev
->vcibits
= HE_MAXCIDBITS
- nvpibits
;
1184 if (nvcibits
!= -1) {
1185 he_dev
->vcibits
= nvcibits
;
1186 he_dev
->vpibits
= HE_MAXCIDBITS
- nvcibits
;
1190 if (he_is622(he_dev
)) {
1191 he_dev
->cells_per_row
= 40;
1192 he_dev
->bytes_per_row
= 2048;
1193 he_dev
->r0_numrows
= 256;
1194 he_dev
->tx_numrows
= 512;
1195 he_dev
->r1_numrows
= 256;
1196 he_dev
->r0_startrow
= 0;
1197 he_dev
->tx_startrow
= 256;
1198 he_dev
->r1_startrow
= 768;
1200 he_dev
->cells_per_row
= 20;
1201 he_dev
->bytes_per_row
= 1024;
1202 he_dev
->r0_numrows
= 512;
1203 he_dev
->tx_numrows
= 1018;
1204 he_dev
->r1_numrows
= 512;
1205 he_dev
->r0_startrow
= 6;
1206 he_dev
->tx_startrow
= 518;
1207 he_dev
->r1_startrow
= 1536;
1210 he_dev
->cells_per_lbuf
= 4;
1211 he_dev
->buffer_limit
= 4;
1212 he_dev
->r0_numbuffs
= he_dev
->r0_numrows
*
1213 he_dev
->cells_per_row
/ he_dev
->cells_per_lbuf
;
1214 if (he_dev
->r0_numbuffs
> 2560)
1215 he_dev
->r0_numbuffs
= 2560;
1217 he_dev
->r1_numbuffs
= he_dev
->r1_numrows
*
1218 he_dev
->cells_per_row
/ he_dev
->cells_per_lbuf
;
1219 if (he_dev
->r1_numbuffs
> 2560)
1220 he_dev
->r1_numbuffs
= 2560;
1222 he_dev
->tx_numbuffs
= he_dev
->tx_numrows
*
1223 he_dev
->cells_per_row
/ he_dev
->cells_per_lbuf
;
1224 if (he_dev
->tx_numbuffs
> 5120)
1225 he_dev
->tx_numbuffs
= 5120;
1227 /* 5.1.2 configure hardware dependent registers */
1230 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1231 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1232 (he_is622(he_dev
) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1233 (he_is622(he_dev
) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1236 he_writel(he_dev
, BANK_ON
|
1237 (he_is622(he_dev
) ? (REF_RATE(0x384) | WIDE_DATA
) : REF_RATE(0x150)),
1241 (he_is622(he_dev
) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1242 RM_RW_WAIT(1), RCMCONFIG
);
1244 (he_is622(he_dev
) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1245 TM_RW_WAIT(1), TCMCONFIG
);
1247 he_writel(he_dev
, he_dev
->cells_per_lbuf
* ATM_CELL_PAYLOAD
, LB_CONFIG
);
1250 (he_is622(he_dev
) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1251 (he_is622(he_dev
) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1252 RX_VALVP(he_dev
->vpibits
) |
1253 RX_VALVC(he_dev
->vcibits
), RC_CONFIG
);
1255 he_writel(he_dev
, DRF_THRESH(0x20) |
1256 (he_is622(he_dev
) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1257 TX_VCI_MASK(he_dev
->vcibits
) |
1258 LBFREE_CNT(he_dev
->tx_numbuffs
), TX_CONFIG
);
1260 he_writel(he_dev
, 0x0, TXAAL5_PROTO
);
1262 he_writel(he_dev
, PHY_INT_ENB
|
1263 (he_is622(he_dev
) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1266 /* 5.1.3 initialize connection memory */
1268 for (i
= 0; i
< TCM_MEM_SIZE
; ++i
)
1269 he_writel_tcm(he_dev
, 0, i
);
1271 for (i
= 0; i
< RCM_MEM_SIZE
; ++i
)
1272 he_writel_rcm(he_dev
, 0, i
);
1275 * transmit connection memory map
1278 * 0x0 ___________________
1284 * 0x8000|___________________|
1287 * 0xc000|___________________|
1290 * 0xe000|___________________|
1292 * 0xf000|___________________|
1294 * 0x10000|___________________|
1297 * |___________________|
1300 * 0x1ffff|___________________|
1305 he_writel(he_dev
, CONFIG_TSRB
, TSRB_BA
);
1306 he_writel(he_dev
, CONFIG_TSRC
, TSRC_BA
);
1307 he_writel(he_dev
, CONFIG_TSRD
, TSRD_BA
);
1308 he_writel(he_dev
, CONFIG_TMABR
, TMABR_BA
);
1309 he_writel(he_dev
, CONFIG_TPDBA
, TPD_BA
);
1313 * receive connection memory map
1315 * 0x0 ___________________
1321 * 0x8000|___________________|
1324 * | LBM | link lists of local
1325 * | tx | buffer memory
1327 * 0xd000|___________________|
1330 * 0xe000|___________________|
1333 * |___________________|
1336 * 0xffff|___________________|
1339 he_writel(he_dev
, 0x08000, RCMLBM_BA
);
1340 he_writel(he_dev
, 0x0e000, RCMRSRB_BA
);
1341 he_writel(he_dev
, 0x0d800, RCMABR_BA
);
1343 /* 5.1.4 initialize local buffer free pools linked lists */
1345 he_init_rx_lbfp0(he_dev
);
1346 he_init_rx_lbfp1(he_dev
);
1348 he_writel(he_dev
, 0x0, RLBC_H
);
1349 he_writel(he_dev
, 0x0, RLBC_T
);
1350 he_writel(he_dev
, 0x0, RLBC_H2
);
1352 he_writel(he_dev
, 512, RXTHRSH
); /* 10% of r0+r1 buffers */
1353 he_writel(he_dev
, 256, LITHRSH
); /* 5% of r0+r1 buffers */
1355 he_init_tx_lbfp(he_dev
);
1357 he_writel(he_dev
, he_is622(he_dev
) ? 0x104780 : 0x800, UBUFF_BA
);
1359 /* 5.1.5 initialize intermediate receive queues */
1361 if (he_is622(he_dev
)) {
1362 he_writel(he_dev
, 0x000f, G0_INMQ_S
);
1363 he_writel(he_dev
, 0x200f, G0_INMQ_L
);
1365 he_writel(he_dev
, 0x001f, G1_INMQ_S
);
1366 he_writel(he_dev
, 0x201f, G1_INMQ_L
);
1368 he_writel(he_dev
, 0x002f, G2_INMQ_S
);
1369 he_writel(he_dev
, 0x202f, G2_INMQ_L
);
1371 he_writel(he_dev
, 0x003f, G3_INMQ_S
);
1372 he_writel(he_dev
, 0x203f, G3_INMQ_L
);
1374 he_writel(he_dev
, 0x004f, G4_INMQ_S
);
1375 he_writel(he_dev
, 0x204f, G4_INMQ_L
);
1377 he_writel(he_dev
, 0x005f, G5_INMQ_S
);
1378 he_writel(he_dev
, 0x205f, G5_INMQ_L
);
1380 he_writel(he_dev
, 0x006f, G6_INMQ_S
);
1381 he_writel(he_dev
, 0x206f, G6_INMQ_L
);
1383 he_writel(he_dev
, 0x007f, G7_INMQ_S
);
1384 he_writel(he_dev
, 0x207f, G7_INMQ_L
);
1386 he_writel(he_dev
, 0x0000, G0_INMQ_S
);
1387 he_writel(he_dev
, 0x0008, G0_INMQ_L
);
1389 he_writel(he_dev
, 0x0001, G1_INMQ_S
);
1390 he_writel(he_dev
, 0x0009, G1_INMQ_L
);
1392 he_writel(he_dev
, 0x0002, G2_INMQ_S
);
1393 he_writel(he_dev
, 0x000a, G2_INMQ_L
);
1395 he_writel(he_dev
, 0x0003, G3_INMQ_S
);
1396 he_writel(he_dev
, 0x000b, G3_INMQ_L
);
1398 he_writel(he_dev
, 0x0004, G4_INMQ_S
);
1399 he_writel(he_dev
, 0x000c, G4_INMQ_L
);
1401 he_writel(he_dev
, 0x0005, G5_INMQ_S
);
1402 he_writel(he_dev
, 0x000d, G5_INMQ_L
);
1404 he_writel(he_dev
, 0x0006, G6_INMQ_S
);
1405 he_writel(he_dev
, 0x000e, G6_INMQ_L
);
1407 he_writel(he_dev
, 0x0007, G7_INMQ_S
);
1408 he_writel(he_dev
, 0x000f, G7_INMQ_L
);
1411 /* 5.1.6 application tunable parameters */
1413 he_writel(he_dev
, 0x0, MCC
);
1414 he_writel(he_dev
, 0x0, OEC
);
1415 he_writel(he_dev
, 0x0, DCC
);
1416 he_writel(he_dev
, 0x0, CEC
);
1418 /* 5.1.7 cs block initialization */
1420 he_init_cs_block(he_dev
);
1422 /* 5.1.8 cs block connection memory initialization */
1424 if (he_init_cs_block_rcm(he_dev
) < 0)
1427 /* 5.1.10 initialize host structures */
1429 he_init_tpdrq(he_dev
);
1431 he_dev
->tpd_pool
= pci_pool_create("tpd", he_dev
->pci_dev
,
1432 sizeof(struct he_tpd
), TPD_ALIGNMENT
, 0);
1433 if (he_dev
->tpd_pool
== NULL
) {
1434 hprintk("unable to create tpd pci_pool\n");
1438 INIT_LIST_HEAD(&he_dev
->outstanding_tpds
);
1440 if (he_init_group(he_dev
, 0) != 0)
1443 for (group
= 1; group
< HE_NUM_GROUPS
; ++group
) {
1444 he_writel(he_dev
, 0x0, G0_RBPS_S
+ (group
* 32));
1445 he_writel(he_dev
, 0x0, G0_RBPS_T
+ (group
* 32));
1446 he_writel(he_dev
, 0x0, G0_RBPS_QI
+ (group
* 32));
1447 he_writel(he_dev
, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1448 G0_RBPS_BS
+ (group
* 32));
1450 he_writel(he_dev
, 0x0, G0_RBPL_S
+ (group
* 32));
1451 he_writel(he_dev
, 0x0, G0_RBPL_T
+ (group
* 32));
1452 he_writel(he_dev
, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1453 G0_RBPL_QI
+ (group
* 32));
1454 he_writel(he_dev
, 0x0, G0_RBPL_BS
+ (group
* 32));
1456 he_writel(he_dev
, 0x0, G0_RBRQ_ST
+ (group
* 16));
1457 he_writel(he_dev
, 0x0, G0_RBRQ_H
+ (group
* 16));
1458 he_writel(he_dev
, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1459 G0_RBRQ_Q
+ (group
* 16));
1460 he_writel(he_dev
, 0x0, G0_RBRQ_I
+ (group
* 16));
1462 he_writel(he_dev
, 0x0, G0_TBRQ_B_T
+ (group
* 16));
1463 he_writel(he_dev
, 0x0, G0_TBRQ_H
+ (group
* 16));
1464 he_writel(he_dev
, TBRQ_THRESH(0x1),
1465 G0_TBRQ_THRESH
+ (group
* 16));
1466 he_writel(he_dev
, 0x0, G0_TBRQ_S
+ (group
* 16));
1469 /* host status page */
1471 he_dev
->hsp
= pci_alloc_consistent(he_dev
->pci_dev
,
1472 sizeof(struct he_hsp
), &he_dev
->hsp_phys
);
1473 if (he_dev
->hsp
== NULL
) {
1474 hprintk("failed to allocate host status page\n");
1477 memset(he_dev
->hsp
, 0, sizeof(struct he_hsp
));
1478 he_writel(he_dev
, he_dev
->hsp_phys
, HSP_BA
);
1480 /* initialize framer */
1482 #ifdef CONFIG_ATM_HE_USE_SUNI
1483 if (he_isMM(he_dev
))
1484 suni_init(he_dev
->atm_dev
);
1485 if (he_dev
->atm_dev
->phy
&& he_dev
->atm_dev
->phy
->start
)
1486 he_dev
->atm_dev
->phy
->start(he_dev
->atm_dev
);
1487 #endif /* CONFIG_ATM_HE_USE_SUNI */
1490 /* this really should be in suni.c but for now... */
1493 val
= he_phy_get(he_dev
->atm_dev
, SUNI_TPOP_APM
);
1494 val
= (val
& ~SUNI_TPOP_APM_S
) | (SUNI_TPOP_S_SDH
<< SUNI_TPOP_APM_S_SHIFT
);
1495 he_phy_put(he_dev
->atm_dev
, val
, SUNI_TPOP_APM
);
1496 he_phy_put(he_dev
->atm_dev
, SUNI_TACP_IUCHP_CLP
, SUNI_TACP_IUCHP
);
1499 /* 5.1.12 enable transmit and receive */
1501 reg
= he_readl_mbox(he_dev
, CS_ERCTL0
);
1502 reg
|= TX_ENABLE
|ER_ENABLE
;
1503 he_writel_mbox(he_dev
, reg
, CS_ERCTL0
);
1505 reg
= he_readl(he_dev
, RC_CONFIG
);
1507 he_writel(he_dev
, reg
, RC_CONFIG
);
1509 for (i
= 0; i
< HE_NUM_CS_STPER
; ++i
) {
1510 he_dev
->cs_stper
[i
].inuse
= 0;
1511 he_dev
->cs_stper
[i
].pcr
= -1;
1513 he_dev
->total_bw
= 0;
1516 /* atm linux initialization */
1518 he_dev
->atm_dev
->ci_range
.vpi_bits
= he_dev
->vpibits
;
1519 he_dev
->atm_dev
->ci_range
.vci_bits
= he_dev
->vcibits
;
1521 he_dev
->irq_peak
= 0;
1522 he_dev
->rbrq_peak
= 0;
1523 he_dev
->rbpl_peak
= 0;
1524 he_dev
->tbrq_peak
= 0;
1526 HPRINTK("hell bent for leather!\n");
1532 he_stop(struct he_dev
*he_dev
)
1534 struct he_buff
*heb
, *next
;
1535 struct pci_dev
*pci_dev
;
1536 u32 gen_cntl_0
, reg
;
1539 pci_dev
= he_dev
->pci_dev
;
1541 /* disable interrupts */
1543 if (he_dev
->membase
) {
1544 pci_read_config_dword(pci_dev
, GEN_CNTL_0
, &gen_cntl_0
);
1545 gen_cntl_0
&= ~(INT_PROC_ENBL
| INIT_ENB
);
1546 pci_write_config_dword(pci_dev
, GEN_CNTL_0
, gen_cntl_0
);
1548 tasklet_disable(&he_dev
->tasklet
);
1550 /* disable recv and transmit */
1552 reg
= he_readl_mbox(he_dev
, CS_ERCTL0
);
1553 reg
&= ~(TX_ENABLE
|ER_ENABLE
);
1554 he_writel_mbox(he_dev
, reg
, CS_ERCTL0
);
1556 reg
= he_readl(he_dev
, RC_CONFIG
);
1557 reg
&= ~(RX_ENABLE
);
1558 he_writel(he_dev
, reg
, RC_CONFIG
);
1561 #ifdef CONFIG_ATM_HE_USE_SUNI
1562 if (he_dev
->atm_dev
->phy
&& he_dev
->atm_dev
->phy
->stop
)
1563 he_dev
->atm_dev
->phy
->stop(he_dev
->atm_dev
);
1564 #endif /* CONFIG_ATM_HE_USE_SUNI */
1567 free_irq(he_dev
->irq
, he_dev
);
1569 if (he_dev
->irq_base
)
1570 pci_free_consistent(he_dev
->pci_dev
, (CONFIG_IRQ_SIZE
+1)
1571 * sizeof(struct he_irq
), he_dev
->irq_base
, he_dev
->irq_phys
);
1574 pci_free_consistent(he_dev
->pci_dev
, sizeof(struct he_hsp
),
1575 he_dev
->hsp
, he_dev
->hsp_phys
);
1577 if (he_dev
->rbpl_base
) {
1578 list_for_each_entry_safe(heb
, next
, &he_dev
->rbpl_outstanding
, entry
)
1579 pci_pool_free(he_dev
->rbpl_pool
, heb
, heb
->mapping
);
1581 pci_free_consistent(he_dev
->pci_dev
, CONFIG_RBPL_SIZE
1582 * sizeof(struct he_rbp
), he_dev
->rbpl_base
, he_dev
->rbpl_phys
);
1585 kfree(he_dev
->rbpl_virt
);
1586 kfree(he_dev
->rbpl_table
);
1588 if (he_dev
->rbpl_pool
)
1589 pci_pool_destroy(he_dev
->rbpl_pool
);
1591 if (he_dev
->rbrq_base
)
1592 pci_free_consistent(he_dev
->pci_dev
, CONFIG_RBRQ_SIZE
* sizeof(struct he_rbrq
),
1593 he_dev
->rbrq_base
, he_dev
->rbrq_phys
);
1595 if (he_dev
->tbrq_base
)
1596 pci_free_consistent(he_dev
->pci_dev
, CONFIG_TBRQ_SIZE
* sizeof(struct he_tbrq
),
1597 he_dev
->tbrq_base
, he_dev
->tbrq_phys
);
1599 if (he_dev
->tpdrq_base
)
1600 pci_free_consistent(he_dev
->pci_dev
, CONFIG_TBRQ_SIZE
* sizeof(struct he_tbrq
),
1601 he_dev
->tpdrq_base
, he_dev
->tpdrq_phys
);
1603 if (he_dev
->tpd_pool
)
1604 pci_pool_destroy(he_dev
->tpd_pool
);
1606 if (he_dev
->pci_dev
) {
1607 pci_read_config_word(he_dev
->pci_dev
, PCI_COMMAND
, &command
);
1608 command
&= ~(PCI_COMMAND_MEMORY
| PCI_COMMAND_MASTER
);
1609 pci_write_config_word(he_dev
->pci_dev
, PCI_COMMAND
, command
);
1612 if (he_dev
->membase
)
1613 iounmap(he_dev
->membase
);
1616 static struct he_tpd
*
1617 __alloc_tpd(struct he_dev
*he_dev
)
1622 tpd
= pci_pool_alloc(he_dev
->tpd_pool
, GFP_ATOMIC
|GFP_DMA
, &mapping
);
1626 tpd
->status
= TPD_ADDR(mapping
);
1628 tpd
->iovec
[0].addr
= 0; tpd
->iovec
[0].len
= 0;
1629 tpd
->iovec
[1].addr
= 0; tpd
->iovec
[1].len
= 0;
1630 tpd
->iovec
[2].addr
= 0; tpd
->iovec
[2].len
= 0;
/*
 * AAL5_LEN -- extract the 16-bit pdu length from an aal5 trailer.
 * (buf) points at the reassembled pdu, (len) is its total byte count;
 * the length field sits in the two bytes at offsets len-6 (msb) and
 * len-5 (lsb).
 */
#define AAL5_LEN(buf,len) \
	((((unsigned char *)(buf))[(len)-6] << 8) | \
	 (((unsigned char *)(buf))[(len)-5]))
1641 * aal5 packets can optionally return the tcp checksum in the lower
1642 * 16 bits of the crc (RSR0_TCP_CKSUM)
/*
 * TCP_CKSUM -- extract the 16-bit checksum the hardware leaves in the
 * last two bytes of the buffer ((len)-2 is the msb, (len)-1 the lsb).
 *
 * Fix: the second index used [(len-1)], leaving the macro argument
 * unparenthesized before the subtraction; an argument built from a
 * lower-precedence operator (e.g. `a ^ b`) would bind as a ^ (b - 1).
 * Now parenthesized as ((len)-1), consistent with AAL5_LEN above.
 */
#define TCP_CKSUM(buf,len) \
	((((unsigned char *)(buf))[(len)-2] << 8) | \
	 (((unsigned char *)(buf))[(len)-1]))
1650 he_service_rbrq(struct he_dev
*he_dev
, int group
)
1652 struct he_rbrq
*rbrq_tail
= (struct he_rbrq
*)
1653 ((unsigned long)he_dev
->rbrq_base
|
1654 he_dev
->hsp
->group
[group
].rbrq_tail
);
1655 unsigned cid
, lastcid
= -1;
1656 struct sk_buff
*skb
;
1657 struct atm_vcc
*vcc
= NULL
;
1658 struct he_vcc
*he_vcc
;
1659 struct he_buff
*heb
, *next
;
1661 int pdus_assembled
= 0;
1664 read_lock(&vcc_sklist_lock
);
1665 while (he_dev
->rbrq_head
!= rbrq_tail
) {
1668 HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1669 he_dev
->rbrq_head
, group
,
1670 RBRQ_ADDR(he_dev
->rbrq_head
),
1671 RBRQ_BUFLEN(he_dev
->rbrq_head
),
1672 RBRQ_CID(he_dev
->rbrq_head
),
1673 RBRQ_CRC_ERR(he_dev
->rbrq_head
) ? " CRC_ERR" : "",
1674 RBRQ_LEN_ERR(he_dev
->rbrq_head
) ? " LEN_ERR" : "",
1675 RBRQ_END_PDU(he_dev
->rbrq_head
) ? " END_PDU" : "",
1676 RBRQ_AAL5_PROT(he_dev
->rbrq_head
) ? " AAL5_PROT" : "",
1677 RBRQ_CON_CLOSED(he_dev
->rbrq_head
) ? " CON_CLOSED" : "",
1678 RBRQ_HBUF_ERR(he_dev
->rbrq_head
) ? " HBUF_ERR" : "");
1680 i
= RBRQ_ADDR(he_dev
->rbrq_head
) >> RBP_IDX_OFFSET
;
1681 heb
= he_dev
->rbpl_virt
[i
];
1683 cid
= RBRQ_CID(he_dev
->rbrq_head
);
1685 vcc
= __find_vcc(he_dev
, cid
);
1688 if (vcc
== NULL
|| (he_vcc
= HE_VCC(vcc
)) == NULL
) {
1689 hprintk("vcc/he_vcc == NULL (cid 0x%x)\n", cid
);
1690 if (!RBRQ_HBUF_ERR(he_dev
->rbrq_head
)) {
1691 clear_bit(i
, he_dev
->rbpl_table
);
1692 list_del(&heb
->entry
);
1693 pci_pool_free(he_dev
->rbpl_pool
, heb
, heb
->mapping
);
1696 goto next_rbrq_entry
;
1699 if (RBRQ_HBUF_ERR(he_dev
->rbrq_head
)) {
1700 hprintk("HBUF_ERR! (cid 0x%x)\n", cid
);
1701 atomic_inc(&vcc
->stats
->rx_drop
);
1702 goto return_host_buffers
;
1705 heb
->len
= RBRQ_BUFLEN(he_dev
->rbrq_head
) * 4;
1706 clear_bit(i
, he_dev
->rbpl_table
);
1707 list_move_tail(&heb
->entry
, &he_vcc
->buffers
);
1708 he_vcc
->pdu_len
+= heb
->len
;
1710 if (RBRQ_CON_CLOSED(he_dev
->rbrq_head
)) {
1712 HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid
);
1713 wake_up(&he_vcc
->rx_waitq
);
1714 goto return_host_buffers
;
1717 if (!RBRQ_END_PDU(he_dev
->rbrq_head
))
1718 goto next_rbrq_entry
;
1720 if (RBRQ_LEN_ERR(he_dev
->rbrq_head
)
1721 || RBRQ_CRC_ERR(he_dev
->rbrq_head
)) {
1722 HPRINTK("%s%s (%d.%d)\n",
1723 RBRQ_CRC_ERR(he_dev
->rbrq_head
)
1725 RBRQ_LEN_ERR(he_dev
->rbrq_head
)
1727 vcc
->vpi
, vcc
->vci
);
1728 atomic_inc(&vcc
->stats
->rx_err
);
1729 goto return_host_buffers
;
1732 skb
= atm_alloc_charge(vcc
, he_vcc
->pdu_len
+ rx_skb_reserve
,
1735 HPRINTK("charge failed (%d.%d)\n", vcc
->vpi
, vcc
->vci
);
1736 goto return_host_buffers
;
1739 if (rx_skb_reserve
> 0)
1740 skb_reserve(skb
, rx_skb_reserve
);
1742 __net_timestamp(skb
);
1744 list_for_each_entry(heb
, &he_vcc
->buffers
, entry
)
1745 memcpy(skb_put(skb
, heb
->len
), &heb
->data
, heb
->len
);
1747 switch (vcc
->qos
.aal
) {
1749 /* 2.10.1.5 raw cell receive */
1750 skb
->len
= ATM_AAL0_SDU
;
1751 skb_set_tail_pointer(skb
, skb
->len
);
1754 /* 2.10.1.2 aal5 receive */
1756 skb
->len
= AAL5_LEN(skb
->data
, he_vcc
->pdu_len
);
1757 skb_set_tail_pointer(skb
, skb
->len
);
1758 #ifdef USE_CHECKSUM_HW
1759 if (vcc
->vpi
== 0 && vcc
->vci
>= ATM_NOT_RSV_VCI
) {
1760 skb
->ip_summed
= CHECKSUM_COMPLETE
;
1761 skb
->csum
= TCP_CKSUM(skb
->data
,
1768 #ifdef should_never_happen
1769 if (skb
->len
> vcc
->qos
.rxtp
.max_sdu
)
1770 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb
->len
, vcc
->qos
.rxtp
.max_sdu
, cid
);
1774 ATM_SKB(skb
)->vcc
= vcc
;
1776 spin_unlock(&he_dev
->global_lock
);
1777 vcc
->push(vcc
, skb
);
1778 spin_lock(&he_dev
->global_lock
);
1780 atomic_inc(&vcc
->stats
->rx
);
1782 return_host_buffers
:
1785 list_for_each_entry_safe(heb
, next
, &he_vcc
->buffers
, entry
)
1786 pci_pool_free(he_dev
->rbpl_pool
, heb
, heb
->mapping
);
1787 INIT_LIST_HEAD(&he_vcc
->buffers
);
1788 he_vcc
->pdu_len
= 0;
1791 he_dev
->rbrq_head
= (struct he_rbrq
*)
1792 ((unsigned long) he_dev
->rbrq_base
|
1793 RBRQ_MASK(he_dev
->rbrq_head
+ 1));
1796 read_unlock(&vcc_sklist_lock
);
1799 if (updated
> he_dev
->rbrq_peak
)
1800 he_dev
->rbrq_peak
= updated
;
1802 he_writel(he_dev
, RBRQ_MASK(he_dev
->rbrq_head
),
1803 G0_RBRQ_H
+ (group
* 16));
1806 return pdus_assembled
;
1810 he_service_tbrq(struct he_dev
*he_dev
, int group
)
1812 struct he_tbrq
*tbrq_tail
= (struct he_tbrq
*)
1813 ((unsigned long)he_dev
->tbrq_base
|
1814 he_dev
->hsp
->group
[group
].tbrq_tail
);
1816 int slot
, updated
= 0;
1817 struct he_tpd
*__tpd
;
1819 /* 2.1.6 transmit buffer return queue */
1821 while (he_dev
->tbrq_head
!= tbrq_tail
) {
1824 HPRINTK("tbrq%d 0x%x%s%s\n",
1826 TBRQ_TPD(he_dev
->tbrq_head
),
1827 TBRQ_EOS(he_dev
->tbrq_head
) ? " EOS" : "",
1828 TBRQ_MULTIPLE(he_dev
->tbrq_head
) ? " MULTIPLE" : "");
1830 list_for_each_entry(__tpd
, &he_dev
->outstanding_tpds
, entry
) {
1831 if (TPD_ADDR(__tpd
->status
) == TBRQ_TPD(he_dev
->tbrq_head
)) {
1833 list_del(&__tpd
->entry
);
1839 hprintk("unable to locate tpd for dma buffer %x\n",
1840 TBRQ_TPD(he_dev
->tbrq_head
));
1841 goto next_tbrq_entry
;
1844 if (TBRQ_EOS(he_dev
->tbrq_head
)) {
1845 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
1846 he_mkcid(he_dev
, tpd
->vcc
->vpi
, tpd
->vcc
->vci
));
1848 wake_up(&HE_VCC(tpd
->vcc
)->tx_waitq
);
1850 goto next_tbrq_entry
;
1853 for (slot
= 0; slot
< TPD_MAXIOV
; ++slot
) {
1854 if (tpd
->iovec
[slot
].addr
)
1855 pci_unmap_single(he_dev
->pci_dev
,
1856 tpd
->iovec
[slot
].addr
,
1857 tpd
->iovec
[slot
].len
& TPD_LEN_MASK
,
1859 if (tpd
->iovec
[slot
].len
& TPD_LST
)
1864 if (tpd
->skb
) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
1865 if (tpd
->vcc
&& tpd
->vcc
->pop
)
1866 tpd
->vcc
->pop(tpd
->vcc
, tpd
->skb
);
1868 dev_kfree_skb_any(tpd
->skb
);
1873 pci_pool_free(he_dev
->tpd_pool
, tpd
, TPD_ADDR(tpd
->status
));
1874 he_dev
->tbrq_head
= (struct he_tbrq
*)
1875 ((unsigned long) he_dev
->tbrq_base
|
1876 TBRQ_MASK(he_dev
->tbrq_head
+ 1));
1880 if (updated
> he_dev
->tbrq_peak
)
1881 he_dev
->tbrq_peak
= updated
;
1883 he_writel(he_dev
, TBRQ_MASK(he_dev
->tbrq_head
),
1884 G0_TBRQ_H
+ (group
* 16));
1889 he_service_rbpl(struct he_dev
*he_dev
, int group
)
1891 struct he_rbp
*new_tail
;
1892 struct he_rbp
*rbpl_head
;
1893 struct he_buff
*heb
;
1898 rbpl_head
= (struct he_rbp
*) ((unsigned long)he_dev
->rbpl_base
|
1899 RBPL_MASK(he_readl(he_dev
, G0_RBPL_S
)));
1902 new_tail
= (struct he_rbp
*) ((unsigned long)he_dev
->rbpl_base
|
1903 RBPL_MASK(he_dev
->rbpl_tail
+1));
1905 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
1906 if (new_tail
== rbpl_head
)
1909 i
= find_next_zero_bit(he_dev
->rbpl_table
, RBPL_TABLE_SIZE
, he_dev
->rbpl_hint
);
1910 if (i
> (RBPL_TABLE_SIZE
- 1)) {
1911 i
= find_first_zero_bit(he_dev
->rbpl_table
, RBPL_TABLE_SIZE
);
1912 if (i
> (RBPL_TABLE_SIZE
- 1))
1915 he_dev
->rbpl_hint
= i
+ 1;
1917 heb
= pci_pool_alloc(he_dev
->rbpl_pool
, GFP_ATOMIC
|GFP_DMA
, &mapping
);
1920 heb
->mapping
= mapping
;
1921 list_add(&heb
->entry
, &he_dev
->rbpl_outstanding
);
1922 he_dev
->rbpl_virt
[i
] = heb
;
1923 set_bit(i
, he_dev
->rbpl_table
);
1924 new_tail
->idx
= i
<< RBP_IDX_OFFSET
;
1925 new_tail
->phys
= mapping
+ offsetof(struct he_buff
, data
);
1927 he_dev
->rbpl_tail
= new_tail
;
1932 he_writel(he_dev
, RBPL_MASK(he_dev
->rbpl_tail
), G0_RBPL_T
);
1936 he_tasklet(unsigned long data
)
1938 unsigned long flags
;
1939 struct he_dev
*he_dev
= (struct he_dev
*) data
;
1943 HPRINTK("tasklet (0x%lx)\n", data
);
1944 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
1946 while (he_dev
->irq_head
!= he_dev
->irq_tail
) {
1949 type
= ITYPE_TYPE(he_dev
->irq_head
->isw
);
1950 group
= ITYPE_GROUP(he_dev
->irq_head
->isw
);
1953 case ITYPE_RBRQ_THRESH
:
1954 HPRINTK("rbrq%d threshold\n", group
);
1956 case ITYPE_RBRQ_TIMER
:
1957 if (he_service_rbrq(he_dev
, group
))
1958 he_service_rbpl(he_dev
, group
);
1960 case ITYPE_TBRQ_THRESH
:
1961 HPRINTK("tbrq%d threshold\n", group
);
1963 case ITYPE_TPD_COMPLETE
:
1964 he_service_tbrq(he_dev
, group
);
1966 case ITYPE_RBPL_THRESH
:
1967 he_service_rbpl(he_dev
, group
);
1969 case ITYPE_RBPS_THRESH
:
1970 /* shouldn't happen unless small buffers enabled */
1973 HPRINTK("phy interrupt\n");
1974 #ifdef CONFIG_ATM_HE_USE_SUNI
1975 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
1976 if (he_dev
->atm_dev
->phy
&& he_dev
->atm_dev
->phy
->interrupt
)
1977 he_dev
->atm_dev
->phy
->interrupt(he_dev
->atm_dev
);
1978 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
1982 switch (type
|group
) {
1984 hprintk("parity error\n");
1987 hprintk("abort 0x%x\n", he_readl(he_dev
, ABORT_ADDR
));
1991 case ITYPE_TYPE(ITYPE_INVALID
):
1992 /* see 8.1.1 -- check all queues */
1994 HPRINTK("isw not updated 0x%x\n", he_dev
->irq_head
->isw
);
1996 he_service_rbrq(he_dev
, 0);
1997 he_service_rbpl(he_dev
, 0);
1998 he_service_tbrq(he_dev
, 0);
2001 hprintk("bad isw 0x%x?\n", he_dev
->irq_head
->isw
);
2004 he_dev
->irq_head
->isw
= ITYPE_INVALID
;
2006 he_dev
->irq_head
= (struct he_irq
*) NEXT_ENTRY(he_dev
->irq_base
, he_dev
->irq_head
, IRQ_MASK
);
2010 if (updated
> he_dev
->irq_peak
)
2011 he_dev
->irq_peak
= updated
;
2014 IRQ_SIZE(CONFIG_IRQ_SIZE
) |
2015 IRQ_THRESH(CONFIG_IRQ_THRESH
) |
2016 IRQ_TAIL(he_dev
->irq_tail
), IRQ0_HEAD
);
2017 (void) he_readl(he_dev
, INT_FIFO
); /* 8.1.2 controller errata; flush posted writes */
2019 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2023 he_irq_handler(int irq
, void *dev_id
)
2025 unsigned long flags
;
2026 struct he_dev
*he_dev
= (struct he_dev
* )dev_id
;
2032 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2034 he_dev
->irq_tail
= (struct he_irq
*) (((unsigned long)he_dev
->irq_base
) |
2035 (*he_dev
->irq_tailoffset
<< 2));
2037 if (he_dev
->irq_tail
== he_dev
->irq_head
) {
2038 HPRINTK("tailoffset not updated?\n");
2039 he_dev
->irq_tail
= (struct he_irq
*) ((unsigned long)he_dev
->irq_base
|
2040 ((he_readl(he_dev
, IRQ0_BASE
) & IRQ_MASK
) << 2));
2041 (void) he_readl(he_dev
, INT_FIFO
); /* 8.1.2 controller errata */
2045 if (he_dev
->irq_head
== he_dev
->irq_tail
/* && !IRQ_PENDING */)
2046 hprintk("spurious (or shared) interrupt?\n");
2049 if (he_dev
->irq_head
!= he_dev
->irq_tail
) {
2051 tasklet_schedule(&he_dev
->tasklet
);
2052 he_writel(he_dev
, INT_CLEAR_A
, INT_FIFO
); /* clear interrupt */
2053 (void) he_readl(he_dev
, INT_FIFO
); /* flush posted writes */
2055 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2056 return IRQ_RETVAL(handled
);
2060 static __inline__
void
2061 __enqueue_tpd(struct he_dev
*he_dev
, struct he_tpd
*tpd
, unsigned cid
)
2063 struct he_tpdrq
*new_tail
;
2065 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2066 tpd
, cid
, he_dev
->tpdrq_tail
);
2068 /* new_tail = he_dev->tpdrq_tail; */
2069 new_tail
= (struct he_tpdrq
*) ((unsigned long) he_dev
->tpdrq_base
|
2070 TPDRQ_MASK(he_dev
->tpdrq_tail
+1));
2073 * check to see if we are about to set the tail == head
2074 * if true, update the head pointer from the adapter
2075 * to see if this is really the case (reading the queue
2076 * head for every enqueue would be unnecessarily slow)
2079 if (new_tail
== he_dev
->tpdrq_head
) {
2080 he_dev
->tpdrq_head
= (struct he_tpdrq
*)
2081 (((unsigned long)he_dev
->tpdrq_base
) |
2082 TPDRQ_MASK(he_readl(he_dev
, TPDRQ_B_H
)));
2084 if (new_tail
== he_dev
->tpdrq_head
) {
2087 hprintk("tpdrq full (cid 0x%x)\n", cid
);
2090 * push tpd onto a transmit backlog queue
2091 * after service_tbrq, service the backlog
2092 * for now, we just drop the pdu
2094 for (slot
= 0; slot
< TPD_MAXIOV
; ++slot
) {
2095 if (tpd
->iovec
[slot
].addr
)
2096 pci_unmap_single(he_dev
->pci_dev
,
2097 tpd
->iovec
[slot
].addr
,
2098 tpd
->iovec
[slot
].len
& TPD_LEN_MASK
,
2103 tpd
->vcc
->pop(tpd
->vcc
, tpd
->skb
);
2105 dev_kfree_skb_any(tpd
->skb
);
2106 atomic_inc(&tpd
->vcc
->stats
->tx_err
);
2108 pci_pool_free(he_dev
->tpd_pool
, tpd
, TPD_ADDR(tpd
->status
));
2113 /* 2.1.5 transmit packet descriptor ready queue */
2114 list_add_tail(&tpd
->entry
, &he_dev
->outstanding_tpds
);
2115 he_dev
->tpdrq_tail
->tpd
= TPD_ADDR(tpd
->status
);
2116 he_dev
->tpdrq_tail
->cid
= cid
;
2119 he_dev
->tpdrq_tail
= new_tail
;
2121 he_writel(he_dev
, TPDRQ_MASK(he_dev
->tpdrq_tail
), TPDRQ_T
);
2122 (void) he_readl(he_dev
, TPDRQ_T
); /* flush posted writes */
2126 he_open(struct atm_vcc
*vcc
)
2128 unsigned long flags
;
2129 struct he_dev
*he_dev
= HE_DEV(vcc
->dev
);
2130 struct he_vcc
*he_vcc
;
2132 unsigned cid
, rsr0
, rsr1
, rsr4
, tsr0
, tsr0_aal
, tsr4
, period
, reg
, clock
;
2133 short vpi
= vcc
->vpi
;
2136 if (vci
== ATM_VCI_UNSPEC
|| vpi
== ATM_VPI_UNSPEC
)
2139 HPRINTK("open vcc %p %d.%d\n", vcc
, vpi
, vci
);
2141 set_bit(ATM_VF_ADDR
, &vcc
->flags
);
2143 cid
= he_mkcid(he_dev
, vpi
, vci
);
2145 he_vcc
= kmalloc(sizeof(struct he_vcc
), GFP_ATOMIC
);
2146 if (he_vcc
== NULL
) {
2147 hprintk("unable to allocate he_vcc during open\n");
2151 INIT_LIST_HEAD(&he_vcc
->buffers
);
2152 he_vcc
->pdu_len
= 0;
2153 he_vcc
->rc_index
= -1;
2155 init_waitqueue_head(&he_vcc
->rx_waitq
);
2156 init_waitqueue_head(&he_vcc
->tx_waitq
);
2158 vcc
->dev_data
= he_vcc
;
2160 if (vcc
->qos
.txtp
.traffic_class
!= ATM_NONE
) {
2163 pcr_goal
= atm_pcr_goal(&vcc
->qos
.txtp
);
2165 pcr_goal
= he_dev
->atm_dev
->link_rate
;
2166 if (pcr_goal
< 0) /* means round down, technically */
2167 pcr_goal
= -pcr_goal
;
2169 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid
, pcr_goal
);
2171 switch (vcc
->qos
.aal
) {
2173 tsr0_aal
= TSR0_AAL5
;
2177 tsr0_aal
= TSR0_AAL0_SDU
;
2178 tsr4
= TSR4_AAL0_SDU
;
2185 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2186 tsr0
= he_readl_tsr0(he_dev
, cid
);
2187 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2189 if (TSR0_CONN_STATE(tsr0
) != 0) {
2190 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid
, tsr0
);
2195 switch (vcc
->qos
.txtp
.traffic_class
) {
2197 /* 2.3.3.1 open connection ubr */
2199 tsr0
= TSR0_UBR
| TSR0_GROUP(0) | tsr0_aal
|
2200 TSR0_USE_WMIN
| TSR0_UPDATE_GER
;
2204 /* 2.3.3.2 open connection cbr */
2206 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2207 if ((he_dev
->total_bw
+ pcr_goal
)
2208 > (he_dev
->atm_dev
->link_rate
* 9 / 10))
2214 spin_lock_irqsave(&he_dev
->global_lock
, flags
); /* also protects he_dev->cs_stper[] */
2216 /* find an unused cs_stper register */
2217 for (reg
= 0; reg
< HE_NUM_CS_STPER
; ++reg
)
2218 if (he_dev
->cs_stper
[reg
].inuse
== 0 ||
2219 he_dev
->cs_stper
[reg
].pcr
== pcr_goal
)
2222 if (reg
== HE_NUM_CS_STPER
) {
2224 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2228 he_dev
->total_bw
+= pcr_goal
;
2230 he_vcc
->rc_index
= reg
;
2231 ++he_dev
->cs_stper
[reg
].inuse
;
2232 he_dev
->cs_stper
[reg
].pcr
= pcr_goal
;
2234 clock
= he_is622(he_dev
) ? 66667000 : 50000000;
2235 period
= clock
/ pcr_goal
;
2237 HPRINTK("rc_index = %d period = %d\n",
2240 he_writel_mbox(he_dev
, rate_to_atmf(period
/2),
2242 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2244 tsr0
= TSR0_CBR
| TSR0_GROUP(0) | tsr0_aal
|
2253 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2255 he_writel_tsr0(he_dev
, tsr0
, cid
);
2256 he_writel_tsr4(he_dev
, tsr4
| 1, cid
);
2257 he_writel_tsr1(he_dev
, TSR1_MCR(rate_to_atmf(0)) |
2258 TSR1_PCR(rate_to_atmf(pcr_goal
)), cid
);
2259 he_writel_tsr2(he_dev
, TSR2_ACR(rate_to_atmf(pcr_goal
)), cid
);
2260 he_writel_tsr9(he_dev
, TSR9_OPEN_CONN
, cid
);
2262 he_writel_tsr3(he_dev
, 0x0, cid
);
2263 he_writel_tsr5(he_dev
, 0x0, cid
);
2264 he_writel_tsr6(he_dev
, 0x0, cid
);
2265 he_writel_tsr7(he_dev
, 0x0, cid
);
2266 he_writel_tsr8(he_dev
, 0x0, cid
);
2267 he_writel_tsr10(he_dev
, 0x0, cid
);
2268 he_writel_tsr11(he_dev
, 0x0, cid
);
2269 he_writel_tsr12(he_dev
, 0x0, cid
);
2270 he_writel_tsr13(he_dev
, 0x0, cid
);
2271 he_writel_tsr14(he_dev
, 0x0, cid
);
2272 (void) he_readl_tsr0(he_dev
, cid
); /* flush posted writes */
2273 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2276 if (vcc
->qos
.rxtp
.traffic_class
!= ATM_NONE
) {
2279 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid
,
2280 &HE_VCC(vcc
)->rx_waitq
);
2282 switch (vcc
->qos
.aal
) {
2294 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2296 rsr0
= he_readl_rsr0(he_dev
, cid
);
2297 if (rsr0
& RSR0_OPEN_CONN
) {
2298 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2300 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid
, rsr0
);
2305 rsr1
= RSR1_GROUP(0) | RSR1_RBPL_ONLY
;
2306 rsr4
= RSR4_GROUP(0) | RSR4_RBPL_ONLY
;
2307 rsr0
= vcc
->qos
.rxtp
.traffic_class
== ATM_UBR
?
2308 (RSR0_EPD_ENABLE
|RSR0_PPD_ENABLE
) : 0;
2310 #ifdef USE_CHECKSUM_HW
2311 if (vpi
== 0 && vci
>= ATM_NOT_RSV_VCI
)
2312 rsr0
|= RSR0_TCP_CKSUM
;
2315 he_writel_rsr4(he_dev
, rsr4
, cid
);
2316 he_writel_rsr1(he_dev
, rsr1
, cid
);
2317 /* 5.1.11 last parameter initialized should be
2318 the open/closed indication in rsr0 */
2319 he_writel_rsr0(he_dev
,
2320 rsr0
| RSR0_START_PDU
| RSR0_OPEN_CONN
| aal
, cid
);
2321 (void) he_readl_rsr0(he_dev
, cid
); /* flush posted writes */
2323 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2330 clear_bit(ATM_VF_ADDR
, &vcc
->flags
);
2333 set_bit(ATM_VF_READY
, &vcc
->flags
);
2339 he_close(struct atm_vcc
*vcc
)
2341 unsigned long flags
;
2342 DECLARE_WAITQUEUE(wait
, current
);
2343 struct he_dev
*he_dev
= HE_DEV(vcc
->dev
);
2346 struct he_vcc
*he_vcc
= HE_VCC(vcc
);
2347 #define MAX_RETRY 30
2348 int retry
= 0, sleep
= 1, tx_inuse
;
2350 HPRINTK("close vcc %p %d.%d\n", vcc
, vcc
->vpi
, vcc
->vci
);
2352 clear_bit(ATM_VF_READY
, &vcc
->flags
);
2353 cid
= he_mkcid(he_dev
, vcc
->vpi
, vcc
->vci
);
2355 if (vcc
->qos
.rxtp
.traffic_class
!= ATM_NONE
) {
2358 HPRINTK("close rx cid 0x%x\n", cid
);
2360 /* 2.7.2.2 close receive operation */
2362 /* wait for previous close (if any) to finish */
2364 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2365 while (he_readl(he_dev
, RCC_STAT
) & RCC_BUSY
) {
2366 HPRINTK("close cid 0x%x RCC_BUSY\n", cid
);
2370 set_current_state(TASK_UNINTERRUPTIBLE
);
2371 add_wait_queue(&he_vcc
->rx_waitq
, &wait
);
2373 he_writel_rsr0(he_dev
, RSR0_CLOSE_CONN
, cid
);
2374 (void) he_readl_rsr0(he_dev
, cid
); /* flush posted writes */
2375 he_writel_mbox(he_dev
, cid
, RXCON_CLOSE
);
2376 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2378 timeout
= schedule_timeout(30*HZ
);
2380 remove_wait_queue(&he_vcc
->rx_waitq
, &wait
);
2381 set_current_state(TASK_RUNNING
);
2384 hprintk("close rx timeout cid 0x%x\n", cid
);
2386 HPRINTK("close rx cid 0x%x complete\n", cid
);
2390 if (vcc
->qos
.txtp
.traffic_class
!= ATM_NONE
) {
2391 volatile unsigned tsr4
, tsr0
;
2394 HPRINTK("close tx cid 0x%x\n", cid
);
2398 * ... the host must first stop queueing packets to the TPDRQ
2399 * on the connection to be closed, then wait for all outstanding
2400 * packets to be transmitted and their buffers returned to the
2401 * TBRQ. When the last packet on the connection arrives in the
2402 * TBRQ, the host issues the close command to the adapter.
2405 while (((tx_inuse
= atomic_read(&sk_atm(vcc
)->sk_wmem_alloc
)) > 1) &&
2406 (retry
< MAX_RETRY
)) {
2415 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid
, tx_inuse
);
2417 /* 2.3.1.1 generic close operations with flush */
2419 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2420 he_writel_tsr4_upper(he_dev
, TSR4_FLUSH_CONN
, cid
);
2421 /* also clears TSR4_SESSION_ENDED */
2423 switch (vcc
->qos
.txtp
.traffic_class
) {
2425 he_writel_tsr1(he_dev
,
2426 TSR1_MCR(rate_to_atmf(200000))
2427 | TSR1_PCR(0), cid
);
2430 he_writel_tsr14_upper(he_dev
, TSR14_DELETE
, cid
);
2433 (void) he_readl_tsr4(he_dev
, cid
); /* flush posted writes */
2435 tpd
= __alloc_tpd(he_dev
);
2437 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid
);
2438 goto close_tx_incomplete
;
2440 tpd
->status
|= TPD_EOS
| TPD_INT
;
2445 set_current_state(TASK_UNINTERRUPTIBLE
);
2446 add_wait_queue(&he_vcc
->tx_waitq
, &wait
);
2447 __enqueue_tpd(he_dev
, tpd
, cid
);
2448 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2450 timeout
= schedule_timeout(30*HZ
);
2452 remove_wait_queue(&he_vcc
->tx_waitq
, &wait
);
2453 set_current_state(TASK_RUNNING
);
2455 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2458 hprintk("close tx timeout cid 0x%x\n", cid
);
2459 goto close_tx_incomplete
;
2462 while (!((tsr4
= he_readl_tsr4(he_dev
, cid
)) & TSR4_SESSION_ENDED
)) {
2463 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid
, tsr4
);
2467 while (TSR0_CONN_STATE(tsr0
= he_readl_tsr0(he_dev
, cid
)) != 0) {
2468 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid
, tsr0
);
2472 close_tx_incomplete
:
2474 if (vcc
->qos
.txtp
.traffic_class
== ATM_CBR
) {
2475 int reg
= he_vcc
->rc_index
;
2477 HPRINTK("cs_stper reg = %d\n", reg
);
2479 if (he_dev
->cs_stper
[reg
].inuse
== 0)
2480 hprintk("cs_stper[%d].inuse = 0!\n", reg
);
2482 --he_dev
->cs_stper
[reg
].inuse
;
2484 he_dev
->total_bw
-= he_dev
->cs_stper
[reg
].pcr
;
2486 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2488 HPRINTK("close tx cid 0x%x complete\n", cid
);
2493 clear_bit(ATM_VF_ADDR
, &vcc
->flags
);
/*
 * he_send(): atmdev_ops .send handler -- queue one sk_buff for
 * transmission on the connection (vcc->vpi, vcc->vci).
 *
 * NOTE(review): this extraction has dropped several original lines
 * (the return type line, braces, the vcc->pop()/error-return paths
 * after each failure, the NULL check after __alloc_tpd(), the
 * tpd->vcc / tpd->skb assignments, the slot/i declarations and the
 * final "return 0;").  Restore them from upstream he.c before this
 * will compile.
 */
2497 he_send(struct atm_vcc
*vcc
, struct sk_buff
*skb
)
/* IRQ state for the device-global spinlock. */
2499 unsigned long flags
;
2500 struct he_dev
*he_dev
= HE_DEV(vcc
->dev
);
/* Connection id derived from vpi/vci for this adapter. */
2501 unsigned cid
= he_mkcid(he_dev
, vcc
->vpi
, vcc
->vci
);
2503 #ifdef USE_SCATTERGATHER
/* One TPD buffer descriptor can cover at most 0xffff bytes. */
2507 #define HE_TPD_BUFSIZE 0xffff
2509 HPRINTK("send %d.%d\n", vcc
->vpi
, vcc
->vci
);
/* Reject oversized PDUs; AAL0 must be exactly one ATM_AAL0_SDU. */
2511 if ((skb
->len
> HE_TPD_BUFSIZE
) ||
2512 ((vcc
->qos
.aal
== ATM_AAL0
) && (skb
->len
!= ATM_AAL0_SDU
))) {
2513 hprintk("buffer too large (or small) -- %d bytes\n", skb
->len
);
/* Error path: drop the skb and count a transmit error. */
2517 dev_kfree_skb_any(skb
);
2518 atomic_inc(&vcc
->stats
->tx_err
);
/* Without scatter/gather support a fragmented skb cannot be sent. */
2522 #ifndef USE_SCATTERGATHER
2523 if (skb_shinfo(skb
)->nr_frags
) {
2524 hprintk("no scatter/gather support\n");
2528 dev_kfree_skb_any(skb
);
2529 atomic_inc(&vcc
->stats
->tx_err
);
/* TPD allocation and queueing must not race the interrupt path. */
2533 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2535 tpd
= __alloc_tpd(he_dev
);
/* NOTE(review): the "tpd == NULL" guard around this error path was
   dropped by the extraction. */
2540 dev_kfree_skb_any(skb
);
2541 atomic_inc(&vcc
->stats
->tx_err
);
2542 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
/* AAL5 frames are plain user cells; for AAL0, pull PTI/CLP out of
   byte 3 of the raw cell header, then strip the header so only the
   48-byte payload is handed to the adapter. */
2546 if (vcc
->qos
.aal
== ATM_AAL5
)
2547 tpd
->status
|= TPD_CELLTYPE(TPD_USERCELL
);
2549 char *pti_clp
= (void *) (skb
->data
+ 3);
2552 pti
= (*pti_clp
& ATM_HDR_PTI_MASK
) >> ATM_HDR_PTI_SHIFT
;
2553 clp
= (*pti_clp
& ATM_HDR_CLP
);
2554 tpd
->status
|= TPD_CELLTYPE(pti
);
2556 tpd
->status
|= TPD_CLP
;
2558 skb_pull(skb
, ATM_AAL0_SDU
- ATM_CELL_PAYLOAD
);
/* Scatter/gather path: DMA-map the linear head, then each page
   fragment; when more than TPD_MAXIOV iovecs are needed, queue the
   full TPD and continue in a fresh one. */
2561 #ifdef USE_SCATTERGATHER
2562 tpd
->iovec
[slot
].addr
= pci_map_single(he_dev
->pci_dev
, skb
->data
,
2563 skb_headlen(skb
), PCI_DMA_TODEVICE
);
2564 tpd
->iovec
[slot
].len
= skb_headlen(skb
);
2567 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
2568 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
2570 if (slot
== TPD_MAXIOV
) { /* queue tpd; start new tpd */
2572 tpd
->skb
= NULL
; /* not the last fragment
2573 so dont ->push() yet */
2576 __enqueue_tpd(he_dev
, tpd
, cid
);
2577 tpd
= __alloc_tpd(he_dev
);
/* NOTE(review): as above, the NULL-tpd guard for this error path
   was dropped by the extraction. */
2582 dev_kfree_skb_any(skb
);
2583 atomic_inc(&vcc
->stats
->tx_err
);
2584 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2587 tpd
->status
|= TPD_USERCELL
;
2591 tpd
->iovec
[slot
].addr
= pci_map_single(he_dev
->pci_dev
,
2592 (void *) page_address(frag
->page
) + frag
->page_offset
,
2593 frag
->size
, PCI_DMA_TODEVICE
);
2594 tpd
->iovec
[slot
].len
= frag
->size
;
/* Flag the final iovec as the last buffer of the PDU. */
2599 tpd
->iovec
[slot
- 1].len
|= TPD_LST
;
/* Non-SG path: one mapping covers the whole (linear) skb. */
2601 tpd
->address0
= pci_map_single(he_dev
->pci_dev
, skb
->data
, skb
->len
, PCI_DMA_TODEVICE
);
2602 tpd
->length0
= skb
->len
| TPD_LST
;
/* Interrupt on completion so the skb can be released. */
2604 tpd
->status
|= TPD_INT
;
2609 ATM_SKB(skb
)->vcc
= vcc
;
/* Hand the descriptor to the adapter's transmit queue, then unlock. */
2611 __enqueue_tpd(he_dev
, tpd
, cid
);
2612 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
/* Account one successfully queued PDU. */
2614 atomic_inc(&vcc
->stats
->tx
);
/*
 * he_ioctl(): atmdev_ops .ioctl handler.  Implements a privileged
 * register-read ioctl (struct he_ioctl_reg: {addr, type} in, val out)
 * and otherwise defers to the PHY driver's ioctl when built with
 * CONFIG_ATM_HE_USE_SUNI.
 *
 * NOTE(review): extraction artifacts in this span -- the "int err"
 * declaration, the switch statements on cmd and reg.type, the
 * "reg.val =" left-hand sides of the RCM/TCM/MBOX cases, error
 * returns and braces are missing, and "&reg" has been mis-encoded
 * as the "(R)" glyph on the copy_from_user/copy_to_user lines.
 * Restore from upstream he.c before compiling.
 */
2620 he_ioctl(struct atm_dev
*atm_dev
, unsigned int cmd
, void __user
*arg
)
2622 unsigned long flags
;
2623 struct he_dev
*he_dev
= HE_DEV(atm_dev
);
/* Kernel-side copy of the user's register request. */
2624 struct he_ioctl_reg reg
;
/* Raw register access is restricted to CAP_NET_ADMIN. */
2629 if (!capable(CAP_NET_ADMIN
))
2632 if (copy_from_user(®
, arg
,
2633 sizeof(struct he_ioctl_reg
)))
/* Register reads must not race the interrupt path. */
2636 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
/* Dispatch on reg.type: PCI window, RCM/TCM internal RAM, mailbox. */
2638 case HE_REGTYPE_PCI
:
/* Bounds-check the offset into the PCI register window. */
2639 if (reg
.addr
>= HE_REGMAP_SIZE
) {
2644 reg
.val
= he_readl(he_dev
, reg
.addr
);
2646 case HE_REGTYPE_RCM
:
2648 he_readl_rcm(he_dev
, reg
.addr
);
2650 case HE_REGTYPE_TCM
:
2652 he_readl_tcm(he_dev
, reg
.addr
);
2654 case HE_REGTYPE_MBOX
:
2656 he_readl_mbox(he_dev
, reg
.addr
);
2662 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
/* Copy the request (now carrying the read value) back to userspace. */
2664 if (copy_to_user(arg
, ®
,
2665 sizeof(struct he_ioctl_reg
)))
/* Unknown commands go to the attached PHY, if any. */
2669 #ifdef CONFIG_ATM_HE_USE_SUNI
2670 if (atm_dev
->phy
&& atm_dev
->phy
->ioctl
)
2671 err
= atm_dev
->phy
->ioctl(atm_dev
, cmd
, arg
);
2672 #else /* CONFIG_ATM_HE_USE_SUNI */
2674 #endif /* CONFIG_ATM_HE_USE_SUNI */
2682 he_phy_put(struct atm_dev
*atm_dev
, unsigned char val
, unsigned long addr
)
2684 unsigned long flags
;
2685 struct he_dev
*he_dev
= HE_DEV(atm_dev
);
2687 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val
, addr
);
2689 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2690 he_writel(he_dev
, val
, FRAMER
+ (addr
*4));
2691 (void) he_readl(he_dev
, FRAMER
+ (addr
*4)); /* flush posted writes */
2692 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2696 static unsigned char
2697 he_phy_get(struct atm_dev
*atm_dev
, unsigned long addr
)
2699 unsigned long flags
;
2700 struct he_dev
*he_dev
= HE_DEV(atm_dev
);
2703 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2704 reg
= he_readl(he_dev
, FRAMER
+ (addr
*4));
2705 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2707 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr
, reg
);
/*
 * he_proc_read(): ATM-layer /proc read hook.  Each call emits one
 * line of device status selected by *pos: driver banner, product id
 * and media type, accumulated cell-drop counters, interrupt/queue
 * sizes and peaks, receive-buffer-pool usage, and CBR rate-controller
 * state.
 *
 * NOTE(review): extraction dropped the per-line selection guards
 * (apparently "if (!left--)" style, driven by an int derived from
 * *pos), the declarations of inuse and i, and all braces -- restore
 * from upstream he.c before compiling.
 */
2712 he_proc_read(struct atm_dev
*dev
, loff_t
*pos
, char *page
)
2714 unsigned long flags
;
2715 struct he_dev
*he_dev
= HE_DEV(dev
);
2718 struct he_rbrq
*rbrq_tail
;
2719 struct he_tpdrq
*tpdrq_head
;
2720 int rbpl_head
, rbpl_tail
;
/* Static accumulators: hardware drop counters are summed across
   reads (this suggests the registers clear on read -- confirm
   against the HE manual). */
2722 static long mcc
= 0, oec
= 0, dcc
= 0, cec
= 0;
/* Line 0: banner. */
2727 return sprintf(page
, "ATM he driver\n");
/* Line 1: product id and fiber mode (media bit 6 => single-mode). */
2730 return sprintf(page
, "%s%s\n\n",
2731 he_dev
->prod_id
, he_dev
->media
& 0x40 ? "SM" : "MM");
2734 return sprintf(page
, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
/* Snapshot the four drop counters atomically w.r.t. the IRQ path. */
2736 spin_lock_irqsave(&he_dev
->global_lock
, flags
);
2737 mcc
+= he_readl(he_dev
, MCC
);
2738 oec
+= he_readl(he_dev
, OEC
);
2739 dcc
+= he_readl(he_dev
, DCC
);
2740 cec
+= he_readl(he_dev
, CEC
);
2741 spin_unlock_irqrestore(&he_dev
->global_lock
, flags
);
2744 return sprintf(page
, "%16ld %16ld %13ld %17ld\n\n",
2745 mcc
, oec
, dcc
, cec
);
/* Queue geometry and high-water marks. */
2748 return sprintf(page
, "irq_size = %d inuse = ? peak = %d\n",
2749 CONFIG_IRQ_SIZE
, he_dev
->irq_peak
);
2752 return sprintf(page
, "tpdrq_size = %d inuse = ?\n",
2756 return sprintf(page
, "rbrq_size = %d inuse = ? peak = %d\n",
2757 CONFIG_RBRQ_SIZE
, he_dev
->rbrq_peak
);
2760 return sprintf(page
, "tbrq_size = %d peak = %d\n",
2761 CONFIG_TBRQ_SIZE
, he_dev
->tbrq_peak
);
/* RBPL pool usage: head/tail delta wrapped modulo the pool size,
   then converted from bytes to entries. */
2765 rbpl_head
= RBPL_MASK(he_readl(he_dev
, G0_RBPL_S
));
2766 rbpl_tail
= RBPL_MASK(he_readl(he_dev
, G0_RBPL_T
));
2768 inuse
= rbpl_head
- rbpl_tail
;
2770 inuse
+= CONFIG_RBPL_SIZE
* sizeof(struct he_rbp
);
2771 inuse
/= sizeof(struct he_rbp
);
2774 return sprintf(page
, "rbpl_size = %d inuse = %d\n\n",
2775 CONFIG_RBPL_SIZE
, inuse
);
/* CBR rate-controller table: one output line per period entry. */
2779 return sprintf(page
, "rate controller periods (cbr)\n pcr #vc\n");
2781 for (i
= 0; i
< HE_NUM_CS_STPER
; ++i
)
2783 return sprintf(page
, "cs_stper%-2d %8ld %3d\n", i
,
2784 he_dev
->cs_stper
[i
].pcr
,
2785 he_dev
->cs_stper
[i
].inuse
);
/* Total committed CBR bandwidth against the 90%-of-link-rate cap
   (limit shown is link_rate * 10 / 9). */
2788 return sprintf(page
, "total bw (cbr): %d (limit %d)\n",
2789 he_dev
->total_bw
, he_dev
->atm_dev
->link_rate
* 10 / 9);
2794 /* eeprom routines -- see 4.7 */
/*
 * read_prom_byte(): bit-bang one byte out of the serial EEPROM via
 * the HOST_CNTL register (adapter manual section 4.7).  The READ
 * opcode is shifted out from readtab[], the 8-bit address is clocked
 * out MSB-first (two HOST_CNTL writes per bit, clock phases from
 * clocktab[]), write-enable is dropped, and 8 data bits are clocked
 * back in from the ID_DOUT line.
 *
 * NOTE(review): extraction dropped the declarations of i, j and
 * byte_read, the write-enable bit set between lines 2805/2807, the
 * loop closing braces and the final "return byte_read;" -- restore
 * from upstream he.c before compiling.
 */
2796 static u8
read_prom_byte(struct he_dev
*he_dev
, int addr
)
2798 u32 val
= 0, tmp_read
= 0;
/* Start from the live HOST_CNTL contents so unrelated control bits
   keep their state while the EEPROM lines are toggled. */
2802 val
= readl(he_dev
->membase
+ HOST_CNTL
);
2805 /* Turn on write enable */
2807 he_writel(he_dev
, val
, HOST_CNTL
);
2809 /* Send READ instruction */
2810 for (i
= 0; i
< ARRAY_SIZE(readtab
); i
++) {
2811 he_writel(he_dev
, val
| readtab
[i
], HOST_CNTL
);
2812 udelay(EEPROM_DELAY
);
2815 /* Next, we need to send the byte address to read from */
/* Two writes per address bit: clocktab[] supplies the clock phases,
   bit 9 carries the data level of the current (MSB-first) bit. */
2816 for (i
= 7; i
>= 0; i
--) {
2817 he_writel(he_dev
, val
| clocktab
[j
++] | (((addr
>> i
) & 1) << 9), HOST_CNTL
);
2818 udelay(EEPROM_DELAY
);
2819 he_writel(he_dev
, val
| clocktab
[j
++] | (((addr
>> i
) & 1) << 9), HOST_CNTL
);
2820 udelay(EEPROM_DELAY
);
2825 val
&= 0xFFFFF7FF; /* Turn off write enable */
2826 he_writel(he_dev
, val
, HOST_CNTL
);
2828 /* Now, we can read data from the EEPROM by clocking it in */
/* Sample ID_DOUT once per clock pair, assembling the byte MSB-first. */
2829 for (i
= 7; i
>= 0; i
--) {
2830 he_writel(he_dev
, val
| clocktab
[j
++], HOST_CNTL
);
2831 udelay(EEPROM_DELAY
);
2832 tmp_read
= he_readl(he_dev
, HOST_CNTL
);
2833 byte_read
|= (unsigned char)
2834 ((tmp_read
& ID_DOUT
) >> ID_DOFFSET
<< i
);
2835 he_writel(he_dev
, val
| clocktab
[j
++], HOST_CNTL
);
2836 udelay(EEPROM_DELAY
);
/* End of transaction: drive ID_CS (chip-select handling -- see the
   manual, section 4.7, for the exact polarity). */
2839 he_writel(he_dev
, val
| ID_CS
, HOST_CNTL
);
2840 udelay(EEPROM_DELAY
);
/* Module identity. */
2845 MODULE_LICENSE("GPL");
2846 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
2847 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
/* Load-time tunables; perm 0 means not visible under /sys/module. */
2848 module_param(disable64
, bool, 0);
2849 MODULE_PARM_DESC(disable64
, "disable 64-bit pci bus transfers");
2850 module_param(nvpibits
, short, 0);
2851 MODULE_PARM_DESC(nvpibits
, "numbers of bits for vpi (default 0)");
2852 module_param(nvcibits
, short, 0);
2853 MODULE_PARM_DESC(nvcibits
, "numbers of bits for vci (default 12)");
2854 module_param(rx_skb_reserve
, short, 0);
2855 MODULE_PARM_DESC(rx_skb_reserve
, "padding for receive skb (default 16)");
2856 module_param(irq_coalesce
, bool, 0);
2857 MODULE_PARM_DESC(irq_coalesce
, "use interrupt coalescing (default 1)");
2858 module_param(sdh
, bool, 0);
2859 MODULE_PARM_DESC(sdh
, "use SDH framing (default 0)");
/*
 * PCI ids this driver binds to: the FORE Systems HE adapter.
 * NOTE(review): the terminating "{ 0, }" sentinel entry and the
 * closing "};" of the table were dropped by the extraction --
 * restore before building.
 */
2861 static struct pci_device_id he_pci_tbl
[] = {
2862 { PCI_VDEVICE(FORE
, PCI_DEVICE_ID_FORE_HE
), 0 },
/* Export the table so module autoloading can match the device. */
2866 MODULE_DEVICE_TABLE(pci
, he_pci_tbl
);
/*
 * PCI driver glue: probe/remove callbacks plus the id table above.
 * NOTE(review): the ".name" initializer (presumably .name = "he")
 * and the closing "};" are missing from this extraction -- verify
 * against upstream he.c.
 */
2868 static struct pci_driver he_driver
= {
2870 .probe
= he_init_one
,
2871 .remove
= he_remove_one
,
2872 .id_table
= he_pci_tbl
,
2875 static int __init
he_init(void)
2877 return pci_register_driver(&he_driver
);
2880 static void __exit
he_cleanup(void)
2882 pci_unregister_driver(&he_driver
);
/* Wire up the module entry and exit points. */
2885 module_init(he_init
);
2886 module_exit(he_cleanup
);