1 /* $Id: netjet.c,v 1.29.2.4 2004/02/11 13:21:34 keil Exp $
3 * low level stuff for Traverse Technologie NETJet ISDN cards
6 * Copyright by Karsten Keil <keil@isdn4linux.de>
8 * This software may be used and distributed according to the terms
9 * of the GNU General Public License, incorporated herein by reference.
11 * Thanks to Traverse Technologies Australia for documents and information
13 * 16-Apr-2002 - led code added - Guy Ellis (guy@traverse.com.au)
17 #include <linux/init.h>
22 #include <linux/pci.h>
23 #include <linux/interrupt.h>
24 #include <linux/ppp_defs.h>
28 const char *NETjet_revision
= "$Revision: 1.29.2.4 $";
30 /* Interface functions */
/*
 * NETjet_ReadIC - read one ISAC register through the NETjet I/O window.
 * NOTE(review): fragmented extraction -- the return type, opening brace,
 * the declaration of 'ret' and the final return statement fall on lines
 * missing from this view; confirm against the full source.
 */
33 NETjet_ReadIC(struct IsdnCardState
*cs
, u_char offset
)
/* Clear the two register-bank select bits in the aux data latch. */
37 cs
->hw
.njet
.auxd
&= 0xfc;
/* Select the bank holding the requested register (offset bits 5:4). */
38 cs
->hw
.njet
.auxd
|= (offset
>>4) & 3;
/* Latch the bank selection out through the aux port. */
39 byteout(cs
->hw
.njet
.auxa
, cs
->hw
.njet
.auxd
);
/* Read the register: low 4 offset bits, registers spaced 4 bytes apart. */
40 ret
= bytein(cs
->hw
.njet
.isac
+ ((offset
& 0xf)<<2));
/*
 * NETjet_WriteIC - write one ISAC register through the NETjet I/O window.
 * Mirrors NETjet_ReadIC: bank-select via the aux latch, then the write.
 * NOTE(review): fragmented extraction -- return type and braces are on
 * lines missing from this view.
 */
45 NETjet_WriteIC(struct IsdnCardState
*cs
, u_char offset
, u_char value
)
/* Clear the two register-bank select bits in the aux data latch. */
47 cs
->hw
.njet
.auxd
&= 0xfc;
/* Select the bank holding the target register (offset bits 5:4). */
48 cs
->hw
.njet
.auxd
|= (offset
>>4) & 3;
/* Latch the bank selection out through the aux port. */
49 byteout(cs
->hw
.njet
.auxa
, cs
->hw
.njet
.auxd
);
/* Write the value: low 4 offset bits, registers spaced 4 bytes apart. */
50 byteout(cs
->hw
.njet
.isac
+ ((offset
& 0xf)<<2), value
);
/*
 * NETjet_ReadICfifo - burst-read 'size' bytes from the ISAC FIFO.
 * NOTE(review): fragmented extraction -- return type and braces are on
 * lines missing from this view.
 */
54 NETjet_ReadICfifo(struct IsdnCardState
*cs
, u_char
*data
, int size
)
/* Bank-select bits = 0 selects the FIFO window. */
56 cs
->hw
.njet
.auxd
&= 0xfc;
/* Latch the selection out through the aux port. */
57 byteout(cs
->hw
.njet
.auxa
, cs
->hw
.njet
.auxd
);
/* String input: read 'size' bytes from the ISAC port into 'data'. */
58 insb(cs
->hw
.njet
.isac
, data
, size
);
/*
 * NETjet_WriteICfifo - burst-write 'size' bytes to the ISAC FIFO.
 * Mirrors NETjet_ReadICfifo with outsb instead of insb.
 * NOTE(review): fragmented extraction -- return type and braces are on
 * lines missing from this view.
 */
62 NETjet_WriteICfifo(struct IsdnCardState
*cs
, u_char
*data
, int size
)
/* Bank-select bits = 0 selects the FIFO window. */
64 cs
->hw
.njet
.auxd
&= 0xfc;
/* Latch the selection out through the aux port. */
65 byteout(cs
->hw
.njet
.auxa
, cs
->hw
.njet
.auxd
);
/* String output: write 'size' bytes from 'data' to the ISAC port. */
66 outsb(cs
->hw
.njet
.isac
, data
, size
);
/*
 * fill_mem - preset 'cnt' words of the tiger DMA send ring for one
 * B-channel with the byte 'fill'.
 * NOTE(review): fragmented extraction -- the lines that shift mask/val
 * for the selected channel and store into *p (original lines 72-79 and
 * 85-88, including the loop's closing brace) are missing from this view;
 * only the declaration, locals, loop head and ring-wrap are visible.
 */
69 void fill_mem(struct BCState
*bcs
, u_int
*pos
, u_int cnt
, int chan
, u_char fill
)
/* mask starts on the low byte lane; p walks the ring from 'pos'. */
71 u_int mask
=0x000000ff, val
= 0, *p
=pos
;
80 for (i
=0; i
<cnt
; i
++) {
/* Wrap the ring pointer when it runs past the end of the send buffer. */
83 if (p
> bcs
->hw
.tiger
.s_end
)
84 p
= bcs
->hw
.tiger
.send
;
89 mode_tiger(struct BCState
*bcs
, int mode
, int bc
)
91 struct IsdnCardState
*cs
= bcs
->cs
;
94 if (cs
->debug
& L1_DEB_HSCX
)
95 debugl1(cs
, "Tiger mode %d bchan %d/%d",
96 mode
, bc
, bcs
->channel
);
101 fill_mem(bcs
, bcs
->hw
.tiger
.send
,
102 NETJET_DMA_TXSIZE
, bc
, 0xff);
103 if (cs
->debug
& L1_DEB_HSCX
)
104 debugl1(cs
, "Tiger stat rec %d/%d send %d",
105 bcs
->hw
.tiger
.r_tot
, bcs
->hw
.tiger
.r_err
,
106 bcs
->hw
.tiger
.s_tot
);
107 if ((cs
->bcs
[0].mode
== L1_MODE_NULL
) &&
108 (cs
->bcs
[1].mode
== L1_MODE_NULL
)) {
109 cs
->hw
.njet
.dmactrl
= 0;
110 byteout(cs
->hw
.njet
.base
+ NETJET_DMACTRL
,
111 cs
->hw
.njet
.dmactrl
);
112 byteout(cs
->hw
.njet
.base
+ NETJET_IRQMASK0
, 0);
114 if (cs
->typ
== ISDN_CTYPE_NETJET_S
)
118 led
= 0x01 << (6 + led
); // convert to mask
120 cs
->hw
.njet
.auxd
&= led
;
121 byteout(cs
->hw
.njet
.auxa
, cs
->hw
.njet
.auxd
);
124 case (L1_MODE_TRANS
):
126 case (L1_MODE_HDLC_56K
):
128 fill_mem(bcs
, bcs
->hw
.tiger
.send
,
129 NETJET_DMA_TXSIZE
, bc
, 0xff);
130 bcs
->hw
.tiger
.r_state
= HDLC_ZERO_SEARCH
;
131 bcs
->hw
.tiger
.r_tot
= 0;
132 bcs
->hw
.tiger
.r_bitcnt
= 0;
133 bcs
->hw
.tiger
.r_one
= 0;
134 bcs
->hw
.tiger
.r_err
= 0;
135 bcs
->hw
.tiger
.s_tot
= 0;
136 if (! cs
->hw
.njet
.dmactrl
) {
137 fill_mem(bcs
, bcs
->hw
.tiger
.send
,
138 NETJET_DMA_TXSIZE
, !bc
, 0xff);
139 cs
->hw
.njet
.dmactrl
= 1;
140 byteout(cs
->hw
.njet
.base
+ NETJET_DMACTRL
,
141 cs
->hw
.njet
.dmactrl
);
142 byteout(cs
->hw
.njet
.base
+ NETJET_IRQMASK0
, 0x0f);
143 /* was 0x3f now 0x0f for TJ300 and TJ320 GE 13/07/00 */
145 bcs
->hw
.tiger
.sendp
= bcs
->hw
.tiger
.send
;
146 bcs
->hw
.tiger
.free
= NETJET_DMA_TXSIZE
;
147 test_and_set_bit(BC_FLG_EMPTY
, &bcs
->Flag
);
148 if (cs
->typ
== ISDN_CTYPE_NETJET_S
)
152 led
= 0x01 << (6 + led
); // convert to mask
153 cs
->hw
.njet
.auxd
|= led
;
154 byteout(cs
->hw
.njet
.auxa
, cs
->hw
.njet
.auxd
);
158 if (cs
->debug
& L1_DEB_HSCX
)
159 debugl1(cs
, "tiger: set %x %x %x %x/%x pulse=%d",
160 bytein(cs
->hw
.njet
.base
+ NETJET_DMACTRL
),
161 bytein(cs
->hw
.njet
.base
+ NETJET_IRQMASK0
),
162 bytein(cs
->hw
.njet
.base
+ NETJET_IRQSTAT0
),
163 inl(cs
->hw
.njet
.base
+ NETJET_DMA_READ_ADR
),
164 inl(cs
->hw
.njet
.base
+ NETJET_DMA_WRITE_ADR
),
165 bytein(cs
->hw
.njet
.base
+ NETJET_PULSE_CNT
));
168 static void printframe(struct IsdnCardState
*cs
, u_char
*buf
, int count
, char *s
) {
174 t
+= sprintf(t
, "tiger %s(%4d)", s
, count
);
185 t
+= sprintf(t
, "tiger %s ", s
);
191 #define MAKE_RAW_BYTE for (j=0; j<8; j++) { \
202 bcs->hw.tiger.sendbuf[s_cnt++] = s_val;\
212 bcs->hw.tiger.sendbuf[s_cnt++] = s_val;\
218 static int make_raw_data(struct BCState
*bcs
) {
219 // this make_raw is for 64k
220 register u_int i
,s_cnt
=0;
223 register u_char s_one
= 0;
224 register u_char s_val
= 0;
225 register u_char bitcnt
= 0;
229 debugl1(bcs
->cs
, "tiger make_raw: NULL skb");
232 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = HDLC_FLAG_VALUE
;
234 for (i
=0; i
<bcs
->tx_skb
->len
; i
++) {
235 val
= bcs
->tx_skb
->data
[i
];
236 fcs
= PPP_FCS (fcs
, val
);
242 val
= (fcs
>>8) & 0xff;
244 val
= HDLC_FLAG_VALUE
;
245 for (j
=0; j
<8; j
++) {
253 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = s_val
;
258 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
259 debugl1(bcs
->cs
,"tiger make_raw: in %ld out %d.%d",
260 bcs
->tx_skb
->len
, s_cnt
, bitcnt
);
266 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = s_val
;
267 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = 0xff; // NJ<->NJ thoughput bug fix
269 bcs
->hw
.tiger
.sendcnt
= s_cnt
;
270 bcs
->tx_cnt
-= bcs
->tx_skb
->len
;
271 bcs
->hw
.tiger
.sp
= bcs
->hw
.tiger
.sendbuf
;
277 #define MAKE_RAW_BYTE_56K for (j=0; j<8; j++) { \
290 bcs->hw.tiger.sendbuf[s_cnt++] = s_val;\
302 bcs->hw.tiger.sendbuf[s_cnt++] = s_val;\
308 static int make_raw_data_56k(struct BCState
*bcs
) {
309 // this make_raw is for 56k
310 register u_int i
,s_cnt
=0;
313 register u_char s_one
= 0;
314 register u_char s_val
= 0;
315 register u_char bitcnt
= 0;
319 debugl1(bcs
->cs
, "tiger make_raw_56k: NULL skb");
322 val
= HDLC_FLAG_VALUE
;
323 for (j
=0; j
<8; j
++) {
333 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = s_val
;
339 for (i
=0; i
<bcs
->tx_skb
->len
; i
++) {
340 val
= bcs
->tx_skb
->data
[i
];
341 fcs
= PPP_FCS (fcs
, val
);
347 val
= (fcs
>>8) & 0xff;
349 val
= HDLC_FLAG_VALUE
;
350 for (j
=0; j
<8; j
++) {
360 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = s_val
;
365 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
366 debugl1(bcs
->cs
,"tiger make_raw_56k: in %ld out %d.%d",
367 bcs
->tx_skb
->len
, s_cnt
, bitcnt
);
373 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = s_val
;
374 bcs
->hw
.tiger
.sendbuf
[s_cnt
++] = 0xff; // NJ<->NJ thoughput bug fix
376 bcs
->hw
.tiger
.sendcnt
= s_cnt
;
377 bcs
->tx_cnt
-= bcs
->tx_skb
->len
;
378 bcs
->hw
.tiger
.sp
= bcs
->hw
.tiger
.sendbuf
;
/*
 * got_frame - hand a completed receive frame ('count' bytes of rcvbuf)
 * up to the ISDN stack as an sk_buff.
 * NOTE(review): fragmented extraction -- the 'skb' declaration and the
 * else-branch braces that scope the copy/queue path to a successful
 * allocation are on lines missing from this view.
 */
382 static void got_frame(struct BCState
*bcs
, int count
) {
/* Allocation failure is only logged; the frame is dropped. */
385 if (!(skb
= dev_alloc_skb(count
)))
386 printk(KERN_WARNING
"TIGER: receive out of memory\n");
/* Copy the decoded bytes and queue the skb on the receive queue. */
388 memcpy(skb_put(skb
, count
), bcs
->hw
.tiger
.rcvbuf
, count
);
389 skb_queue_tail(&bcs
->rqueue
, skb
);
/* Flag the event and kick the bottom-half worker to deliver it. */
391 test_and_set_bit(B_RCVBUFREADY
, &bcs
->event
);
392 schedule_work(&bcs
->tqueue
);
/* Optional debug dump of the received frame. */
394 if (bcs
->cs
->debug
& L1_DEB_RECEIVE_FRAME
)
395 printframe(bcs
->cs
, bcs
->hw
.tiger
.rcvbuf
, count
, "rec");
400 static void read_raw(struct BCState
*bcs
, u_int
*buf
, int cnt
){
404 u_int
*pend
= bcs
->hw
.tiger
.rec
+NETJET_DMA_RXSIZE
-1;
405 register u_char state
= bcs
->hw
.tiger
.r_state
;
406 register u_char r_one
= bcs
->hw
.tiger
.r_one
;
407 register u_char r_val
= bcs
->hw
.tiger
.r_val
;
408 register u_int bitcnt
= bcs
->hw
.tiger
.r_bitcnt
;
413 if (bcs
->mode
== L1_MODE_HDLC
) { // it's 64k
421 for (i
=0;i
<cnt
;i
++) {
422 val
= bcs
->channel
? ((*p
>>8) & 0xff) : (*p
& 0xff);
425 p
= bcs
->hw
.tiger
.rec
;
426 if ((val
& mask
) == mask
) {
427 state
= HDLC_ZERO_SEARCH
;
428 bcs
->hw
.tiger
.r_tot
++;
433 for (j
=0;j
<bits
;j
++) {
434 if (state
== HDLC_ZERO_SEARCH
) {
439 state
= HDLC_FLAG_SEARCH
;
440 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
441 debugl1(bcs
->cs
,"tiger read_raw: zBit(%d,%d,%d) %x",
442 bcs
->hw
.tiger
.r_tot
,i
,j
,val
);
444 } else if (state
== HDLC_FLAG_SEARCH
) {
448 state
=HDLC_ZERO_SEARCH
;
454 state
=HDLC_FLAG_FOUND
;
455 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
456 debugl1(bcs
->cs
,"tiger read_raw: flag(%d,%d,%d) %x",
457 bcs
->hw
.tiger
.r_tot
,i
,j
,val
);
461 } else if (state
== HDLC_FLAG_FOUND
) {
465 state
=HDLC_ZERO_SEARCH
;
478 } else if (r_one
!=5) {
485 if ((state
!= HDLC_ZERO_SEARCH
) &&
487 state
=HDLC_FRAME_FOUND
;
488 bcs
->hw
.tiger
.r_fcs
= PPP_INITFCS
;
489 bcs
->hw
.tiger
.rcvbuf
[0] = r_val
;
490 bcs
->hw
.tiger
.r_fcs
= PPP_FCS (bcs
->hw
.tiger
.r_fcs
, r_val
);
491 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
492 debugl1(bcs
->cs
,"tiger read_raw: byte1(%d,%d,%d) rval %x val %x i %x",
493 bcs
->hw
.tiger
.r_tot
,i
,j
,r_val
,val
,
494 bcs
->cs
->hw
.njet
.irqstat0
);
496 } else if (state
== HDLC_FRAME_FOUND
) {
500 state
=HDLC_ZERO_SEARCH
;
513 debugl1(bcs
->cs
, "tiger: frame not byte aligned");
514 state
=HDLC_FLAG_SEARCH
;
515 bcs
->hw
.tiger
.r_err
++;
516 #ifdef ERROR_STATISTIC
520 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
521 debugl1(bcs
->cs
,"tiger frame end(%d,%d): fcs(%x) i %x",
522 i
,j
,bcs
->hw
.tiger
.r_fcs
, bcs
->cs
->hw
.njet
.irqstat0
);
523 if (bcs
->hw
.tiger
.r_fcs
== PPP_GOODFCS
) {
524 got_frame(bcs
, (bitcnt
>>3)-3);
526 if (bcs
->cs
->debug
) {
527 debugl1(bcs
->cs
, "tiger FCS error");
528 printframe(bcs
->cs
, bcs
->hw
.tiger
.rcvbuf
,
529 (bitcnt
>>3)-1, "rec");
530 bcs
->hw
.tiger
.r_err
++;
532 #ifdef ERROR_STATISTIC
536 state
=HDLC_FLAG_FOUND
;
539 } else if (r_one
==5) {
550 if ((state
== HDLC_FRAME_FOUND
) &&
552 if ((bitcnt
>>3)>=HSCX_BUFMAX
) {
553 debugl1(bcs
->cs
, "tiger: frame too big");
555 state
=HDLC_FLAG_SEARCH
;
556 bcs
->hw
.tiger
.r_err
++;
557 #ifdef ERROR_STATISTIC
561 bcs
->hw
.tiger
.rcvbuf
[(bitcnt
>>3)-1] = r_val
;
562 bcs
->hw
.tiger
.r_fcs
=
563 PPP_FCS (bcs
->hw
.tiger
.r_fcs
, r_val
);
569 bcs
->hw
.tiger
.r_tot
++;
571 bcs
->hw
.tiger
.r_state
= state
;
572 bcs
->hw
.tiger
.r_one
= r_one
;
573 bcs
->hw
.tiger
.r_val
= r_val
;
574 bcs
->hw
.tiger
.r_bitcnt
= bitcnt
;
577 void read_tiger(struct IsdnCardState
*cs
) {
579 int cnt
= NETJET_DMA_RXSIZE
/2;
581 if ((cs
->hw
.njet
.irqstat0
& cs
->hw
.njet
.last_is0
) & NETJET_IRQM0_READ
) {
582 debugl1(cs
,"tiger warn read double dma %x/%x",
583 cs
->hw
.njet
.irqstat0
, cs
->hw
.njet
.last_is0
);
584 #ifdef ERROR_STATISTIC
586 cs
->bcs
[0].err_rdo
++;
588 cs
->bcs
[1].err_rdo
++;
592 cs
->hw
.njet
.last_is0
&= ~NETJET_IRQM0_READ
;
593 cs
->hw
.njet
.last_is0
|= (cs
->hw
.njet
.irqstat0
& NETJET_IRQM0_READ
);
595 if (cs
->hw
.njet
.irqstat0
& NETJET_IRQM0_READ_1
)
596 p
= cs
->bcs
[0].hw
.tiger
.rec
+ NETJET_DMA_RXSIZE
- 1;
598 p
= cs
->bcs
[0].hw
.tiger
.rec
+ cnt
- 1;
599 if ((cs
->bcs
[0].mode
== L1_MODE_HDLC
) || (cs
->bcs
[0].mode
== L1_MODE_HDLC_56K
))
600 read_raw(cs
->bcs
, p
, cnt
);
602 if ((cs
->bcs
[1].mode
== L1_MODE_HDLC
) || (cs
->bcs
[1].mode
== L1_MODE_HDLC_56K
))
603 read_raw(cs
->bcs
+ 1, p
, cnt
);
604 cs
->hw
.njet
.irqstat0
&= ~NETJET_IRQM0_READ
;
607 static void write_raw(struct BCState
*bcs
, u_int
*buf
, int cnt
);
609 void netjet_fill_dma(struct BCState
*bcs
)
611 register u_int
*p
, *sp
;
616 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
617 debugl1(bcs
->cs
,"tiger fill_dma1: c%d %4x", bcs
->channel
,
619 if (test_and_set_bit(BC_FLG_BUSY
, &bcs
->Flag
))
621 if (bcs
->mode
== L1_MODE_HDLC
) { // it's 64k
622 if (make_raw_data(bcs
))
626 if (make_raw_data_56k(bcs
))
629 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
630 debugl1(bcs
->cs
,"tiger fill_dma2: c%d %4x", bcs
->channel
,
632 if (test_and_clear_bit(BC_FLG_NOFRAME
, &bcs
->Flag
)) {
633 write_raw(bcs
, bcs
->hw
.tiger
.sendp
, bcs
->hw
.tiger
.free
);
634 } else if (test_and_clear_bit(BC_FLG_HALF
, &bcs
->Flag
)) {
635 p
= bus_to_virt(inl(bcs
->cs
->hw
.njet
.base
+ NETJET_DMA_READ_ADR
));
636 sp
= bcs
->hw
.tiger
.sendp
;
637 if (p
== bcs
->hw
.tiger
.s_end
)
638 p
= bcs
->hw
.tiger
.send
-1;
639 if (sp
== bcs
->hw
.tiger
.s_end
)
640 sp
= bcs
->hw
.tiger
.send
-1;
643 write_raw(bcs
, bcs
->hw
.tiger
.sendp
, bcs
->hw
.tiger
.free
);
647 if (p
> bcs
->hw
.tiger
.s_end
)
648 p
= bcs
->hw
.tiger
.send
;
651 if (p
> bcs
->hw
.tiger
.s_end
)
652 p
= bcs
->hw
.tiger
.send
;
653 write_raw(bcs
, p
, bcs
->hw
.tiger
.free
- cnt
);
655 } else if (test_and_clear_bit(BC_FLG_EMPTY
, &bcs
->Flag
)) {
656 p
= bus_to_virt(inl(bcs
->cs
->hw
.njet
.base
+ NETJET_DMA_READ_ADR
));
657 cnt
= bcs
->hw
.tiger
.s_end
- p
;
659 p
= bcs
->hw
.tiger
.send
+ 1;
660 cnt
= NETJET_DMA_TXSIZE
/2 - 2;
664 if (cnt
<= (NETJET_DMA_TXSIZE
/2))
665 cnt
+= NETJET_DMA_TXSIZE
/2;
669 write_raw(bcs
, p
, cnt
);
671 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
672 debugl1(bcs
->cs
,"tiger fill_dma3: c%d %4x", bcs
->channel
,
676 static void write_raw(struct BCState
*bcs
, u_int
*buf
, int cnt
) {
677 u_int mask
, val
, *p
=buf
;
682 if (test_bit(BC_FLG_BUSY
, &bcs
->Flag
)) {
683 if (bcs
->hw
.tiger
.sendcnt
> cnt
) {
685 bcs
->hw
.tiger
.sendcnt
-= cnt
;
687 s_cnt
= bcs
->hw
.tiger
.sendcnt
;
688 bcs
->hw
.tiger
.sendcnt
= 0;
694 for (i
=0; i
<s_cnt
; i
++) {
695 val
= bcs
->channel
? ((bcs
->hw
.tiger
.sp
[i
] <<8) & 0xff00) :
696 (bcs
->hw
.tiger
.sp
[i
]);
699 if (p
>bcs
->hw
.tiger
.s_end
)
700 p
= bcs
->hw
.tiger
.send
;
702 bcs
->hw
.tiger
.s_tot
+= s_cnt
;
703 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
704 debugl1(bcs
->cs
,"tiger write_raw: c%d %p-%p %d/%d %d %x", bcs
->channel
,
706 bcs
->hw
.tiger
.sendcnt
, bcs
->cs
->hw
.njet
.irqstat0
);
707 if (bcs
->cs
->debug
& L1_DEB_HSCX_FIFO
)
708 printframe(bcs
->cs
, bcs
->hw
.tiger
.sp
, s_cnt
, "snd");
709 bcs
->hw
.tiger
.sp
+= s_cnt
;
710 bcs
->hw
.tiger
.sendp
= p
;
711 if (!bcs
->hw
.tiger
.sendcnt
) {
713 debugl1(bcs
->cs
,"tiger write_raw: NULL skb s_cnt %d", s_cnt
);
715 if (test_bit(FLG_LLI_L1WAKEUP
,&bcs
->st
->lli
.flag
) &&
716 (PACKET_NOACK
!= bcs
->tx_skb
->pkt_type
)) {
718 spin_lock_irqsave(&bcs
->aclock
, flags
);
719 bcs
->ackcnt
+= bcs
->tx_skb
->len
;
720 spin_unlock_irqrestore(&bcs
->aclock
, flags
);
721 schedule_event(bcs
, B_ACKPENDING
);
723 dev_kfree_skb_any(bcs
->tx_skb
);
726 test_and_clear_bit(BC_FLG_BUSY
, &bcs
->Flag
);
727 bcs
->hw
.tiger
.free
= cnt
- s_cnt
;
728 if (bcs
->hw
.tiger
.free
> (NETJET_DMA_TXSIZE
/2))
729 test_and_set_bit(BC_FLG_HALF
, &bcs
->Flag
);
731 test_and_clear_bit(BC_FLG_HALF
, &bcs
->Flag
);
732 test_and_set_bit(BC_FLG_NOFRAME
, &bcs
->Flag
);
734 if ((bcs
->tx_skb
= skb_dequeue(&bcs
->squeue
))) {
735 netjet_fill_dma(bcs
);
739 for (i
=s_cnt
; i
<cnt
;i
++) {
741 if (p
>bcs
->hw
.tiger
.s_end
)
742 p
= bcs
->hw
.tiger
.send
;
744 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
745 debugl1(bcs
->cs
, "tiger write_raw: fill rest %d",
748 test_and_set_bit(B_XMTBUFREADY
, &bcs
->event
);
749 schedule_work(&bcs
->tqueue
);
752 } else if (test_and_clear_bit(BC_FLG_NOFRAME
, &bcs
->Flag
)) {
753 test_and_set_bit(BC_FLG_HALF
, &bcs
->Flag
);
754 fill_mem(bcs
, buf
, cnt
, bcs
->channel
, 0xff);
755 bcs
->hw
.tiger
.free
+= cnt
;
756 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
757 debugl1(bcs
->cs
,"tiger write_raw: fill half");
758 } else if (test_and_clear_bit(BC_FLG_HALF
, &bcs
->Flag
)) {
759 test_and_set_bit(BC_FLG_EMPTY
, &bcs
->Flag
);
760 fill_mem(bcs
, buf
, cnt
, bcs
->channel
, 0xff);
761 if (bcs
->cs
->debug
& L1_DEB_HSCX
)
762 debugl1(bcs
->cs
,"tiger write_raw: fill full");
766 void write_tiger(struct IsdnCardState
*cs
) {
767 u_int
*p
, cnt
= NETJET_DMA_TXSIZE
/2;
769 if ((cs
->hw
.njet
.irqstat0
& cs
->hw
.njet
.last_is0
) & NETJET_IRQM0_WRITE
) {
770 debugl1(cs
,"tiger warn write double dma %x/%x",
771 cs
->hw
.njet
.irqstat0
, cs
->hw
.njet
.last_is0
);
772 #ifdef ERROR_STATISTIC
780 cs
->hw
.njet
.last_is0
&= ~NETJET_IRQM0_WRITE
;
781 cs
->hw
.njet
.last_is0
|= (cs
->hw
.njet
.irqstat0
& NETJET_IRQM0_WRITE
);
783 if (cs
->hw
.njet
.irqstat0
& NETJET_IRQM0_WRITE_1
)
784 p
= cs
->bcs
[0].hw
.tiger
.send
+ NETJET_DMA_TXSIZE
- 1;
786 p
= cs
->bcs
[0].hw
.tiger
.send
+ cnt
- 1;
787 if ((cs
->bcs
[0].mode
== L1_MODE_HDLC
) || (cs
->bcs
[0].mode
== L1_MODE_HDLC_56K
))
788 write_raw(cs
->bcs
, p
, cnt
);
789 if ((cs
->bcs
[1].mode
== L1_MODE_HDLC
) || (cs
->bcs
[1].mode
== L1_MODE_HDLC_56K
))
790 write_raw(cs
->bcs
+ 1, p
, cnt
);
791 cs
->hw
.njet
.irqstat0
&= ~NETJET_IRQM0_WRITE
;
795 tiger_l2l1(struct PStack
*st
, int pr
, void *arg
)
797 struct BCState
*bcs
= st
->l1
.bcs
;
798 struct sk_buff
*skb
= arg
;
802 case (PH_DATA
| REQUEST
):
803 spin_lock_irqsave(&bcs
->cs
->lock
, flags
);
805 skb_queue_tail(&bcs
->squeue
, skb
);
808 bcs
->cs
->BC_Send_Data(bcs
);
810 spin_unlock_irqrestore(&bcs
->cs
->lock
, flags
);
812 case (PH_PULL
| INDICATION
):
813 spin_lock_irqsave(&bcs
->cs
->lock
, flags
);
815 printk(KERN_WARNING
"tiger_l2l1: this shouldn't happen\n");
818 bcs
->cs
->BC_Send_Data(bcs
);
820 spin_unlock_irqrestore(&bcs
->cs
->lock
, flags
);
822 case (PH_PULL
| REQUEST
):
824 test_and_clear_bit(FLG_L1_PULL_REQ
, &st
->l1
.Flags
);
825 st
->l1
.l1l2(st
, PH_PULL
| CONFIRM
, NULL
);
827 test_and_set_bit(FLG_L1_PULL_REQ
, &st
->l1
.Flags
);
829 case (PH_ACTIVATE
| REQUEST
):
830 spin_lock_irqsave(&bcs
->cs
->lock
, flags
);
831 test_and_set_bit(BC_FLG_ACTIV
, &bcs
->Flag
);
832 mode_tiger(bcs
, st
->l1
.mode
, st
->l1
.bc
);
833 /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG */
834 spin_unlock_irqrestore(&bcs
->cs
->lock
, flags
);
835 bcs
->cs
->cardmsg(bcs
->cs
, MDL_BC_ASSIGN
, (void *)(&st
->l1
.bc
));
836 l1_msg_b(st
, pr
, arg
);
838 case (PH_DEACTIVATE
| REQUEST
):
839 /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG */
840 bcs
->cs
->cardmsg(bcs
->cs
, MDL_BC_RELEASE
, (void *)(&st
->l1
.bc
));
841 l1_msg_b(st
, pr
, arg
);
843 case (PH_DEACTIVATE
| CONFIRM
):
844 spin_lock_irqsave(&bcs
->cs
->lock
, flags
);
845 test_and_clear_bit(BC_FLG_ACTIV
, &bcs
->Flag
);
846 test_and_clear_bit(BC_FLG_BUSY
, &bcs
->Flag
);
847 mode_tiger(bcs
, 0, st
->l1
.bc
);
848 spin_unlock_irqrestore(&bcs
->cs
->lock
, flags
);
849 st
->l1
.l1l2(st
, PH_DEACTIVATE
| CONFIRM
, NULL
);
/*
 * close_tigerstate - shut down one B-channel: drop it out of tiger mode
 * and release its per-channel buffers and queued skbs.
 * NOTE(review): fragmented extraction -- return type, braces closing the
 * if-blocks, and the guard around tx_skb (original lines 870/872) are on
 * lines missing from this view.
 */
856 close_tigerstate(struct BCState
*bcs
)
/* Put the channel back into the idle/null mode first. */
858 mode_tiger(bcs
, 0, bcs
->channel
);
/* Only tear down buffers if this channel was actually initialised. */
859 if (test_and_clear_bit(BC_FLG_INIT
, &bcs
->Flag
)) {
860 if (bcs
->hw
.tiger
.rcvbuf
) {
861 kfree(bcs
->hw
.tiger
.rcvbuf
);
/* NULL after free guards against use-after-free / double free. */
862 bcs
->hw
.tiger
.rcvbuf
= NULL
;
864 if (bcs
->hw
.tiger
.sendbuf
) {
865 kfree(bcs
->hw
.tiger
.sendbuf
);
866 bcs
->hw
.tiger
.sendbuf
= NULL
;
/* Discard any skbs still queued for receive or transmit. */
868 skb_queue_purge(&bcs
->rqueue
);
869 skb_queue_purge(&bcs
->squeue
);
871 dev_kfree_skb_any(bcs
->tx_skb
);
873 test_and_clear_bit(BC_FLG_BUSY
, &bcs
->Flag
);
879 open_tigerstate(struct IsdnCardState
*cs
, struct BCState
*bcs
)
881 if (!test_and_set_bit(BC_FLG_INIT
, &bcs
->Flag
)) {
882 if (!(bcs
->hw
.tiger
.rcvbuf
= kmalloc(HSCX_BUFMAX
, GFP_ATOMIC
))) {
884 "HiSax: No memory for tiger.rcvbuf\n");
887 if (!(bcs
->hw
.tiger
.sendbuf
= kmalloc(RAW_BUFMAX
, GFP_ATOMIC
))) {
889 "HiSax: No memory for tiger.sendbuf\n");
892 skb_queue_head_init(&bcs
->rqueue
);
893 skb_queue_head_init(&bcs
->squeue
);
896 bcs
->hw
.tiger
.sendcnt
= 0;
897 test_and_clear_bit(BC_FLG_BUSY
, &bcs
->Flag
);
/*
 * setstack_tiger - bind a protocol stack to a B-channel state and hook
 * the layer-2-to-layer-1 entry point to tiger_l2l1.
 * NOTE(review): fragmented extraction -- return type, braces, the error
 * return after open_tigerstate and other assignments (original lines
 * 905, 908-909, 912 ff.) are on lines missing from this view.
 */
904 setstack_tiger(struct PStack
*st
, struct BCState
*bcs
)
/* Record which B channel (0/1) this stack drives. */
906 bcs
->channel
= st
->l1
.bc
;
/* Allocate per-channel buffers; nonzero means failure (handled on a
 * missing line). */
907 if (open_tigerstate(st
->l1
.hardware
, bcs
))
/* Route L2->L1 primitives into this driver. */
910 st
->l2
.l2l1
= tiger_l2l1
;
911 setstack_manager(st
);
919 inittiger(struct IsdnCardState
*cs
)
921 if (!(cs
->bcs
[0].hw
.tiger
.send
= kmalloc(NETJET_DMA_TXSIZE
* sizeof(unsigned int),
922 GFP_KERNEL
| GFP_DMA
))) {
924 "HiSax: No memory for tiger.send\n");
927 cs
->bcs
[0].hw
.tiger
.s_irq
= cs
->bcs
[0].hw
.tiger
.send
+ NETJET_DMA_TXSIZE
/2 - 1;
928 cs
->bcs
[0].hw
.tiger
.s_end
= cs
->bcs
[0].hw
.tiger
.send
+ NETJET_DMA_TXSIZE
- 1;
929 cs
->bcs
[1].hw
.tiger
.send
= cs
->bcs
[0].hw
.tiger
.send
;
930 cs
->bcs
[1].hw
.tiger
.s_irq
= cs
->bcs
[0].hw
.tiger
.s_irq
;
931 cs
->bcs
[1].hw
.tiger
.s_end
= cs
->bcs
[0].hw
.tiger
.s_end
;
933 memset(cs
->bcs
[0].hw
.tiger
.send
, 0xff, NETJET_DMA_TXSIZE
* sizeof(unsigned int));
934 debugl1(cs
, "tiger: send buf %p - %p", cs
->bcs
[0].hw
.tiger
.send
,
935 cs
->bcs
[0].hw
.tiger
.send
+ NETJET_DMA_TXSIZE
- 1);
936 outl(virt_to_bus(cs
->bcs
[0].hw
.tiger
.send
),
937 cs
->hw
.njet
.base
+ NETJET_DMA_READ_START
);
938 outl(virt_to_bus(cs
->bcs
[0].hw
.tiger
.s_irq
),
939 cs
->hw
.njet
.base
+ NETJET_DMA_READ_IRQ
);
940 outl(virt_to_bus(cs
->bcs
[0].hw
.tiger
.s_end
),
941 cs
->hw
.njet
.base
+ NETJET_DMA_READ_END
);
942 if (!(cs
->bcs
[0].hw
.tiger
.rec
= kmalloc(NETJET_DMA_RXSIZE
* sizeof(unsigned int),
943 GFP_KERNEL
| GFP_DMA
))) {
945 "HiSax: No memory for tiger.rec\n");
948 debugl1(cs
, "tiger: rec buf %p - %p", cs
->bcs
[0].hw
.tiger
.rec
,
949 cs
->bcs
[0].hw
.tiger
.rec
+ NETJET_DMA_RXSIZE
- 1);
950 cs
->bcs
[1].hw
.tiger
.rec
= cs
->bcs
[0].hw
.tiger
.rec
;
951 memset(cs
->bcs
[0].hw
.tiger
.rec
, 0xff, NETJET_DMA_RXSIZE
* sizeof(unsigned int));
952 outl(virt_to_bus(cs
->bcs
[0].hw
.tiger
.rec
),
953 cs
->hw
.njet
.base
+ NETJET_DMA_WRITE_START
);
954 outl(virt_to_bus(cs
->bcs
[0].hw
.tiger
.rec
+ NETJET_DMA_RXSIZE
/2 - 1),
955 cs
->hw
.njet
.base
+ NETJET_DMA_WRITE_IRQ
);
956 outl(virt_to_bus(cs
->bcs
[0].hw
.tiger
.rec
+ NETJET_DMA_RXSIZE
- 1),
957 cs
->hw
.njet
.base
+ NETJET_DMA_WRITE_END
);
958 debugl1(cs
, "tiger: dmacfg %x/%x pulse=%d",
959 inl(cs
->hw
.njet
.base
+ NETJET_DMA_WRITE_ADR
),
960 inl(cs
->hw
.njet
.base
+ NETJET_DMA_READ_ADR
),
961 bytein(cs
->hw
.njet
.base
+ NETJET_PULSE_CNT
));
962 cs
->hw
.njet
.last_is0
= 0;
963 cs
->bcs
[0].BC_SetStack
= setstack_tiger
;
964 cs
->bcs
[1].BC_SetStack
= setstack_tiger
;
965 cs
->bcs
[0].BC_Close
= close_tigerstate
;
966 cs
->bcs
[1].BC_Close
= close_tigerstate
;
/*
 * releasetiger - free the shared tiger DMA buffers.
 * bcs[1].hw.tiger.send/.rec alias the bcs[0] buffers (see inittiger,
 * original lines 929/950), so only bcs[0]'s pointers are kfree'd; the
 * bcs[1] copies are merely reset to NULL to avoid a double free.
 * NOTE(review): fragmented extraction -- return type and the closing
 * braces of the if-blocks are on lines missing from this view.
 */
970 releasetiger(struct IsdnCardState
*cs
)
972 if (cs
->bcs
[0].hw
.tiger
.send
) {
973 kfree(cs
->bcs
[0].hw
.tiger
.send
);
974 cs
->bcs
[0].hw
.tiger
.send
= NULL
;
/* Alias of bcs[0].send: clear only, never free. */
976 if (cs
->bcs
[1].hw
.tiger
.send
) {
977 cs
->bcs
[1].hw
.tiger
.send
= NULL
;
979 if (cs
->bcs
[0].hw
.tiger
.rec
) {
980 kfree(cs
->bcs
[0].hw
.tiger
.rec
);
981 cs
->bcs
[0].hw
.tiger
.rec
= NULL
;
/* Alias of bcs[0].rec: clear only, never free. */
983 if (cs
->bcs
[1].hw
.tiger
.rec
) {
984 cs
->bcs
[1].hw
.tiger
.rec
= NULL
;
/*
 * release_io_netjet - quiesce the card and give back its I/O resources:
 * mask both interrupt sources, then release the 256-byte I/O region.
 * NOTE(review): fragmented extraction -- return type, braces and any
 * intervening statement (original line 993) are missing from this view.
 */
989 release_io_netjet(struct IsdnCardState
*cs
)
/* Mask all interrupts on both IRQ mask registers before releasing. */
991 byteout(cs
->hw
.njet
.base
+ NETJET_IRQMASK0
, 0);
992 byteout(cs
->hw
.njet
.base
+ NETJET_IRQMASK1
, 0);
/* Return the card's 256-byte I/O port window to the kernel. */
994 release_region(cs
->hw
.njet
.base
, 256);