/* $Id: netjet.c,v 1.29.2.4 2004/02/11 13:21:34 keil Exp $
 *
 * low level stuff for Traverse Technologie NETJet ISDN cards
 *
 * Copyright by Karsten Keil <keil@isdn4linux.de>
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * Thanks to Traverse Technologies Australia for documents and information
 *
 * 16-Apr-2002 - led code added - Guy Ellis (guy@traverse.com.au)
 */
17 #include <linux/init.h>
22 #include <linux/interrupt.h>
23 #include <linux/ppp_defs.h>
24 #include <linux/slab.h>
/* Interface functions */
31 NETjet_ReadIC(struct IsdnCardState
*cs
, u_char offset
)
35 cs
->hw
.njet
.auxd
&= 0xfc;
36 cs
->hw
.njet
.auxd
|= (offset
>> 4) & 3;
37 byteout(cs
->hw
.njet
.auxa
, cs
->hw
.njet
.auxd
);
38 ret
= bytein(cs
->hw
.njet
.isac
+ ((offset
& 0xf) << 2));
43 NETjet_WriteIC(struct IsdnCardState
*cs
, u_char offset
, u_char value
)
45 cs
->hw
.njet
.auxd
&= 0xfc;
46 cs
->hw
.njet
.auxd
|= (offset
>> 4) & 3;
47 byteout(cs
->hw
.njet
.auxa
, cs
->hw
.njet
.auxd
);
48 byteout(cs
->hw
.njet
.isac
+ ((offset
& 0xf) << 2), value
);
52 NETjet_ReadICfifo(struct IsdnCardState
*cs
, u_char
*data
, int size
)
54 cs
->hw
.njet
.auxd
&= 0xfc;
55 byteout(cs
->hw
.njet
.auxa
, cs
->hw
.njet
.auxd
);
56 insb(cs
->hw
.njet
.isac
, data
, size
);
60 NETjet_WriteICfifo(struct IsdnCardState
*cs
, u_char
*data
, int size
)
62 cs
->hw
.njet
.auxd
&= 0xfc;
63 byteout(cs
->hw
.njet
.auxa
, cs
->hw
.njet
.auxd
);
64 outsb(cs
->hw
.njet
.isac
, data
, size
);
/*
 * fill_mem - preset one B-channel's byte lane in the shared TX DMA ring.
 * @bcs:  B-channel state
 * @pos:  starting position inside the ring
 * @cnt:  number of 32-bit ring words to touch
 * @chan: which byte lane (B-channel) of each word to overwrite
 * @fill: fill byte (callers pass 0xff, the HDLC idle pattern)
 *
 * NOTE(review): only the loop skeleton and the wrap-around check are
 * visible in this extract; the per-word masking that merges `fill`
 * into lane `chan` appears to have been lost — confirm against the
 * complete source before relying on this description.
 */
67 static void fill_mem(struct BCState *bcs, u_int *pos, u_int cnt, int chan, u_char fill)
/* mask isolates the low byte lane; p walks the ring starting at pos */
69 u_int mask = 0x000000ff, val = 0, *p = pos;
78 for (i = 0; i < cnt; i++) {
/* wrap the ring pointer back to the start of the send buffer */
81 if (p > bcs->hw.tiger.s_end)
82 p = bcs->hw.tiger.send;
/*
 * mode_tiger - switch a B-channel of the Tiger DMA controller between
 * L1 modes (NULL / TRANS / HDLC / HDLC_56K, per the case labels below).
 * @bcs:  B-channel state
 * @mode: new L1 mode
 * @bc:   B-channel number (0/1)
 *
 * Resets the HDLC receiver state, (re)starts or stops the shared DMA
 * engine, and drives the status LEDs on NETjet-S cards.
 * NOTE(review): the surrounding switch statement and several branches
 * are missing from this extract; the case labels and LED handling are
 * shown as recovered — verify against the complete source.
 */
87 mode_tiger(struct BCState *bcs, int mode, int bc)
89 struct IsdnCardState *cs = bcs->cs;
92 if (cs->debug & L1_DEB_HSCX)
93 debugl1(cs, "Tiger mode %d bchan %d/%d",
94 mode, bc, bcs->channel);
/* idle-fill this channel's lane of the TX ring */
99 fill_mem(bcs, bcs->hw.tiger.send,
100 NETJET_DMA_TXSIZE, bc, 0xff);
101 if (cs->debug & L1_DEB_HSCX)
102 debugl1(cs, "Tiger stat rec %d/%d send %d",
103 bcs->hw.tiger.r_tot, bcs->hw.tiger.r_err,
104 bcs->hw.tiger.s_tot);
/* both channels idle: stop the DMA engine and mask its interrupts */
105 if ((cs->bcs[0].mode == L1_MODE_NULL) &&
106 (cs->bcs[1].mode == L1_MODE_NULL)) {
107 cs->hw.njet.dmactrl = 0;
108 byteout(cs->hw.njet.base + NETJET_DMACTRL,
109 cs->hw.njet.dmactrl);
110 byteout(cs->hw.njet.base + NETJET_IRQMASK0, 0);
/* NETjet-S only: turn the per-channel LED off via the aux latch */
112 if (cs->typ == ISDN_CTYPE_NETJET_S)
116 led = 0x01 << (6 + led); // convert to mask
118 cs->hw.njet.auxd &= led;
119 byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
122 case (L1_MODE_TRANS):
124 case (L1_MODE_HDLC_56K):
/* entering an active mode: reset the HDLC receiver bookkeeping */
126 fill_mem(bcs, bcs->hw.tiger.send,
127 NETJET_DMA_TXSIZE, bc, 0xff);
128 bcs->hw.tiger.r_state = HDLC_ZERO_SEARCH;
129 bcs->hw.tiger.r_tot = 0;
130 bcs->hw.tiger.r_bitcnt = 0;
131 bcs->hw.tiger.r_one = 0;
132 bcs->hw.tiger.r_err = 0;
133 bcs->hw.tiger.s_tot = 0;
/* first active channel: idle-fill the other lane too and start DMA */
134 if (!cs->hw.njet.dmactrl) {
135 fill_mem(bcs, bcs->hw.tiger.send,
136 NETJET_DMA_TXSIZE, !bc, 0xff);
137 cs->hw.njet.dmactrl = 1;
138 byteout(cs->hw.njet.base + NETJET_DMACTRL,
139 cs->hw.njet.dmactrl);
140 byteout(cs->hw.njet.base + NETJET_IRQMASK0, 0x0f);
141 /* was 0x3f now 0x0f for TJ300 and TJ320 GE 13/07/00 */
143 bcs->hw.tiger.sendp = bcs->hw.tiger.send;
144 bcs->hw.tiger.free = NETJET_DMA_TXSIZE;
145 test_and_set_bit(BC_FLG_EMPTY, &bcs->Flag);
/* NETjet-S only: turn the per-channel LED on */
146 if (cs->typ == ISDN_CTYPE_NETJET_S)
150 led = 0x01 << (6 + led); // convert to mask
151 cs->hw.njet.auxd |= led;
152 byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
156 if (cs->debug & L1_DEB_HSCX)
157 debugl1(cs, "tiger: set %x %x %x %x/%x pulse=%d",
158 bytein(cs->hw.njet.base + NETJET_DMACTRL),
159 bytein(cs->hw.njet.base + NETJET_IRQMASK0),
160 bytein(cs->hw.njet.base + NETJET_IRQSTAT0),
161 inl(cs->hw.njet.base + NETJET_DMA_READ_ADR),
162 inl(cs->hw.njet.base + NETJET_DMA_WRITE_ADR),
163 bytein(cs->hw.njet.base + NETJET_PULSE_CNT));
/*
 * printframe - dump a frame buffer to the HiSax debug log.
 * @cs:    card state (for debugl1)
 * @buf:   frame bytes
 * @count: number of bytes
 * @s:     tag string ("rec"/"snd") printed with each debug line
 *
 * NOTE(review): the hex-dump loop between these sprintf/debugl1 calls
 * is missing from this extract.
 */
166 static void printframe(struct IsdnCardState *cs, u_char *buf, int count, char *s) {
172 t += sprintf(t, "tiger %s(%4d)", s, count);
179 debugl1(cs, "%s", tmp);
183 t += sprintf(t, "tiger %s ", s);
/*
 * MAKE_RAW_BYTE - HDLC bit-stuff one byte (64k mode) into the raw
 * transmit buffer: emits each of the 8 bits and inserts a zero after
 * five consecutive ones, appending completed bytes to
 * bcs->hw.tiger.sendbuf.  Relies on j, s_cnt, s_val, s_one, bitcnt
 * being in scope at the expansion site.
 * NOTE(review): interior continuation lines of this macro are missing
 * from this extract.
 */
189 #define MAKE_RAW_BYTE for (j = 0; j < 8; j++) { \
200 bcs->hw.tiger.sendbuf[s_cnt++] = s_val; \
210 bcs->hw.tiger.sendbuf[s_cnt++] = s_val; \
/*
 * make_raw_data - build the bit-stuffed HDLC byte stream (64k mode)
 * for the pending bcs->tx_skb into bcs->hw.tiger.sendbuf.
 *
 * Frames the payload with HDLC flags, appends the PPP FCS, and leaves
 * sendcnt/sp set up for write_raw() to feed the DMA ring.  Returns
 * nonzero on error (NULL skb), per the caller in netjet_fill_dma.
 * NOTE(review): declarations of j/val/fcs and the FCS initialisation
 * are missing from this extract.
 */
216 static int make_raw_data(struct BCState *bcs) {
217 // this make_raw is for 64k
218 register u_int i, s_cnt = 0;
221 register u_char s_one = 0;
222 register u_char s_val = 0;
223 register u_char bitcnt = 0;
227 debugl1(bcs->cs, "tiger make_raw: NULL skb");
/* opening flag */
230 bcs->hw.tiger.sendbuf[s_cnt++] = HDLC_FLAG_VALUE;
/* payload: stuff each byte while accumulating the FCS */
232 for (i = 0; i < bcs->tx_skb->len; i++) {
233 val = bcs->tx_skb->data[i];
234 fcs = PPP_FCS(fcs, val);
/* second (high) FCS byte */
240 val = (fcs >> 8) & 0xff;
/* closing flag, emitted bit-by-bit */
242 val = HDLC_FLAG_VALUE;
243 for (j = 0; j < 8; j++) {
251 bcs->hw.tiger.sendbuf[s_cnt++] = s_val;
256 if (bcs->cs->debug & L1_DEB_HSCX)
257 debugl1(bcs->cs, "tiger make_raw: in %u out %d.%d",
258 bcs->tx_skb->len, s_cnt, bitcnt);
/* pad the last partial byte with idle ones */
260 while (8 > bitcnt++) {
264 bcs->hw.tiger.sendbuf[s_cnt++] = s_val;
265 bcs->hw.tiger.sendbuf[s_cnt++] = 0xff; // NJ<->NJ thoughput bug fix
267 bcs->hw.tiger.sendcnt = s_cnt;
268 bcs->tx_cnt -= bcs->tx_skb->len;
269 bcs->hw.tiger.sp = bcs->hw.tiger.sendbuf;
/*
 * MAKE_RAW_BYTE_56K - 56k-mode variant of MAKE_RAW_BYTE: bit-stuffs one
 * byte into bcs->hw.tiger.sendbuf, differing from the 64k macro in how
 * the 7-bit line format is laid out.  Same in-scope variable contract.
 * NOTE(review): interior continuation lines of this macro are missing
 * from this extract.
 */
275 #define MAKE_RAW_BYTE_56K for (j = 0; j < 8; j++) { \
288 bcs->hw.tiger.sendbuf[s_cnt++] = s_val; \
300 bcs->hw.tiger.sendbuf[s_cnt++] = s_val; \
/*
 * make_raw_data_56k - 56k-mode counterpart of make_raw_data: builds the
 * bit-stuffed HDLC stream for bcs->tx_skb into bcs->hw.tiger.sendbuf.
 * Unlike the 64k path, the opening flag is also emitted bit-by-bit.
 * Returns nonzero on error (NULL skb).
 * NOTE(review): declarations of j/val/fcs and the FCS initialisation
 * are missing from this extract.
 */
306 static int make_raw_data_56k(struct BCState *bcs) {
307 // this make_raw is for 56k
308 register u_int i, s_cnt = 0;
311 register u_char s_one = 0;
312 register u_char s_val = 0;
313 register u_char bitcnt = 0;
317 debugl1(bcs->cs, "tiger make_raw_56k: NULL skb");
/* opening flag, bit-by-bit */
320 val = HDLC_FLAG_VALUE;
321 for (j = 0; j < 8; j++) {
331 bcs->hw.tiger.sendbuf[s_cnt++] = s_val;
/* payload: stuff each byte while accumulating the FCS */
337 for (i = 0; i < bcs->tx_skb->len; i++) {
338 val = bcs->tx_skb->data[i];
339 fcs = PPP_FCS(fcs, val);
/* second (high) FCS byte */
345 val = (fcs >> 8) & 0xff;
/* closing flag */
347 val = HDLC_FLAG_VALUE;
348 for (j = 0; j < 8; j++) {
358 bcs->hw.tiger.sendbuf[s_cnt++] = s_val;
363 if (bcs->cs->debug & L1_DEB_HSCX)
364 debugl1(bcs->cs, "tiger make_raw_56k: in %u out %d.%d",
365 bcs->tx_skb->len, s_cnt, bitcnt);
/* pad the last partial byte with idle ones */
367 while (8 > bitcnt++) {
371 bcs->hw.tiger.sendbuf[s_cnt++] = s_val;
372 bcs->hw.tiger.sendbuf[s_cnt++] = 0xff; // NJ<->NJ thoughput bug fix
374 bcs->hw.tiger.sendcnt = s_cnt;
375 bcs->tx_cnt -= bcs->tx_skb->len;
376 bcs->hw.tiger.sp = bcs->hw.tiger.sendbuf;
/*
 * got_frame - deliver a completed, FCS-verified receive frame upward.
 * @bcs:   B-channel state (frame bytes are in bcs->hw.tiger.rcvbuf)
 * @count: payload length in bytes
 *
 * Copies the frame into a fresh skb, queues it on rqueue, and kicks the
 * B-channel work queue (B_RCVBUFREADY) so the upper layer picks it up.
 * NOTE(review): the else-branch separating the alloc-failure warning
 * from the skb use appears lost in this extract — in the intended flow
 * skb_put_data/skb_queue_tail must only run when dev_alloc_skb
 * succeeded; confirm against the complete source.
 */
380 static void got_frame(struct BCState *bcs, int count) {
383 if (!(skb = dev_alloc_skb(count)))
384 printk(KERN_WARNING "TIGER: receive out of memory\n");
386 skb_put_data(skb, bcs->hw.tiger.rcvbuf, count);
387 skb_queue_tail(&bcs->rqueue, skb);
389 test_and_set_bit(B_RCVBUFREADY, &bcs->event);
390 schedule_work(&bcs->tqueue);
392 if (bcs->cs->debug & L1_DEB_RECEIVE_FRAME)
393 printframe(bcs->cs, bcs->hw.tiger.rcvbuf, count, "rec");
/*
 * read_raw - HDLC de-framing state machine over the RX DMA ring.
 * @bcs: B-channel state
 * @buf: position in the shared RX ring to start from
 * @cnt: number of ring words to process
 *
 * Extracts this channel's byte lane from each 32-bit ring word, then
 * walks it bit-by-bit through the states ZERO_SEARCH -> FLAG_SEARCH ->
 * FLAG_FOUND -> FRAME_FOUND, undoing zero-bit stuffing (a zero after
 * five ones is dropped), accumulating bytes into rcvbuf and the PPP
 * FCS, and handing byte-aligned frames with a good FCS to got_frame().
 * The residual state is written back to bcs->hw.tiger.r_* at the end
 * so the next DMA half-buffer continues seamlessly.
 * NOTE(review): many interior lines (bit extraction, 56k path, several
 * transitions and #ifdef ERROR_STATISTIC bodies) are missing from this
 * extract.
 */
398 static void read_raw(struct BCState *bcs, u_int *buf, int cnt) {
402 u_int *pend = bcs->hw.tiger.rec + NETJET_DMA_RXSIZE - 1;
/* resume from the state saved after the previous half-buffer */
403 register u_char state = bcs->hw.tiger.r_state;
404 register u_char r_one = bcs->hw.tiger.r_one;
405 register u_char r_val = bcs->hw.tiger.r_val;
406 register u_int bitcnt = bcs->hw.tiger.r_bitcnt;
411 if (bcs->mode == L1_MODE_HDLC) { // it's 64k
419 for (i = 0; i < cnt; i++) {
/* channel 1 rides in bits 8-15 of each ring word, channel 0 in bits 0-7 */
420 val = bcs->channel ? ((*p >> 8) & 0xff) : (*p & 0xff);
423 p = bcs->hw.tiger.rec;
/* all-ones byte: line idle, drop back to zero search */
424 if ((val & mask) == mask) {
425 state = HDLC_ZERO_SEARCH;
426 bcs->hw.tiger.r_tot++;
431 for (j = 0; j < bits; j++) {
432 if (state == HDLC_ZERO_SEARCH) {
437 state = HDLC_FLAG_SEARCH;
438 if (bcs->cs->debug & L1_DEB_HSCX)
439 debugl1(bcs->cs, "tiger read_raw: zBit(%d,%d,%d) %x",
440 bcs->hw.tiger.r_tot, i, j, val);
442 } else if (state == HDLC_FLAG_SEARCH) {
446 state = HDLC_ZERO_SEARCH;
452 state = HDLC_FLAG_FOUND;
453 if (bcs->cs->debug & L1_DEB_HSCX)
454 debugl1(bcs->cs, "tiger read_raw: flag(%d,%d,%d) %x",
455 bcs->hw.tiger.r_tot, i, j, val);
459 } else if (state == HDLC_FLAG_FOUND) {
463 state = HDLC_ZERO_SEARCH;
476 } else if (r_one != 5) {
483 if ((state != HDLC_ZERO_SEARCH) &&
/* first data byte of a frame: start the FCS over it */
485 state = HDLC_FRAME_FOUND;
486 bcs->hw.tiger.r_fcs = PPP_INITFCS;
487 bcs->hw.tiger.rcvbuf[0] = r_val;
488 bcs->hw.tiger.r_fcs = PPP_FCS(bcs->hw.tiger.r_fcs, r_val);
489 if (bcs->cs->debug & L1_DEB_HSCX)
490 debugl1(bcs->cs, "tiger read_raw: byte1(%d,%d,%d) rval %x val %x i %x",
491 bcs->hw.tiger.r_tot, i, j, r_val, val,
492 bcs->cs->hw.njet.irqstat0);
494 } else if (state == HDLC_FRAME_FOUND) {
498 state = HDLC_ZERO_SEARCH;
/* closing flag hit mid-byte: frame is damaged */
511 debugl1(bcs->cs, "tiger: frame not byte aligned");
512 state = HDLC_FLAG_SEARCH;
513 bcs->hw.tiger.r_err++;
514 #ifdef ERROR_STATISTIC
518 if (bcs->cs->debug & L1_DEB_HSCX)
519 debugl1(bcs->cs, "tiger frame end(%d,%d): fcs(%x) i %x",
520 i, j, bcs->hw.tiger.r_fcs, bcs->cs->hw.njet.irqstat0);
/* complete frame: deliver if the accumulated FCS checks out */
521 if (bcs->hw.tiger.r_fcs == PPP_GOODFCS) {
522 got_frame(bcs, (bitcnt >> 3) - 3);
524 if (bcs->cs->debug) {
525 debugl1(bcs->cs, "tiger FCS error");
526 printframe(bcs->cs, bcs->hw.tiger.rcvbuf,
527 (bitcnt >> 3) - 1, "rec");
528 bcs->hw.tiger.r_err++;
530 #ifdef ERROR_STATISTIC
534 state = HDLC_FLAG_FOUND;
537 } else if (r_one == 5) {
548 if ((state == HDLC_FRAME_FOUND) &&
/* guard against overrunning the fixed-size receive buffer */
550 if ((bitcnt >> 3) >= HSCX_BUFMAX) {
551 debugl1(bcs->cs, "tiger: frame too big");
553 state = HDLC_FLAG_SEARCH;
554 bcs->hw.tiger.r_err++;
555 #ifdef ERROR_STATISTIC
559 bcs->hw.tiger.rcvbuf[(bitcnt >> 3) - 1] = r_val;
560 bcs->hw.tiger.r_fcs =
561 PPP_FCS(bcs->hw.tiger.r_fcs, r_val);
567 bcs->hw.tiger.r_tot++;
/* persist the de-framer state for the next half-buffer */
569 bcs->hw.tiger.r_state = state;
570 bcs->hw.tiger.r_one = r_one;
571 bcs->hw.tiger.r_val = r_val;
572 bcs->hw.tiger.r_bitcnt = bitcnt;
/*
 * read_tiger - RX DMA half-buffer interrupt handler.
 * @cs: card state
 *
 * Called when the DMA engine has filled half of the shared receive
 * ring.  Detects double interrupts for the same half (overrun), picks
 * the half that just completed (IRQM0_READ_1 selects the upper half),
 * and runs read_raw() over it for each B-channel that is in an HDLC
 * mode.  Finally clears the READ bits from the pending irqstat0.
 */
575 void read_tiger(struct IsdnCardState *cs) {
577 int cnt = NETJET_DMA_RXSIZE / 2;
/* same half signalled twice without being serviced: receive overrun */
579 if ((cs->hw.njet.irqstat0 & cs->hw.njet.last_is0) & NETJET_IRQM0_READ) {
580 debugl1(cs, "tiger warn read double dma %x/%x",
581 cs->hw.njet.irqstat0, cs->hw.njet.last_is0);
582 #ifdef ERROR_STATISTIC
584 cs->bcs[0].err_rdo++;
586 cs->bcs[1].err_rdo++;
/* remember which READ half was last signalled */
590 cs->hw.njet.last_is0 &= ~NETJET_IRQM0_READ;
591 cs->hw.njet.last_is0 |= (cs->hw.njet.irqstat0 & NETJET_IRQM0_READ);
/* point p at the last word of the half that just completed */
593 if (cs->hw.njet.irqstat0 & NETJET_IRQM0_READ_1)
594 p = cs->bcs[0].hw.tiger.rec + NETJET_DMA_RXSIZE - 1;
596 p = cs->bcs[0].hw.tiger.rec + cnt - 1;
597 if ((cs->bcs[0].mode == L1_MODE_HDLC) || (cs->bcs[0].mode == L1_MODE_HDLC_56K))
598 read_raw(cs->bcs, p, cnt);
600 if ((cs->bcs[1].mode == L1_MODE_HDLC) || (cs->bcs[1].mode == L1_MODE_HDLC_56K))
601 read_raw(cs->bcs + 1, p, cnt);
602 cs->hw.njet.irqstat0 &= ~NETJET_IRQM0_READ;
/* Forward declaration: write_raw() copies raw HDLC bytes into the TX
 * DMA ring; needed here because netjet_fill_dma and write_raw call
 * each other. */
605 static void write_raw(struct BCState *bcs, u_int *buf, int cnt);
/*
 * netjet_fill_dma - start transmitting the pending bcs->tx_skb.
 * @bcs: B-channel state
 *
 * Marks the channel busy, runs the mode-appropriate bit-stuffer
 * (make_raw_data / make_raw_data_56k), then feeds the raw bytes into
 * the TX DMA ring.  Where to write depends on which ring flag was set:
 * NOFRAME (ring is all idle), HALF (half the ring is free), or EMPTY
 * (ring just drained) — the latter two compute the write position from
 * the DMA engine's current read address.
 * NOTE(review): several debugl1 calls below end in a trailing comma —
 * their final argument lines were lost in extraction.
 */
607 void netjet_fill_dma(struct BCState *bcs)
609 register u_int *p, *sp;
614 if (bcs->cs->debug & L1_DEB_HSCX)
615 debugl1(bcs->cs, "tiger fill_dma1: c%d %4lx", bcs->channel,
/* already transmitting: leave the skb queued */
617 if (test_and_set_bit(BC_FLG_BUSY, &bcs->Flag))
619 if (bcs->mode == L1_MODE_HDLC) { // it's 64k
620 if (make_raw_data(bcs))
624 if (make_raw_data_56k(bcs))
627 if (bcs->cs->debug & L1_DEB_HSCX)
628 debugl1(bcs->cs, "tiger fill_dma2: c%d %4lx", bcs->channel,
630 if (test_and_clear_bit(BC_FLG_NOFRAME, &bcs->Flag)) {
631 write_raw(bcs, bcs->hw.tiger.sendp, bcs->hw.tiger.free);
632 } else if (test_and_clear_bit(BC_FLG_HALF, &bcs->Flag)) {
/* where is the DMA engine currently reading from? */
633 p = bus_to_virt(inl(bcs->cs->hw.njet.base + NETJET_DMA_READ_ADR));
634 sp = bcs->hw.tiger.sendp;
/* normalise end-of-ring positions before comparing */
635 if (p == bcs->hw.tiger.s_end)
636 p = bcs->hw.tiger.send - 1;
637 if (sp == bcs->hw.tiger.s_end)
638 sp = bcs->hw.tiger.send - 1;
641 write_raw(bcs, bcs->hw.tiger.sendp, bcs->hw.tiger.free);
645 if (p > bcs->hw.tiger.s_end)
646 p = bcs->hw.tiger.send;
649 if (p > bcs->hw.tiger.s_end)
650 p = bcs->hw.tiger.send;
651 write_raw(bcs, p, bcs->hw.tiger.free - cnt);
653 } else if (test_and_clear_bit(BC_FLG_EMPTY, &bcs->Flag)) {
654 p = bus_to_virt(inl(bcs->cs->hw.njet.base + NETJET_DMA_READ_ADR));
655 cnt = bcs->hw.tiger.s_end - p;
657 p = bcs->hw.tiger.send + 1;
658 cnt = NETJET_DMA_TXSIZE / 2 - 2;
662 if (cnt <= (NETJET_DMA_TXSIZE / 2))
663 cnt += NETJET_DMA_TXSIZE / 2;
667 write_raw(bcs, p, cnt);
669 if (bcs->cs->debug & L1_DEB_HSCX)
670 debugl1(bcs->cs, "tiger fill_dma3: c%d %4lx", bcs->channel,
/*
 * write_raw - copy up to @cnt bit-stuffed bytes into this channel's
 * byte lane of the TX DMA ring, starting at @buf.
 * @bcs: B-channel state (source bytes at bcs->hw.tiger.sp)
 * @buf: ring position to start writing at
 * @cnt: number of ring words available
 *
 * When the frame completes inside this window, the skb is acked and
 * freed, the next skb is dequeued (re-entering netjet_fill_dma), and
 * the remainder of the window is idle-filled.  When the channel is not
 * busy, the window is idle-filled and the NOFRAME/HALF/EMPTY ring
 * flags are advanced so netjet_fill_dma knows how much room exists.
 * NOTE(review): the word-merge statements inside both copy loops and
 * some debugl1 arguments are missing from this extract.
 */
674 static void write_raw(struct BCState *bcs, u_int *buf, int cnt) {
675 u_int mask, val, *p = buf;
680 if (test_bit(BC_FLG_BUSY, &bcs->Flag)) {
/* send either a full window or whatever is left of the frame */
681 if (bcs->hw.tiger.sendcnt > cnt) {
683 bcs->hw.tiger.sendcnt -= cnt;
685 s_cnt = bcs->hw.tiger.sendcnt;
686 bcs->hw.tiger.sendcnt = 0;
692 for (i = 0; i < s_cnt; i++) {
/* channel 1 occupies bits 8-15 of each ring word, channel 0 bits 0-7 */
693 val = bcs->channel ? ((bcs->hw.tiger.sp[i] << 8) & 0xff00) :
694 (bcs->hw.tiger.sp[i]);
697 if (p > bcs->hw.tiger.s_end)
698 p = bcs->hw.tiger.send;
700 bcs->hw.tiger.s_tot += s_cnt;
701 if (bcs->cs->debug & L1_DEB_HSCX)
702 debugl1(bcs->cs, "tiger write_raw: c%d %p-%p %d/%d %d %x", bcs->channel,
704 bcs->hw.tiger.sendcnt, bcs->cs->hw.njet.irqstat0);
705 if (bcs->cs->debug & L1_DEB_HSCX_FIFO)
706 printframe(bcs->cs, bcs->hw.tiger.sp, s_cnt, "snd");
707 bcs->hw.tiger.sp += s_cnt;
708 bcs->hw.tiger.sendp = p;
/* frame fully queued to the ring: ack and release the skb */
709 if (!bcs->hw.tiger.sendcnt) {
711 debugl1(bcs->cs, "tiger write_raw: NULL skb s_cnt %d", s_cnt);
713 if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
714 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
716 spin_lock_irqsave(&bcs->aclock, flags);
717 bcs->ackcnt += bcs->tx_skb->len;
718 spin_unlock_irqrestore(&bcs->aclock, flags);
719 schedule_event(bcs, B_ACKPENDING);
721 dev_kfree_skb_any(bcs->tx_skb);
724 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
725 bcs->hw.tiger.free = cnt - s_cnt;
726 if (bcs->hw.tiger.free > (NETJET_DMA_TXSIZE / 2))
727 test_and_set_bit(BC_FLG_HALF, &bcs->Flag);
729 test_and_clear_bit(BC_FLG_HALF, &bcs->Flag);
730 test_and_set_bit(BC_FLG_NOFRAME, &bcs->Flag);
/* chain straight into the next queued frame, if any */
732 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
733 netjet_fill_dma(bcs);
/* idle-fill the rest of the window behind the frame */
737 for (i = s_cnt; i < cnt; i++) {
739 if (p > bcs->hw.tiger.s_end)
740 p = bcs->hw.tiger.send;
742 if (bcs->cs->debug & L1_DEB_HSCX)
743 debugl1(bcs->cs, "tiger write_raw: fill rest %d",
746 test_and_set_bit(B_XMTBUFREADY, &bcs->event);
747 schedule_work(&bcs->tqueue);
/* not busy: idle-fill and advance NOFRAME -> HALF -> EMPTY */
750 } else if (test_and_clear_bit(BC_FLG_NOFRAME, &bcs->Flag)) {
751 test_and_set_bit(BC_FLG_HALF, &bcs->Flag);
752 fill_mem(bcs, buf, cnt, bcs->channel, 0xff);
753 bcs->hw.tiger.free += cnt;
754 if (bcs->cs->debug & L1_DEB_HSCX)
755 debugl1(bcs->cs, "tiger write_raw: fill half");
756 } else if (test_and_clear_bit(BC_FLG_HALF, &bcs->Flag)) {
757 test_and_set_bit(BC_FLG_EMPTY, &bcs->Flag);
758 fill_mem(bcs, buf, cnt, bcs->channel, 0xff);
759 if (bcs->cs->debug & L1_DEB_HSCX)
760 debugl1(bcs->cs, "tiger write_raw: fill full");
/*
 * write_tiger - TX DMA half-buffer interrupt handler.
 * @cs: card state
 *
 * Mirror of read_tiger for the transmit direction: detects a double
 * interrupt for the same half (underrun), selects the half that just
 * drained (IRQM0_WRITE_1 selects the upper half), refills it via
 * write_raw() for each HDLC-mode B-channel, and clears the WRITE bits
 * from the pending irqstat0.
 */
764 void write_tiger(struct IsdnCardState *cs) {
765 u_int *p, cnt = NETJET_DMA_TXSIZE / 2;
/* same half signalled twice without being refilled: transmit underrun */
767 if ((cs->hw.njet.irqstat0 & cs->hw.njet.last_is0) & NETJET_IRQM0_WRITE) {
768 debugl1(cs, "tiger warn write double dma %x/%x",
769 cs->hw.njet.irqstat0, cs->hw.njet.last_is0);
770 #ifdef ERROR_STATISTIC
/* remember which WRITE half was last signalled */
778 cs->hw.njet.last_is0 &= ~NETJET_IRQM0_WRITE;
779 cs->hw.njet.last_is0 |= (cs->hw.njet.irqstat0 & NETJET_IRQM0_WRITE);
/* point p at the last word of the half that just drained */
781 if (cs->hw.njet.irqstat0 & NETJET_IRQM0_WRITE_1)
782 p = cs->bcs[0].hw.tiger.send + NETJET_DMA_TXSIZE - 1;
784 p = cs->bcs[0].hw.tiger.send + cnt - 1;
785 if ((cs->bcs[0].mode == L1_MODE_HDLC) || (cs->bcs[0].mode == L1_MODE_HDLC_56K))
786 write_raw(cs->bcs, p, cnt);
787 if ((cs->bcs[1].mode == L1_MODE_HDLC) || (cs->bcs[1].mode == L1_MODE_HDLC_56K))
788 write_raw(cs->bcs + 1, p, cnt);
789 cs->hw.njet.irqstat0 &= ~NETJET_IRQM0_WRITE;
/*
 * tiger_l2l1 - layer-2 -> layer-1 event dispatcher for the Tiger
 * B-channel stack.
 * @st:  protocol stack
 * @pr:  primitive (PH_DATA/PH_PULL/PH_ACTIVATE/PH_DEACTIVATE ...)
 * @arg: primitive argument (an sk_buff for data primitives)
 *
 * Queues or starts transmission of data frames, answers pull requests,
 * and (de)activates the channel via mode_tiger, taking the card
 * spinlock around all hardware/state manipulation.
 * NOTE(review): the enclosing switch statement, break statements and
 * some branch bodies are missing from this extract.
 */
793 tiger_l2l1(struct PStack *st, int pr, void *arg)
795 struct BCState *bcs = st->l1.bcs;
796 struct sk_buff *skb = arg;
/* data frame from L2: queue it, or start DMA if the channel is idle */
800 case (PH_DATA | REQUEST):
801 spin_lock_irqsave(&bcs->cs->lock, flags);
803 skb_queue_tail(&bcs->squeue, skb);
806 bcs->cs->BC_Send_Data(bcs);
808 spin_unlock_irqrestore(&bcs->cs->lock, flags);
810 case (PH_PULL | INDICATION):
811 spin_lock_irqsave(&bcs->cs->lock, flags);
/* a tx_skb is already pending when a pull indication arrives */
813 printk(KERN_WARNING "tiger_l2l1: this shouldn't happen\n");
816 bcs->cs->BC_Send_Data(bcs);
818 spin_unlock_irqrestore(&bcs->cs->lock, flags);
820 case (PH_PULL | REQUEST):
822 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
823 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
825 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
827 case (PH_ACTIVATE | REQUEST):
828 spin_lock_irqsave(&bcs->cs->lock, flags);
829 test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
830 mode_tiger(bcs, st->l1.mode, st->l1.bc);
831 /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG */
832 spin_unlock_irqrestore(&bcs->cs->lock, flags);
833 bcs->cs->cardmsg(bcs->cs, MDL_BC_ASSIGN, (void *)(&st->l1.bc));
834 l1_msg_b(st, pr, arg);
836 case (PH_DEACTIVATE | REQUEST):
837 /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG */
838 bcs->cs->cardmsg(bcs->cs, MDL_BC_RELEASE, (void *)(&st->l1.bc));
839 l1_msg_b(st, pr, arg);
841 case (PH_DEACTIVATE | CONFIRM):
842 spin_lock_irqsave(&bcs->cs->lock, flags);
843 test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
844 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
/* mode 0 (L1_MODE_NULL) shuts the channel down */
845 mode_tiger(bcs, 0, st->l1.bc);
846 spin_unlock_irqrestore(&bcs->cs->lock, flags);
847 st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
/*
 * close_tigerstate - shut a B-channel down and free its buffers.
 * @bcs: B-channel state
 *
 * Puts the channel into L1_MODE_NULL, then (if it was initialised)
 * releases the receive/transmit staging buffers and purges both skb
 * queues.  NOTE(review): dev_kfree_skb_any(bcs->tx_skb) is presumably
 * guarded by `if (bcs->tx_skb)` in the complete source — the guard
 * line is missing from this extract.
 */
854 close_tigerstate(struct BCState *bcs)
856 mode_tiger(bcs, 0, bcs->channel);
857 if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
858 kfree(bcs->hw.tiger.rcvbuf);
859 bcs->hw.tiger.rcvbuf = NULL;
860 kfree(bcs->hw.tiger.sendbuf);
861 bcs->hw.tiger.sendbuf = NULL;
862 skb_queue_purge(&bcs->rqueue);
863 skb_queue_purge(&bcs->squeue);
865 dev_kfree_skb_any(bcs->tx_skb);
867 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
/*
 * open_tigerstate - one-time per-channel initialisation.
 * @cs:  card state
 * @bcs: B-channel state to initialise
 *
 * On first open allocates the receive (HSCX_BUFMAX) and raw-send
 * (RAW_BUFMAX) staging buffers with GF P_ATOMIC and initialises both
 * skb queues; subsequent opens just reset the counters.
 * NOTE(review): the printk(KERN_WARNING ...) heads of the two
 * allocation-failure messages and the error returns are missing from
 * this extract.
 */
873 open_tigerstate(struct IsdnCardState *cs, struct BCState *bcs)
875 if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
876 if (!(bcs->hw.tiger.rcvbuf = kmalloc(HSCX_BUFMAX, GFP_ATOMIC))) {
878 "HiSax: No memory for tiger.rcvbuf\n");
881 if (!(bcs->hw.tiger.sendbuf = kmalloc(RAW_BUFMAX, GFP_ATOMIC))) {
883 "HiSax: No memory for tiger.sendbuf\n");
886 skb_queue_head_init(&bcs->rqueue);
887 skb_queue_head_init(&bcs->squeue);
890 bcs->hw.tiger.sendcnt = 0;
891 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
/*
 * setstack_tiger - attach a protocol stack to a Tiger B-channel.
 * @st:  protocol stack being wired up
 * @bcs: B-channel state
 *
 * Records the channel number, opens the channel state (buffers and
 * queues), and installs tiger_l2l1 as the stack's L2->L1 entry point.
 */
898 setstack_tiger(struct PStack *st, struct BCState *bcs)
900 bcs->channel = st->l1.bc;
901 if (open_tigerstate(st->l1.hardware, bcs))
904 st->l2.l2l1 = tiger_l2l1;
905 setstack_manager(st);
/*
 * inittiger - allocate and program the Tiger DMA rings.
 * @cs: card state
 *
 * Allocates one DMA-capable TX ring and one RX ring of 32-bit words;
 * both B-channel states alias the same rings (each channel owns one
 * byte lane per word).  Programs the controller's start/irq/end
 * addresses for both directions — note the hardware naming is from the
 * card's point of view: DMA_READ_* is the card reading the TX ring,
 * DMA_WRITE_* is the card writing the RX ring.  Finally installs the
 * per-channel open/close callbacks.
 * NOTE(review): the printk heads of the allocation-failure messages
 * and the early returns are missing from this extract.
 */
913 inittiger(struct IsdnCardState *cs)
915 cs->bcs[0].hw.tiger.send = kmalloc_array(NETJET_DMA_TXSIZE,
916 sizeof(unsigned int),
917 GFP_KERNEL | GFP_DMA);
918 if (!cs->bcs[0].hw.tiger.send) {
920 "HiSax: No memory for tiger.send\n");
/* half-way irq marker and last-word pointer of the TX ring */
923 cs->bcs[0].hw.tiger.s_irq = cs->bcs[0].hw.tiger.send + NETJET_DMA_TXSIZE / 2 - 1;
924 cs->bcs[0].hw.tiger.s_end = cs->bcs[0].hw.tiger.send + NETJET_DMA_TXSIZE - 1;
/* channel 1 shares channel 0's ring */
925 cs->bcs[1].hw.tiger.send = cs->bcs[0].hw.tiger.send;
926 cs->bcs[1].hw.tiger.s_irq = cs->bcs[0].hw.tiger.s_irq;
927 cs->bcs[1].hw.tiger.s_end = cs->bcs[0].hw.tiger.s_end;
/* 0xff is the HDLC idle pattern */
929 memset(cs->bcs[0].hw.tiger.send, 0xff, NETJET_DMA_TXSIZE * sizeof(unsigned int));
930 debugl1(cs, "tiger: send buf %p - %p", cs->bcs[0].hw.tiger.send,
931 cs->bcs[0].hw.tiger.send + NETJET_DMA_TXSIZE - 1);
932 outl(virt_to_bus(cs->bcs[0].hw.tiger.send),
933 cs->hw.njet.base + NETJET_DMA_READ_START);
934 outl(virt_to_bus(cs->bcs[0].hw.tiger.s_irq),
935 cs->hw.njet.base + NETJET_DMA_READ_IRQ);
936 outl(virt_to_bus(cs->bcs[0].hw.tiger.s_end),
937 cs->hw.njet.base + NETJET_DMA_READ_END);
938 cs->bcs[0].hw.tiger.rec = kmalloc_array(NETJET_DMA_RXSIZE,
939 sizeof(unsigned int),
940 GFP_KERNEL | GFP_DMA);
941 if (!cs->bcs[0].hw.tiger.rec) {
943 "HiSax: No memory for tiger.rec\n");
946 debugl1(cs, "tiger: rec buf %p - %p", cs->bcs[0].hw.tiger.rec,
947 cs->bcs[0].hw.tiger.rec + NETJET_DMA_RXSIZE - 1);
948 cs->bcs[1].hw.tiger.rec = cs->bcs[0].hw.tiger.rec;
949 memset(cs->bcs[0].hw.tiger.rec, 0xff, NETJET_DMA_RXSIZE * sizeof(unsigned int));
950 outl(virt_to_bus(cs->bcs[0].hw.tiger.rec),
951 cs->hw.njet.base + NETJET_DMA_WRITE_START);
952 outl(virt_to_bus(cs->bcs[0].hw.tiger.rec + NETJET_DMA_RXSIZE / 2 - 1),
953 cs->hw.njet.base + NETJET_DMA_WRITE_IRQ);
954 outl(virt_to_bus(cs->bcs[0].hw.tiger.rec + NETJET_DMA_RXSIZE - 1),
955 cs->hw.njet.base + NETJET_DMA_WRITE_END);
956 debugl1(cs, "tiger: dmacfg %x/%x pulse=%d",
957 inl(cs->hw.njet.base + NETJET_DMA_WRITE_ADR),
958 inl(cs->hw.njet.base + NETJET_DMA_READ_ADR),
959 bytein(cs->hw.njet.base + NETJET_PULSE_CNT));
960 cs->hw.njet.last_is0 = 0;
961 cs->bcs[0].BC_SetStack = setstack_tiger;
962 cs->bcs[1].BC_SetStack = setstack_tiger;
963 cs->bcs[0].BC_Close = close_tigerstate;
964 cs->bcs[1].BC_Close = close_tigerstate;
968 releasetiger(struct IsdnCardState
*cs
)
970 kfree(cs
->bcs
[0].hw
.tiger
.send
);
971 cs
->bcs
[0].hw
.tiger
.send
= NULL
;
972 cs
->bcs
[1].hw
.tiger
.send
= NULL
;
973 kfree(cs
->bcs
[0].hw
.tiger
.rec
);
974 cs
->bcs
[0].hw
.tiger
.rec
= NULL
;
975 cs
->bcs
[1].hw
.tiger
.rec
= NULL
;
979 release_io_netjet(struct IsdnCardState
*cs
)
981 byteout(cs
->hw
.njet
.base
+ NETJET_IRQMASK0
, 0);
982 byteout(cs
->hw
.njet
.base
+ NETJET_IRQMASK1
, 0);
984 release_region(cs
->hw
.njet
.base
, 256);