 * Author	Karsten Keil <keil@isdn4linux.de>
 *
 * Copyright 2009 by Karsten Keil <keil@isdn4linux.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>

#include <linux/isdn/hdlc.h>

#define NETJET_REV	"2.0"
	struct isdnhdlc_vars	hsend;
	struct isdnhdlc_vars	hrecv;

#define TX_INIT		0x0001
#define TX_IDLE		0x0002
#define TX_UNDERRUN	0x0100
#define RX_OVERRUN	0x0100

	struct list_head	list;
	char			name[MISDN_MAX_IDLEN];
	spinlock_t		lock;	/* lock HW */
	struct tiger_dma	send;
	struct tiger_dma	recv;
	struct tiger_ch		bc[2];

static LIST_HEAD(Cards);
static DEFINE_RWLOCK(card_lock); /* protect Cards */
_set_debug(struct tiger_hw *card)
	card->isac.dch.debug = debug;
	card->bc[0].bch.debug = debug;
	card->bc[1].bch.debug = debug;
set_debug(const char *val, struct kernel_param *kp)
	struct tiger_hw *card;

	ret = param_set_uint(val, kp);
	read_lock(&card_lock);
	list_for_each_entry(card, &Cards, list)
	read_unlock(&card_lock);
MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(NETJET_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Netjet debug mask");
nj_disable_hwirq(struct tiger_hw *card)
	outb(0, card->base + NJ_IRQMASK0);
	outb(0, card->base + NJ_IRQMASK1);
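
/*
 * ISAC register access goes through the Tiger AUX port: the two high bits
 * of the ISAC register offset select an address page via NJ_AUXDATA, and
 * the low nibble is shifted onto 4-byte-spaced I/O addresses at NJ_ISAC_OFF.
 */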
ReadISAC_nj(void *p, u8 offset)
	struct tiger_hw *card = p;

	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	ret = inb(card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
WriteISAC_nj(void *p, u8 offset, u8 value)
	struct tiger_hw *card = p;

	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outb(value, card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
ReadFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
	struct tiger_hw *card = p;

	outb(card->auxd, card->base + NJ_AUXDATA);
	insb(card->base + NJ_ISAC_OFF, data, size);
WriteFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
	struct tiger_hw *card = p;

	outb(card->auxd, card->base + NJ_AUXDATA);
	outsb(card->base + NJ_ISAC_OFF, data, size);
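
/*
 * Each 32-bit word in the Tiger DMA rings carries one sample period:
 * B-channel 1 uses the low byte and B-channel 2 the second byte, which is
 * why the fill/send helpers below mask with 0xffffff00 or 0xffff00ff
 * depending on bc->bch.nr.
 */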
fill_mem(struct tiger_ch *bc, u32 idx, u32 cnt, u32 fill)
	struct tiger_hw *card = bc->bch.hw;
	u32 mask = 0xff, val;

	pr_debug("%s: B%1d fill %02x len %d idx %d/%d\n", card->name,
		 bc->bch.nr, fill, cnt, idx, card->send.idx);
	if (bc->bch.nr & 2) {
		val = card->send.start[idx];
		card->send.start[idx++] = val;
		if (idx >= card->send.size)
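
/*
 * mode_tiger() switches a B-channel between ISDN_P_NONE, transparent and
 * HDLC operation; the Tiger DMA engine and its interrupts are only started
 * when the first channel comes up and are stopped again once both channels
 * are back in ISDN_P_NONE.
 */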
mode_tiger(struct tiger_ch *bc, u32 protocol)
	struct tiger_hw *card = bc->bch.hw;

	pr_debug("%s: B%1d protocol %x-->%x\n", card->name,
		 bc->bch.nr, bc->bch.state, protocol);
	if (bc->bch.state == ISDN_P_NONE)
	fill_mem(bc, 0, card->send.size, 0xff);
	bc->bch.state = protocol;
	/* only stop dma and interrupts if both channels NULL */
	if ((card->bc[0].bch.state == ISDN_P_NONE) &&
	    (card->bc[1].bch.state == ISDN_P_NONE)) {
		outb(card->dmactrl, card->base + NJ_DMACTRL);
		outb(0, card->base + NJ_IRQMASK0);
	test_and_clear_bit(FLG_HDLC, &bc->bch.Flags);
	test_and_clear_bit(FLG_TRANSPARENT, &bc->bch.Flags);
	test_and_set_bit(FLG_TRANSPARENT, &bc->bch.Flags);
	bc->bch.state = protocol;
	bc->free = card->send.size / 2;
	bc->txstate = TX_INIT | TX_IDLE;
	if (!card->dmactrl) {
		outb(card->dmactrl, card->base + NJ_DMACTRL);
		outb(0x0f, card->base + NJ_IRQMASK0);
	test_and_set_bit(FLG_HDLC, &bc->bch.Flags);
	bc->bch.state = protocol;
	bc->free = card->send.size / 2;
	bc->txstate = TX_INIT | TX_IDLE;
	isdnhdlc_rcv_init(&bc->hrecv, 0);
	isdnhdlc_out_init(&bc->hsend, 0);
	if (!card->dmactrl) {
		outb(card->dmactrl, card->base + NJ_DMACTRL);
		outb(0x0f, card->base + NJ_IRQMASK0);
	pr_info("%s: %s protocol %x not handled\n", card->name,
	card->send.dmacur = inl(card->base + NJ_DMA_READ_ADR);
	card->recv.dmacur = inl(card->base + NJ_DMA_WRITE_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	pr_debug("%s: %s ctrl %x irq %02x/%02x idx %d/%d\n",
		 card->name, __func__,
		 inb(card->base + NJ_DMACTRL),
		 inb(card->base + NJ_IRQMASK0),
		 inb(card->base + NJ_IRQSTAT0),
nj_reset(struct tiger_hw *card)
	outb(0xff, card->base + NJ_CTRL); /* Reset On */

	/* now edge triggered for TJ320 GE 13/07/00 */
	/* see comment in IRQ function */
	if (card->typ == NETJET_S_TJ320)	/* TJ320 */
		card->ctrlreg = 0x40;	/* Reset Off and status read clear */
	else
		card->ctrlreg = 0x00;	/* Reset Off and status read clear */
	outb(card->ctrlreg, card->base + NJ_CTRL);

	/* configure AUX pins (all output except ISAC IRQ pin) */
	outb(~NJ_ISACIRQ, card->base + NJ_AUXCTRL);
	outb(NJ_ISACIRQ, card->base + NJ_IRQMASK1);
	outb(card->auxd, card->base + NJ_AUXDATA);
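
/*
 * The DMA area is split in half: the lower half backs the send ring (the
 * card's READ DMA channel) and the upper half the receive ring (WRITE DMA).
 * Each ring is programmed with an interrupt address at its midpoint, so the
 * controller signals every time it crosses a half-buffer boundary.
 */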
inittiger(struct tiger_hw *card)
	card->dma_p = pci_alloc_consistent(card->pdev, NJ_DMA_SIZE,
		pr_info("%s: No DMA memory\n", card->name);
	if ((u64)card->dma > 0xffffffff) {
		pr_info("%s: DMA outside 32 bit\n", card->name);
	for (i = 0; i < 2; i++) {
		card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC);
		if (!card->bc[i].hsbuf) {
			pr_info("%s: no B%d send buffer\n", card->name, i + 1);
		card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC);
		if (!card->bc[i].hrbuf) {
			pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
	memset(card->dma_p, 0xff, NJ_DMA_SIZE);

	card->send.start = card->dma_p;
	card->send.dmastart = (u32)card->dma;
	card->send.dmaend = card->send.dmastart +
		(4 * (NJ_DMA_TXSIZE - 1));
	card->send.dmairq = card->send.dmastart +
		(4 * ((NJ_DMA_TXSIZE / 2) - 1));
	card->send.size = NJ_DMA_TXSIZE;

	if (debug & DEBUG_HW)
		pr_notice("%s: send buffer phy %#x - %#x - %#x virt %p"
			  " size %zu u32\n", card->name,
			  card->send.dmastart, card->send.dmairq,
			  card->send.dmaend, card->send.start, card->send.size);

	outl(card->send.dmastart, card->base + NJ_DMA_READ_START);
	outl(card->send.dmairq, card->base + NJ_DMA_READ_IRQ);
	outl(card->send.dmaend, card->base + NJ_DMA_READ_END);

	card->recv.start = card->dma_p + (NJ_DMA_SIZE / 2);
	card->recv.dmastart = (u32)card->dma + (NJ_DMA_SIZE / 2);
	card->recv.dmaend = card->recv.dmastart +
		(4 * (NJ_DMA_RXSIZE - 1));
	card->recv.dmairq = card->recv.dmastart +
		(4 * ((NJ_DMA_RXSIZE / 2) - 1));
	card->recv.size = NJ_DMA_RXSIZE;

	if (debug & DEBUG_HW)
		pr_notice("%s: recv buffer phy %#x - %#x - %#x virt %p"
			  " size %zu u32\n", card->name,
			  card->recv.dmastart, card->recv.dmairq,
			  card->recv.dmaend, card->recv.start, card->recv.size);

	outl(card->recv.dmastart, card->base + NJ_DMA_WRITE_START);
	outl(card->recv.dmairq, card->base + NJ_DMA_WRITE_IRQ);
	outl(card->recv.dmaend, card->base + NJ_DMA_WRITE_END);
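
/*
 * read_dma() copies cnt 32-bit words out of the receive ring starting at
 * idx and passes the recovered bytes up the mISDN stack, either raw in
 * transparent mode or through isdnhdlc_decode() in HDLC mode.
 */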
read_dma(struct tiger_ch *bc, u32 idx, int cnt)
	struct tiger_hw *card = bc->bch.hw;

	if (bc->lastrx == idx) {
		bc->rxstate |= RX_OVERRUN;
		pr_info("%s: B%1d overrun at idx %d\n", card->name,
	if (!bc->bch.rx_skb) {
		bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen, GFP_ATOMIC);
		if (!bc->bch.rx_skb) {
			pr_info("%s: B%1d receive out of memory\n",
				card->name, bc->bch.nr);
	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
		if ((bc->bch.rx_skb->len + cnt) > bc->bch.maxlen) {
			pr_debug("%s: B%1d overrun %d\n", card->name,
				 bc->bch.nr, bc->bch.rx_skb->len + cnt);
			skb_trim(bc->bch.rx_skb, 0);
		p = skb_put(bc->bch.rx_skb, cnt);
	for (i = 0; i < cnt; i++) {
		val = card->recv.start[idx++];
		if (idx >= card->recv.size)
	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
		stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i,
				       bc->bch.rx_skb->data, bc->bch.maxlen);
		if (stat > 0) /* valid frame received */
			p = skb_put(bc->bch.rx_skb, stat);
		else if (stat == -HDLC_CRC_ERROR)
			pr_info("%s: B%1d receive frame CRC error\n",
				card->name, bc->bch.nr);
		else if (stat == -HDLC_FRAMING_ERROR)
			pr_info("%s: B%1d receive framing error\n",
				card->name, bc->bch.nr);
		else if (stat == -HDLC_LENGTH_ERROR)
			pr_info("%s: B%1d receive frame too long (> %d)\n",
				card->name, bc->bch.nr, bc->bch.maxlen);
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-recv %s %d ",
			 bc->bch.nr, card->name, stat);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET,
	recv_Bchannel(&bc->bch, 0);
	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
		if (!bc->bch.rx_skb) {
			bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen,
			if (!bc->bch.rx_skb) {
				pr_info("%s: B%1d receive out of memory\n",
					card->name, bc->bch.nr);
recv_tiger(struct tiger_hw *card, u8 irq_stat)
	int cnt = card->recv.size / 2;

	/* Note receive is via the WRITE DMA channel */
	card->last_is0 &= ~NJ_IRQM0_WR_MASK;
	card->last_is0 |= (irq_stat & NJ_IRQM0_WR_MASK);

	if (irq_stat & NJ_IRQM0_WR_END)
		idx = card->recv.size - 1;

	if (test_bit(FLG_ACTIVE, &card->bc[0].bch.Flags))
		read_dma(&card->bc[0], idx, cnt);
	if (test_bit(FLG_ACTIVE, &card->bc[1].bch.Flags))
		read_dma(&card->bc[1], idx, cnt);
/* sync with current DMA address at start or after exception */
resync(struct tiger_ch *bc, struct tiger_hw *card)
	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (bc->free > card->send.size / 2)
		bc->free = card->send.size / 2;
	/* currently we simply sync to the next complete free area;
	 * this has the advantage that we always have maximum time to
	if (card->send.idx < ((card->send.size / 2) - 1))
		bc->idx = (card->recv.size / 2) - 1;
	else
		bc->idx = card->recv.size - 1;
	bc->txstate = TX_RUN;
	pr_debug("%s: %s B%1d free %d idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, bc->free, bc->idx, card->send.idx);
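
/*
 * fill_hdlc_flag() keeps the transmitter busy when there is no frame data:
 * isdnhdlc_encode() is called with a NULL source so it produces HDLC idle
 * flags, which are then merged into this channel's byte lane of the send
 * ring.
 */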
static int bc_next_frame(struct tiger_ch *);

fill_hdlc_flag(struct tiger_ch *bc)
	struct tiger_hw *card = bc->bch.hw;

	pr_debug("%s: %s B%1d %d state %x idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, bc->free, bc->txstate,
		 bc->idx, card->send.idx);
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
	count = isdnhdlc_encode(&bc->hsend, NULL, 0, &i,
				bc->hsbuf, bc->free);
	pr_debug("%s: B%1d hdlc encoded %d flags\n", card->name,
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	for (i = 0; i < count; i++) {
		if (bc->idx >= card->send.size)
		v = card->send.start[bc->idx];
		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
		card->send.start[bc->idx++] = v;
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
fill_dma(struct tiger_ch *bc)
	struct tiger_hw *card = bc->bch.hw;

	count = bc->bch.tx_skb->len - bc->bch.tx_idx;
	pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, count, bc->free, bc->bch.tx_idx,
		 bc->bch.tx_skb->len, bc->txstate, bc->idx, card->send.idx);
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
	p = bc->bch.tx_skb->data + bc->bch.tx_idx;
	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
		count = isdnhdlc_encode(&bc->hsend, p, count, &i,
					bc->hsbuf, bc->free);
		pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name,
			 bc->bch.nr, i, count);
	if (count > bc->free)
	bc->bch.tx_idx += count;
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	for (i = 0; i < count; i++) {
		if (bc->idx >= card->send.size)
		v = card->send.start[bc->idx];
		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
		card->send.start[bc->idx++] = v;
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
bc_next_frame(struct tiger_ch *bc)
	if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len)
	if (bc->bch.tx_skb) {
		/* send confirm, on trans, free on hdlc. */
		if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
			confirm_Bsend(&bc->bch);
		dev_kfree_skb(bc->bch.tx_skb);
	if (get_next_bframe(&bc->bch))
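
/*
 * send_tiger_bc() runs once per completed half-buffer: it credits half a
 * ring of free space, flags a TX underrun if the whole ring has drained,
 * and then refills the ring from the next queued frame, falling back to
 * idle fill when there is nothing to send.
 */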
send_tiger_bc(struct tiger_hw *card, struct tiger_ch *bc)
	bc->free += card->send.size / 2;
	if (bc->free >= card->send.size) {
		if (!(bc->txstate & (TX_UNDERRUN | TX_INIT))) {
			pr_info("%s: B%1d TX underrun state %x\n", card->name,
				bc->bch.nr, bc->txstate);
			bc->txstate |= TX_UNDERRUN;
		bc->free = card->send.size;
	ret = bc_next_frame(bc);
	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
	pr_debug("%s: B%1d TX no data free %d idx %d/%d\n", card->name,
		 bc->bch.nr, bc->free, bc->idx, card->send.idx);
	if (!(bc->txstate & (TX_IDLE | TX_INIT))) {
		fill_mem(bc, bc->idx, bc->free, 0xff);
		if (bc->free == card->send.size)
			bc->txstate |= TX_IDLE;
send_tiger(struct tiger_hw *card, u8 irq_stat)
	/* Note send is via the READ DMA channel */
	if ((irq_stat & card->last_is0) & NJ_IRQM0_RD_MASK) {
		pr_info("%s: tiger warn write double dma %x/%x\n",
			card->name, irq_stat, card->last_is0);
	card->last_is0 &= ~NJ_IRQM0_RD_MASK;
	card->last_is0 |= (irq_stat & NJ_IRQM0_RD_MASK);

	for (i = 0; i < 2; i++) {
		if (test_bit(FLG_ACTIVE, &card->bc[i].bch.Flags))
			send_tiger_bc(card, &card->bc[i]);
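
/*
 * Interrupt handler.  NJ_IRQSTAT1 reflects the ISAC interrupt pin (active
 * low on NJ_ISACIRQ); the DMA side is reconstructed by comparing the
 * current read/write DMA addresses against the programmed midpoint
 * addresses, which tells which half of each ring is currently free.
 */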
nj_irq(int intno, void *dev_id)
	struct tiger_hw *card = dev_id;
	u8 val, s1val, s0val;

	spin_lock(&card->lock);
	s0val = inb(card->base | NJ_IRQSTAT0);
	s1val = inb(card->base | NJ_IRQSTAT1);
	if ((s1val & NJ_ISACIRQ) && (s0val == 0)) {
		spin_unlock(&card->lock);
	pr_debug("%s: IRQSTAT0 %02x IRQSTAT1 %02x\n", card->name, s0val, s1val);
	if (!(s1val & NJ_ISACIRQ)) {
		val = ReadISAC_nj(card, ISAC_ISTA);
		mISDNisac_irq(&card->isac, val);
	outb(s0val, card->base | NJ_IRQSTAT0);

	/* set bits in sval to indicate which page is free */
	card->recv.dmacur = inl(card->base | NJ_DMA_WRITE_ADR);
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	if (card->recv.dmacur < card->recv.dmairq)
		s0val = 0x08;	/* the 2nd write area is free */
	else
		s0val = 0x04;	/* the 1st write area is free */

	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (card->send.dmacur < card->send.dmairq)
		s0val |= 0x02;	/* the 2nd read area is free */
	else
		s0val |= 0x01;	/* the 1st read area is free */

	pr_debug("%s: DMA Status %02x/%02x/%02x %d/%d\n", card->name,
		 s1val, s0val, card->last_is0,
		 card->recv.idx, card->send.idx);
	/* test if we have a DMA interrupt */
	if (s0val != card->last_is0) {
		if ((s0val & NJ_IRQM0_RD_MASK) !=
		    (card->last_is0 & NJ_IRQM0_RD_MASK))
			/* got a read dma int */
			send_tiger(card, s0val);
		if ((s0val & NJ_IRQM0_WR_MASK) !=
		    (card->last_is0 & NJ_IRQM0_WR_MASK))
			/* got a write dma int */
			recv_tiger(card, s0val);
	spin_unlock(&card->lock);
nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);

	spin_lock_irqsave(&card->lock, flags);
	ret = bchannel_senddata(bch, skb);
	if (ret > 0) { /* direct TX */
		id = hh->id; /* skb can be freed */
		spin_unlock_irqrestore(&card->lock, flags);
		if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
			queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
	} else
		spin_unlock_irqrestore(&card->lock, flags);
	case PH_ACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
			ret = mode_tiger(bc, ch->protocol);
		spin_unlock_irqrestore(&card->lock, flags);
		_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
	case PH_DEACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		mISDN_clear_bchannel(bch);
		mode_tiger(bc, ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
	struct tiger_hw *card = bc->bch.hw;

	case MISDN_CTRL_GETOP:
	/* Nothing implemented yet */
	case MISDN_CTRL_FILL_EMPTY:
		pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	test_and_clear_bit(FLG_OPEN, &bch->Flags);
	if (test_bit(FLG_ACTIVE, &bch->Flags)) {
		spin_lock_irqsave(&card->lock, flags);
		mISDN_freebchannel(bch);
		test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
		test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
		mode_tiger(bc, ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
	ch->protocol = ISDN_P_NONE;
	module_put(THIS_MODULE);
	case CONTROL_CHANNEL:
		ret = channel_bctrl(bc, arg);
		pr_info("%s: %s unknown prim(%x)\n", card->name, __func__, cmd);
channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
	case MISDN_CTRL_GETOP:
		cq->op = MISDN_CTRL_LOOP;
	case MISDN_CTRL_LOOP:
		/* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
		if (cq->channel < 0 || cq->channel > 3) {
		ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
		pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
open_bchannel(struct tiger_hw *card, struct channel_req *rq)
	struct bchannel *bch;

	if (rq->adr.channel > 2)
	if (rq->protocol == ISDN_P_NONE)
	bch = &card->bc[rq->adr.channel - 1].bch;
	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
		return -EBUSY; /* b-channel can only be opened once */
	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
	bch->ch.protocol = rq->protocol;
/* device control function */
nj_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
	struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel *dch = container_of(dev, struct dchannel, dev);
	struct tiger_hw *card = dch->hw;
	struct channel_req *rq;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	if (rq->protocol == ISDN_P_TE_S0)
		err = card->isac.open(&card->isac, rq);
	else
		err = open_bchannel(card, rq);
	if (!try_module_get(THIS_MODULE))
		pr_info("%s: cannot get module\n", card->name);
	pr_debug("%s: dev(%d) close from %p\n", card->name, dch->dev.id,
		 __builtin_return_address(0));
	module_put(THIS_MODULE);
	case CONTROL_CHANNEL:
		err = channel_ctrl(card, arg);
	pr_debug("%s: %s unknown command %x\n",
		 card->name, __func__, cmd);
nj_init_card(struct tiger_hw *card)
	spin_lock_irqsave(&card->lock, flags);
	nj_disable_hwirq(card);
	spin_unlock_irqrestore(&card->lock, flags);

	card->irq = card->pdev->irq;
	if (request_irq(card->irq, nj_irq, IRQF_SHARED, card->name, card)) {
		pr_info("%s: couldn't get interrupt %d\n",
			card->name, card->irq);
	spin_lock_irqsave(&card->lock, flags);
	ret = card->isac.init(&card->isac);
	ret = inittiger(card);
	mode_tiger(&card->bc[0], ISDN_P_NONE);
	mode_tiger(&card->bc[1], ISDN_P_NONE);
	spin_unlock_irqrestore(&card->lock, flags);
nj_release(struct tiger_hw *card)
	spin_lock_irqsave(&card->lock, flags);
	nj_disable_hwirq(card);
	mode_tiger(&card->bc[0], ISDN_P_NONE);
	mode_tiger(&card->bc[1], ISDN_P_NONE);
	card->isac.release(&card->isac);
	spin_unlock_irqrestore(&card->lock, flags);
	release_region(card->base, card->base_s);
	free_irq(card->irq, card);
	if (card->isac.dch.dev.dev.class)
		mISDN_unregister_device(&card->isac.dch.dev);
	for (i = 0; i < 2; i++) {
		mISDN_freebchannel(&card->bc[i].bch);
		kfree(card->bc[i].hsbuf);
		kfree(card->bc[i].hrbuf);
	pci_free_consistent(card->pdev, NJ_DMA_SIZE,
			    card->dma_p, card->dma);
	write_lock_irqsave(&card_lock, flags);
	list_del(&card->list);
	write_unlock_irqrestore(&card_lock, flags);
	pci_clear_master(card->pdev);
	pci_disable_device(card->pdev);
	pci_set_drvdata(card->pdev, NULL);
nj_setup(struct tiger_hw *card)
	card->base = pci_resource_start(card->pdev, 0);
	card->base_s = pci_resource_len(card->pdev, 0);
	if (!request_region(card->base, card->base_s, card->name)) {
		pr_info("%s: NETjet config port %#x-%#x already in use\n",
			card->name, card->base,
			(u32)(card->base + card->base_s - 1));
	ASSIGN_FUNC(nj, ISAC, card->isac);
static int __devinit
setup_instance(struct tiger_hw *card)
	snprintf(card->name, MISDN_MAX_IDLEN - 1, "netjet.%d", nj_cnt + 1);
	write_lock_irqsave(&card_lock, flags);
	list_add_tail(&card->list, &Cards);
	write_unlock_irqrestore(&card_lock, flags);

	card->isac.name = card->name;
	spin_lock_init(&card->lock);
	card->isac.hwlock = &card->lock;
	mISDNisac_init(&card->isac, card);

	card->isac.dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
	card->isac.dch.dev.D.ctrl = nj_dctrl;
	for (i = 0; i < 2; i++) {
		card->bc[i].bch.nr = i + 1;
		set_channelmap(i + 1, card->isac.dch.dev.channelmap);
		mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM);
		card->bc[i].bch.hw = card;
		card->bc[i].bch.ch.send = nj_l2l1B;
		card->bc[i].bch.ch.ctrl = nj_bctrl;
		card->bc[i].bch.ch.nr = i + 1;
		list_add(&card->bc[i].bch.ch.list,
			 &card->isac.dch.dev.bchannels);
		card->bc[i].bch.hw = card;
	err = nj_setup(card);
	err = mISDN_register_device(&card->isac.dch.dev, &card->pdev->dev,
	err = nj_init_card(card);
	pr_notice("Netjet %d cards installed\n", nj_cnt);
static int __devinit
nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	struct tiger_hw *card;

	if (pdev->subsystem_vendor == 0x8086 &&
	    pdev->subsystem_device == 0x0003) {
		pr_notice("Netjet: Digium X100P/X101P not handled\n");
	if (pdev->subsystem_vendor == 0x55 &&
	    pdev->subsystem_device == 0x02) {
		pr_notice("Netjet: Enter!Now not handled yet\n");
	if (pdev->subsystem_vendor == 0xb100 &&
	    pdev->subsystem_device == 0x0003) {
		pr_notice("Netjet: Digium TDM400P not handled yet\n");
	card = kzalloc(sizeof(struct tiger_hw), GFP_ATOMIC);
		pr_info("No kmem for Netjet\n");
	err = pci_enable_device(pdev);
	printk(KERN_INFO "nj_probe(mISDN): found adapter at %s\n",
	pci_set_master(pdev);

	/* the TJ300 and TJ320 must be detected, since the IRQ handling is
	 * different; unfortunately the chips use the same device ID, but the
	 * TJ320 has bit 20 set in the PCI status config register
	 */
	pci_read_config_dword(pdev, 0x04, &cfg);
	if (cfg & 0x00100000)
		card->typ = NETJET_S_TJ320;
	else
		card->typ = NETJET_S_TJ300;

	card->base = pci_resource_start(pdev, 0);
	card->irq = pdev->irq;
	pci_set_drvdata(pdev, card);
	err = setup_instance(card);
	pci_set_drvdata(pdev, NULL);
static void __devexit
nj_remove(struct pci_dev *pdev)
	struct tiger_hw *card = pci_get_drvdata(pdev);

		pr_info("%s drvdata already removed\n", __func__);
/* We cannot select cards by PCI_SUB... IDs, since there are cards with
 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject the known
 * other cards which do not work with this driver - see the probe function */
static struct pci_device_id nj_pci_ids[] __devinitdata = {
	{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
MODULE_DEVICE_TABLE(pci, nj_pci_ids);
static struct pci_driver nj_driver = {
	.remove = __devexit_p(nj_remove),
	.id_table = nj_pci_ids,
static int __init
nj_init(void)
	pr_notice("Netjet PCI driver Rev. %s\n", NETJET_REV);
	err = pci_register_driver(&nj_driver);

static void __exit
nj_cleanup(void)
	pci_unregister_driver(&nj_driver);

module_init(nj_init);
module_exit(nj_cleanup);