/* $Id: hfc_pci.c,v 1.48.2.4 2004/02/11 13:21:33 keil Exp $
 *
 * low level driver for CCD's hfc-pci based cards
 *
 * Author       Werner Cornelius
 *              based on existing driver for CCD hfc ISA cards
 * Copyright    by Werner Cornelius  <werner@isdn4linux.de>
 *              by Karsten Keil      <keil@isdn4linux.de>
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * For changes and modifications please read
 * Documentation/isdn/HiSax.cert
 *
 */
#include <linux/init.h>
#include "hisax.h"
#include "hfc_pci.h"
#include "isdnl1.h"
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
static const char *hfcpci_revision = "$Revision: 1.48.2.4 $";
/* table entry in the PCI devices list */

#define NT_T1_COUNT     20      /* number of 3.125ms interrupts for G2 timeout */
#define CLKDEL_TE       0x0e    /* CLKDEL in TE mode */
#define CLKDEL_NT       0x6c    /* CLKDEL in NT mode */
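/* With the auto timer programmed for 3.125 ms ticks (HFCPCI_TIM3_125), a count
 * of 20 corresponds to roughly 62.5 ms of waiting for the G2 -> G3 transition
 * in NT mode before the NT-mode state handling in hfcpci_bh() gives up. */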
static const PCI_ENTRY id_list[] =
{
    {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0, "CCD/Billion/Asuscom", "2BD0"},
    {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000, "Billion", "B000"},
    {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006, "Billion", "B006"},
    {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007, "Billion", "B007"},
    {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008, "Billion", "B008"},
    {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009, "Billion", "B009"},
    {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A, "Billion", "B00A"},
    {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, "Billion", "B00B"},
    {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, "Billion", "B00C"},
    {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, "Seyeon", "B100"},
    {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700, "Primux II S0", "B700"},
    {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701, "Primux II S0 NT", "B701"},
    {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, "Abocom/Magitek", "2BD1"},
    {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, "Asuscom/Askey", "675"},
    {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, "German telekom", "T-Concept"},
    {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T, "German telekom", "A1T"},
    {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575, "Motorola MC145575", "MC145575"},
    {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0, "Zoltrix", "2BD0"},
    {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E, "Digi International", "Digi DataFire Micro V IOM2 (Europe)"},
    {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E, "Digi International", "Digi DataFire Micro V (Europe)"},
    {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A, "Digi International", "Digi DataFire Micro V IOM2 (North America)"},
    {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A, "Digi International", "Digi DataFire Micro V (North America)"},
    {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, "Sitecom Europe", "DC-105 ISDN PCI"},
    {0, 0, NULL, NULL},
};
/******************************************/
/* free hardware resources used by driver */
/******************************************/
release_io_hfcpci(struct IsdnCardState *cs)
    printk(KERN_INFO "HiSax: release hfcpci at %p\n", cs->hw.hfcpci.pci_io);
    cs->hw.hfcpci.int_m2 = 0;   /* interrupt output off ! */
    Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
    Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET);   /* Reset On */
    Write_hfc(cs, HFCPCI_CIRM, 0);      /* Reset Off */
    Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
    pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, 0);  /* disable memory mapped ports + busmaster */
    del_timer(&cs->hw.hfcpci.timer);
    pci_free_consistent(cs->hw.hfcpci.dev, 0x8000,
                        cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma);
    cs->hw.hfcpci.fifos = NULL;
    iounmap((void *)cs->hw.hfcpci.pci_io);
/********************************************************************************/
/* function called to reset the HFC PCI chip. A complete software reset of chip */
/* and fifos is done.                                                           */
/********************************************************************************/
reset_hfcpci(struct IsdnCardState *cs)
    pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO);      /* enable memory mapped ports, disable busmaster */
    cs->hw.hfcpci.int_m2 = 0;   /* interrupt output off ! */
    Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);

    printk(KERN_INFO "HFC_PCI: resetting card\n");
    pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO + PCI_ENA_MASTER);     /* enable memory ports + busmaster */
    Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET);   /* Reset On */
    Write_hfc(cs, HFCPCI_CIRM, 0);      /* Reset Off */
    if (Read_hfc(cs, HFCPCI_STATUS) & 2)
        printk(KERN_WARNING "HFC-PCI init bit busy\n");

    cs->hw.hfcpci.fifo_en = 0x30;       /* only D fifos enabled */
    Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);

    cs->hw.hfcpci.trm = 0 + HFCPCI_BTRANS_THRESMASK;    /* no echo connect, threshold */
    Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);

    Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_TE);    /* ST-Bit delay for TE-Mode */
    cs->hw.hfcpci.sctrl_e = HFCPCI_AUTO_AWAKE;
    Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e);       /* S/T Auto awake */
    cs->hw.hfcpci.bswapped = 0; /* no exchange */
    cs->hw.hfcpci.nt_mode = 0;  /* we are in TE mode */
    cs->hw.hfcpci.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
    Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);

    cs->hw.hfcpci.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
        HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
    Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);

    /* Clear already pending ints */
    if (Read_hfc(cs, HFCPCI_INT_S1));

    Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 2);        /* HFC ST 2 */
    Write_hfc(cs, HFCPCI_STATES, 2);    /* HFC ST 2 */
    cs->hw.hfcpci.mst_m = HFCPCI_MASTER;        /* HFC Master Mode */

    Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
    cs->hw.hfcpci.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
    Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
    cs->hw.hfcpci.sctrl_r = 0;
    Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);

    /* Init GCI/IOM2 in master mode */
    /* Slots 0 and 1 are set for B-chan 1 and 2 */
    /* D- and monitor/CI channel are not enabled */
    /* STIO1 is used as output for data, B1+B2 from ST->IOM+HFC */
    /* STIO2 is used as data input, B1+B2 from IOM->ST */
    /* ST B-channel send disabled -> continuous 1s */
    /* The IOM slots are always enabled */
    cs->hw.hfcpci.conn = 0x36;  /* set data flow directions */
    Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
    Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* B1-Slot 0 STIO1 out enabled */
    Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* B2-Slot 1 STIO1 out enabled */
    Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* B1-Slot 0 STIO2 in enabled */
    Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* B2-Slot 1 STIO2 in enabled */

    /* Finally enable IRQ output */
    cs->hw.hfcpci.int_m2 = HFCPCI_IRQ_ENABLE;
    Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
    if (Read_hfc(cs, HFCPCI_INT_S1));
/***************************************************/
/* Timer function called when kernel timer expires */
/***************************************************/
hfcpci_Timer(struct IsdnCardState *cs)
    cs->hw.hfcpci.timer.expires = jiffies + 75;
    /* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcpci.ctmt | 0x80);
       add_timer(&cs->hw.hfcpci.timer);
     */
/*********************************/
/* schedule a new D-channel task */
/*********************************/
sched_event_D_pci(struct IsdnCardState *cs, int event)
    test_and_set_bit(event, &cs->event);
    schedule_work(&cs->tqueue);
/*********************************/
/* schedule a new b_channel task */
/*********************************/
hfcpci_sched_event(struct BCState *bcs, int event)
    test_and_set_bit(event, &bcs->event);
    schedule_work(&bcs->tqueue);
/************************************************/
/* select a b-channel entry matching and active */
/************************************************/
Sel_BCS(struct IsdnCardState *cs, int channel)
    if (cs->bcs[0].mode && (cs->bcs[0].channel == channel))
        return (&cs->bcs[0]);
    else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel))
        return (&cs->bcs[1]);
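/* The B-channel FIFOs handled below are ring buffers managed by two pairs of
 * counters: Z1/Z2 are byte fill pointers (Z1 the write position, Z2 the read
 * position) and F1/F2 are frame counters (F1 newest, F2 oldest frame).
 * B_SUB_VAL is the fixed offset the chip adds to Z values, so data is
 * addressed as bdata + (Z - B_SUB_VAL) and wrap-around is handled by
 * subtracting B_FIFO_SIZE. */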
/***************************************/
/* clear the desired B-channel rx fifo */
/***************************************/
static void hfcpci_clear_fifo_rx(struct IsdnCardState *cs, int fifo)
        bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
        fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2RX;
        bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
        fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1RX;
    cs->hw.hfcpci.fifo_en ^= fifo_state;
    Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
    cs->hw.hfcpci.last_bfifo_cnt[fifo] = 0;
    bzr->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
    bzr->za[MAX_B_FRAMES].z2 = bzr->za[MAX_B_FRAMES].z1;
    bzr->f1 = MAX_B_FRAMES;
    bzr->f2 = bzr->f1;  /* init F pointers to remain constant */
    cs->hw.hfcpci.fifo_en |= fifo_state;
    Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
/***************************************/
/* clear the desired B-channel tx fifo */
/***************************************/
static void hfcpci_clear_fifo_tx(struct IsdnCardState *cs, int fifo)
        bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
        fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2TX;
        bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
        fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1TX;
    cs->hw.hfcpci.fifo_en ^= fifo_state;
    Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
    bzt->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
    bzt->za[MAX_B_FRAMES].z2 = bzt->za[MAX_B_FRAMES].z1;
    bzt->f1 = MAX_B_FRAMES;
    bzt->f2 = bzt->f1;  /* init F pointers to remain constant */
    cs->hw.hfcpci.fifo_en |= fifo_state;
    Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
/*********************************************/
/* read a complete B-frame out of the buffer */
/*********************************************/
static struct sk_buff *
hfcpci_empty_fifo(struct BCState *bcs, bzfifo_type *bz, u_char *bdata, int count)
    u_char *ptr, *ptr1, new_f2;
    struct IsdnCardState *cs = bcs->cs;
    int total, maxlen, new_z2;

    if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
        debugl1(cs, "hfcpci_empty_fifo");
    zp = &bz->za[bz->f2];       /* point to Z-Regs */
    new_z2 = zp->z2 + count;    /* new position in fifo */
    if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
        new_z2 -= B_FIFO_SIZE;  /* buffer wrap */
    new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
    if ((count > HSCX_BUFMAX + 3) || (count < 4) ||
        (*(bdata + (zp->z1 - B_SUB_VAL)))) {
        if (cs->debug & L1_DEB_WARN)
            debugl1(cs, "hfcpci_empty_fifo: incoming packet invalid length %d or crc", count);
#ifdef ERROR_STATISTIC
        bz->za[new_f2].z2 = new_z2;
        bz->f2 = new_f2;        /* next buffer */
    } else if (!(skb = dev_alloc_skb(count - 3)))
        printk(KERN_WARNING "HFCPCI: receive out of memory\n");
        ptr = skb_put(skb, count);
        if (zp->z2 + count <= B_FIFO_SIZE + B_SUB_VAL)
            maxlen = count;     /* complete transfer */
            maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2;  /* maximum */
        ptr1 = bdata + (zp->z2 - B_SUB_VAL);    /* start of data */
        memcpy(ptr, ptr1, maxlen);      /* copy data */
        if (count) {            /* rest remaining */
            ptr1 = bdata;       /* start of buffer */
            memcpy(ptr, ptr1, count);   /* rest */
        bz->za[new_f2].z2 = new_z2;
        bz->f2 = new_f2;        /* next buffer */
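/* Note on the length check above: each received HDLC frame carries two CRC
 * bytes plus a STAT byte that the controller appends at the end, which is
 * presumably why a valid frame needs count >= 4, the skb is allocated with
 * count - 3, and a non-zero byte at z1 - B_SUB_VAL (the STAT byte) is treated
 * as a CRC/abort error. */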
/*******************************/
/* D-channel receive procedure */
/*******************************/
receive_dmsg(struct IsdnCardState *cs)
    df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_rx;
    if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
        debugl1(cs, "rec_dmsg blocked");
    while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
        zp = &df->za[df->f2 & D_FREG_MASK];
        rcnt = zp->z1 - zp->z2;
        if (cs->debug & L1_DEB_ISAC)
            debugl1(cs, "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)",
                    df->f1, df->f2, zp->z1, zp->z2, rcnt);
        if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
            (df->data[zp->z1])) {
            if (cs->debug & L1_DEB_WARN)
                debugl1(cs, "empty_fifo hfcpci paket inv. len %d or crc %d", rcnt, df->data[zp->z1]);
#ifdef ERROR_STATISTIC
            df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1);        /* next buffer */
            df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + rcnt) & (D_FIFO_SIZE - 1);
        } else if ((skb = dev_alloc_skb(rcnt - 3))) {
            ptr = skb_put(skb, rcnt);
            if (zp->z2 + rcnt <= D_FIFO_SIZE)
                maxlen = rcnt;  /* complete transfer */
                maxlen = D_FIFO_SIZE - zp->z2;  /* maximum */
            ptr1 = df->data + zp->z2;   /* start of data */
            memcpy(ptr, ptr1, maxlen);  /* copy data */
            if (rcnt) {         /* rest remaining */
                ptr1 = df->data;        /* start of buffer */
                memcpy(ptr, ptr1, rcnt);        /* rest */
            df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1);        /* next buffer */
            df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + total) & (D_FIFO_SIZE - 1);
            skb_queue_tail(&cs->rq, skb);
            sched_event_D_pci(cs, D_RCVBUFREADY);
            printk(KERN_WARNING "HFC-PCI: D receive out of memory\n");
    test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
/*******************************************************************************/
/* check for transparent receive data and read max one threshold size if avail */
/*******************************************************************************/
hfcpci_empty_fifo_trans(struct BCState *bcs, bzfifo_type *bz, u_char *bdata)
    unsigned short *z1r, *z2r;
    int new_z2, fcnt, maxlen;

    z1r = &bz->za[MAX_B_FRAMES].z1;     /* pointer to z reg */

    if (!(fcnt = *z1r - *z2r))
        return (0);     /* no data avail */

        fcnt += B_FIFO_SIZE;    /* bytes actually buffered */
    if (fcnt > HFCPCI_BTRANS_THRESHOLD)
        fcnt = HFCPCI_BTRANS_THRESHOLD;         /* limit size */

    new_z2 = *z2r + fcnt;       /* new position in fifo */
    if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
        new_z2 -= B_FIFO_SIZE;  /* buffer wrap */

    if (!(skb = dev_alloc_skb(fcnt)))
        printk(KERN_WARNING "HFCPCI: receive out of memory\n");
        ptr = skb_put(skb, fcnt);
        if (*z2r + fcnt <= B_FIFO_SIZE + B_SUB_VAL)
            maxlen = fcnt;      /* complete transfer */
            maxlen = B_FIFO_SIZE + B_SUB_VAL - *z2r;    /* maximum */
        ptr1 = bdata + (*z2r - B_SUB_VAL);      /* start of data */
        memcpy(ptr, ptr1, maxlen);      /* copy data */
        if (fcnt) {             /* rest remaining */
            ptr1 = bdata;       /* start of buffer */
            memcpy(ptr, ptr1, fcnt);    /* rest */
        skb_queue_tail(&bcs->rqueue, skb);
        hfcpci_sched_event(bcs, B_RCVBUFREADY);

    *z2r = new_z2;      /* new position */
}                               /* hfcpci_empty_fifo_trans */
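/* In transparent mode there are no frame boundaries, so the receiver above
 * simply drains at most HFCPCI_BTRANS_THRESHOLD bytes per call and leaves the
 * remainder for the next timer or threshold interrupt. */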
/**********************************/
/* B-channel main receive routine */
/**********************************/
main_rec_hfcpci(struct BCState *bcs)
    struct IsdnCardState *cs = bcs->cs;
    int receive, count = 5;

    if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
        bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
        bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
        bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
        bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b1;
    if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
        debugl1(cs, "rec_data %d blocked", bcs->channel);
    if (bz->f1 != bz->f2) {
        if (cs->debug & L1_DEB_HSCX)
            debugl1(cs, "hfcpci rec %d f1(%d) f2(%d)",
                    bcs->channel, bz->f1, bz->f2);
        zp = &bz->za[bz->f2];
        rcnt = zp->z1 - zp->z2;
        if (cs->debug & L1_DEB_HSCX)
            debugl1(cs, "hfcpci rec %d z1(%x) z2(%x) cnt(%d)",
                    bcs->channel, zp->z1, zp->z2, rcnt);
        if ((skb = hfcpci_empty_fifo(bcs, bz, bdata, rcnt))) {
            skb_queue_tail(&bcs->rqueue, skb);
            hfcpci_sched_event(bcs, B_RCVBUFREADY);
        rcnt = bz->f1 - bz->f2;
            rcnt += MAX_B_FRAMES + 1;
        if (cs->hw.hfcpci.last_bfifo_cnt[real_fifo] > rcnt + 1) {
            hfcpci_clear_fifo_rx(cs, real_fifo);
        cs->hw.hfcpci.last_bfifo_cnt[real_fifo] = rcnt;
    } else if (bcs->mode == L1_MODE_TRANS)
        receive = hfcpci_empty_fifo_trans(bcs, bz, bdata);
    test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
    if (count && receive)
/**************************/
/* D-channel send routine */
/**************************/
hfcpci_fill_dfifo(struct IsdnCardState *cs)
    int count, new_z1, maxlen;
    u_char *src, *dst, new_f1;

    if (cs->tx_skb->len <= 0)

    df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_tx;

    if (cs->debug & L1_DEB_ISAC)
        debugl1(cs, "hfcpci_fill_Dfifo f1(%d) f2(%d) z1(f1)(%x)",
                df->za[df->f1 & D_FREG_MASK].z1);
    fcnt = df->f1 - df->f2;     /* frame count actually buffered */
        fcnt += (MAX_D_FRAMES + 1);     /* if wrap around */
    if (fcnt > (MAX_D_FRAMES - 1)) {
        if (cs->debug & L1_DEB_ISAC)
            debugl1(cs, "hfcpci_fill_Dfifo more as 14 frames");
#ifdef ERROR_STATISTIC
    /* now determine free bytes in FIFO buffer */
    count = df->za[df->f2 & D_FREG_MASK].z2 - df->za[df->f1 & D_FREG_MASK].z1 - 1;
        count += D_FIFO_SIZE;   /* count now contains available bytes */

    if (cs->debug & L1_DEB_ISAC)
        debugl1(cs, "hfcpci_fill_Dfifo count(%u/%d)",
                cs->tx_skb->len, count);
    if (count < cs->tx_skb->len) {
        if (cs->debug & L1_DEB_ISAC)
            debugl1(cs, "hfcpci_fill_Dfifo no fifo mem");
    count = cs->tx_skb->len;    /* get frame len */
    new_z1 = (df->za[df->f1 & D_FREG_MASK].z1 + count) & (D_FIFO_SIZE - 1);
    new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
    src = cs->tx_skb->data;     /* source pointer */
    dst = df->data + df->za[df->f1 & D_FREG_MASK].z1;
    maxlen = D_FIFO_SIZE - df->za[df->f1 & D_FREG_MASK].z1;     /* end fifo */
        maxlen = count;         /* limit size */
    memcpy(dst, src, maxlen);   /* first copy */

    count -= maxlen;            /* remaining bytes */
        dst = df->data;         /* start of buffer */
        src += maxlen;          /* new position */
        memcpy(dst, src, count);

    df->za[new_f1 & D_FREG_MASK].z1 = new_z1;   /* for next buffer */
    df->za[df->f1 & D_FREG_MASK].z1 = new_z1;   /* new pos actual buffer */
    df->f1 = new_f1;            /* next frame */

    dev_kfree_skb_any(cs->tx_skb);
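/* The free-space computation above takes the distance from the newest write
 * pointer (z1 of frame f1) back to the oldest unread byte (z2 of frame f2),
 * minus one, modulo D_FIFO_SIZE; a frame that does not fit completely is
 * refused rather than written partially. */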
/**************************/
/* B-channel send routine */
/**************************/
hfcpci_fill_fifo(struct BCState *bcs)
    struct IsdnCardState *cs = bcs->cs;
    u_char new_f1, *src, *dst;
    unsigned short *z1t, *z2t;

    if (bcs->tx_skb->len <= 0)

    if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
        bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
        bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b2;
        bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
        bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b1;

    if (bcs->mode == L1_MODE_TRANS) {
        z1t = &bz->za[MAX_B_FRAMES].z1;
        if (cs->debug & L1_DEB_HSCX)
            debugl1(cs, "hfcpci_fill_fifo_trans %d z1(%x) z2(%x)",
                    bcs->channel, *z1t, *z2t);
            fcnt += B_FIFO_SIZE;        /* fcnt contains available bytes in fifo */
        fcnt = B_FIFO_SIZE - fcnt;      /* remaining bytes to send */

        while ((fcnt < 2 * HFCPCI_BTRANS_THRESHOLD) && (bcs->tx_skb)) {
            if (bcs->tx_skb->len < B_FIFO_SIZE - fcnt) {
                /* data is suitable for fifo */
                count = bcs->tx_skb->len;
                new_z1 = *z1t + count;  /* new buffer Position */
                if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
                    new_z1 -= B_FIFO_SIZE;      /* buffer wrap */
                src = bcs->tx_skb->data;        /* source pointer */
                dst = bdata + (*z1t - B_SUB_VAL);
                maxlen = (B_FIFO_SIZE + B_SUB_VAL) - *z1t;      /* end of fifo */
                    maxlen = count;     /* limit size */
                memcpy(dst, src, maxlen);       /* first copy */

                count -= maxlen;        /* remaining bytes */
                    dst = bdata;        /* start of buffer */
                    src += maxlen;      /* new position */
                    memcpy(dst, src, count);

                bcs->tx_cnt -= bcs->tx_skb->len;
                fcnt += bcs->tx_skb->len;
                *z1t = new_z1;  /* now send data */
            } else if (cs->debug & L1_DEB_HSCX)
                debugl1(cs, "hfcpci_fill_fifo_trans %d frame length %d discarded",
                        bcs->channel, bcs->tx_skb->len);

            if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
                (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
                spin_lock_irqsave(&bcs->aclock, flags);
                bcs->ackcnt += bcs->tx_skb->len;
                spin_unlock_irqrestore(&bcs->aclock, flags);
                schedule_event(bcs, B_ACKPENDING);

            dev_kfree_skb_any(bcs->tx_skb);
            bcs->tx_skb = skb_dequeue(&bcs->squeue);    /* fetch next data */
        test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);

    if (cs->debug & L1_DEB_HSCX)
        debugl1(cs, "hfcpci_fill_fifo_hdlc %d f1(%d) f2(%d) z1(f1)(%x)",
                bcs->channel, bz->f1, bz->f2,

    fcnt = bz->f1 - bz->f2;     /* frame count actually buffered */
        fcnt += (MAX_B_FRAMES + 1);     /* if wrap around */
    if (fcnt > (MAX_B_FRAMES - 1)) {
        if (cs->debug & L1_DEB_HSCX)
            debugl1(cs, "hfcpci_fill_Bfifo more as 14 frames");

    /* now determine free bytes in FIFO buffer */
    count = bz->za[bz->f2].z2 - bz->za[bz->f1].z1 - 1;
        count += B_FIFO_SIZE;   /* count now contains available bytes */

    if (cs->debug & L1_DEB_HSCX)
        debugl1(cs, "hfcpci_fill_fifo %d count(%u/%d),%lx",
                bcs->channel, bcs->tx_skb->len,
                count, current->state);

    if (count < bcs->tx_skb->len) {
        if (cs->debug & L1_DEB_HSCX)
            debugl1(cs, "hfcpci_fill_fifo no fifo mem");
    count = bcs->tx_skb->len;   /* get frame len */
    new_z1 = bz->za[bz->f1].z1 + count;         /* new buffer Position */
    if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
        new_z1 -= B_FIFO_SIZE;  /* buffer wrap */

    new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
    src = bcs->tx_skb->data;    /* source pointer */
    dst = bdata + (bz->za[bz->f1].z1 - B_SUB_VAL);
    maxlen = (B_FIFO_SIZE + B_SUB_VAL) - bz->za[bz->f1].z1;     /* end fifo */
        maxlen = count;         /* limit size */
    memcpy(dst, src, maxlen);   /* first copy */

    count -= maxlen;            /* remaining bytes */
        dst = bdata;            /* start of buffer */
        src += maxlen;          /* new position */
        memcpy(dst, src, count);

    bcs->tx_cnt -= bcs->tx_skb->len;
    if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
        (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
        spin_lock_irqsave(&bcs->aclock, flags);
        bcs->ackcnt += bcs->tx_skb->len;
        spin_unlock_irqrestore(&bcs->aclock, flags);
        schedule_event(bcs, B_ACKPENDING);

    bz->za[new_f1].z1 = new_z1;         /* for next buffer */
    bz->f1 = new_f1;            /* next frame */

    dev_kfree_skb_any(bcs->tx_skb);
    test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
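/* hfcpci_fill_fifo() covers both B-channel modes: the transparent path keeps
 * copying queued skbs until roughly two threshold blocks are buffered, while
 * the HDLC path writes exactly one frame per call and then advances the F1
 * frame counter, which marks the frame boundary for the controller. */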
/**********************************************/
/* D-channel l1 state call for leased NT-mode */
/**********************************************/
dch_nt_l2l1(struct PStack *st, int pr, void *arg)
    struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;

        case (PH_DATA | REQUEST):
        case (PH_PULL | REQUEST):
        case (PH_PULL | INDICATION):
            st->l1.l1hw(st, pr, arg);
        case (PH_ACTIVATE | REQUEST):
            st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
        case (PH_TESTLOOP | REQUEST):
                debugl1(cs, "PH_TEST_LOOP B1");
                debugl1(cs, "PH_TEST_LOOP B2");
            if (!(3 & (long) arg))
                debugl1(cs, "PH_TEST_LOOP DISABLED");
            st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg);
            debugl1(cs, "dch_nt_l2l1 msg %04X unhandled", pr);
/***********************/
/* set/reset echo mode */
/***********************/
hfcpci_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic)
    int i = *(unsigned int *) ic->parm.num;

    if ((ic->arg == 98) &&
        (!(cs->hw.hfcpci.int_m1 & (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC + HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC)))) {
        spin_lock_irqsave(&cs->lock, flags);
        Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_NT);        /* ST-Bit delay for NT-Mode */
        Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 0);    /* HFC ST G0 */
        cs->hw.hfcpci.sctrl |= SCTRL_MODE_NT;
        Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);       /* set NT-mode */
        Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 1);    /* HFC ST G1 */
        Write_hfc(cs, HFCPCI_STATES, 1 | HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
        cs->dc.hfcpci.ph_state = 1;
        cs->hw.hfcpci.nt_mode = 1;
        cs->hw.hfcpci.nt_timer = 0;
        cs->stlist->l2.l2l1 = dch_nt_l2l1;
        spin_unlock_irqrestore(&cs->lock, flags);
        debugl1(cs, "NT mode activated");
    if ((cs->chanlimit > 1) || (cs->hw.hfcpci.bswapped) ||
        (cs->hw.hfcpci.nt_mode) || (ic->arg != 12))
    spin_lock_irqsave(&cs->lock, flags);
        cs->hw.hfcpci.trm |= 0x20;      /* enable echo chan */
        cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_B2REC;
        cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2RX;
        cs->hw.hfcpci.trm &= ~0x20;     /* disable echo chan */
        cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_B2REC;
        cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2RX;
    cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
    cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
    cs->hw.hfcpci.conn |= 0x10;         /* B2-IOM -> B2-ST */
    cs->hw.hfcpci.ctmt &= ~2;
    Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
    Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
    Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
    Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
    Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
    Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
    Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
    spin_unlock_irqrestore(&cs->lock, flags);
}                               /* hfcpci_auxcmd */
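/* Two sub-commands appear to be handled here: ic->arg == 98 switches the
 * interface into NT mode (NT clock delay, SCTRL_MODE_NT, state G1), and
 * ic->arg == 12 turns echo-channel reception on or off by reusing the B2
 * receive FIFO, which is presumably why it is refused while both B-channels
 * are in use, in NT mode, or with swapped channels. */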
/*****************************/
/* E-channel receive routine */
/*****************************/
receive_emsg(struct IsdnCardState *cs)
    int receive, count = 5;
    u_char *ptr, *ptr1, new_f2;
    int total, maxlen, new_z2;
    u_char e_buffer[256];

    bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
    bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
    if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
        debugl1(cs, "echo_rec_data blocked");
    if (bz->f1 != bz->f2) {
        if (cs->debug & L1_DEB_ISAC)
            debugl1(cs, "hfcpci e_rec f1(%d) f2(%d)",
        zp = &bz->za[bz->f2];
        rcnt = zp->z1 - zp->z2;
        if (cs->debug & L1_DEB_ISAC)
            debugl1(cs, "hfcpci e_rec z1(%x) z2(%x) cnt(%d)",
                    zp->z1, zp->z2, rcnt);
        new_z2 = zp->z2 + rcnt;         /* new position in fifo */
        if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
            new_z2 -= B_FIFO_SIZE;      /* buffer wrap */
        new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
        if ((rcnt > 256 + 3) || (count < 4) ||
            (*(bdata + (zp->z1 - B_SUB_VAL)))) {
            if (cs->debug & L1_DEB_WARN)
                debugl1(cs, "hfcpci_empty_echan: incoming packet invalid length %d or crc", rcnt);
            bz->za[new_f2].z2 = new_z2;
            bz->f2 = new_f2;    /* next buffer */
            if (zp->z2 <= B_FIFO_SIZE + B_SUB_VAL)
                maxlen = rcnt;  /* complete transfer */
                maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2;      /* maximum */
            ptr1 = bdata + (zp->z2 - B_SUB_VAL);        /* start of data */
            memcpy(ptr, ptr1, maxlen);  /* copy data */
            if (rcnt) {         /* rest remaining */
                ptr1 = bdata;   /* start of buffer */
                memcpy(ptr, ptr1, rcnt);        /* rest */
            bz->za[new_f2].z2 = new_z2;
            bz->f2 = new_f2;    /* next buffer */
            if (cs->debug & DEB_DLOG_HEX) {
                if ((total - 3) < MAX_DLOG_SPACE / 3 - 10) {
                    ptr += QuickHex(ptr, e_buffer, total - 3);
                    HiSax_putstatus(cs, NULL, cs->dlog);
                    HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
        rcnt = bz->f1 - bz->f2;
            rcnt += MAX_B_FRAMES + 1;
    test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
    if (count && receive)
/*********************/
/* Interrupt handler */
/*********************/
hfcpci_interrupt(int intno, void *dev_id)
    struct IsdnCardState *cs = dev_id;

    if (!(cs->hw.hfcpci.int_m2 & 0x08)) {
        debugl1(cs, "HFC-PCI: int_m2 %x not initialised", cs->hw.hfcpci.int_m2);
        return IRQ_NONE;        /* not initialised */
    spin_lock_irqsave(&cs->lock, flags);
    if (HFCPCI_ANYINT & (stat = Read_hfc(cs, HFCPCI_STATUS))) {
        val = Read_hfc(cs, HFCPCI_INT_S1);
        if (cs->debug & L1_DEB_ISAC)
            debugl1(cs, "HFC-PCI: stat(%02x) s1(%02x)", stat, val);
        spin_unlock_irqrestore(&cs->lock, flags);
    if (cs->debug & L1_DEB_ISAC)
        debugl1(cs, "HFC-PCI irq %x %s", val,
                test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ?
                "locked" : "unlocked");
    val &= cs->hw.hfcpci.int_m1;
    if (val & 0x40) {           /* state machine irq */
        exval = Read_hfc(cs, HFCPCI_STATES) & 0xf;
        if (cs->debug & L1_DEB_ISAC)
            debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcpci.ph_state,
        cs->dc.hfcpci.ph_state = exval;
        sched_event_D_pci(cs, D_L1STATECHANGE);
    if (val & 0x80) {           /* timer irq */
        if (cs->hw.hfcpci.nt_mode) {
            if ((--cs->hw.hfcpci.nt_timer) < 0)
                sched_event_D_pci(cs, D_L1STATECHANGE);
        Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
    if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
        cs->hw.hfcpci.int_s1 |= val;
        spin_unlock_irqrestore(&cs->lock, flags);
    if (cs->hw.hfcpci.int_s1 & 0x18) {
        val = cs->hw.hfcpci.int_s1;
        cs->hw.hfcpci.int_s1 = exval;
        if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
            debugl1(cs, "hfcpci spurious 0x08 IRQ");
            main_rec_hfcpci(bcs);
        else if (!(bcs = Sel_BCS(cs, 1))) {
            debugl1(cs, "hfcpci spurious 0x10 IRQ");
            main_rec_hfcpci(bcs);
        if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
            debugl1(cs, "hfcpci spurious 0x01 IRQ");
            if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
                hfcpci_fill_fifo(bcs);
                test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
                debugl1(cs, "fill_data %d blocked", bcs->channel);
            if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
                if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
                    hfcpci_fill_fifo(bcs);
                    test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
                    debugl1(cs, "fill_data %d blocked", bcs->channel);
                hfcpci_sched_event(bcs, B_XMTBUFREADY);
        if (!(bcs = Sel_BCS(cs, 1))) {
            debugl1(cs, "hfcpci spurious 0x02 IRQ");
            if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
                hfcpci_fill_fifo(bcs);
                test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
                debugl1(cs, "fill_data %d blocked", bcs->channel);
            if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
                if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
                    hfcpci_fill_fifo(bcs);
                    test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
                    debugl1(cs, "fill_data %d blocked", bcs->channel);
                hfcpci_sched_event(bcs, B_XMTBUFREADY);
    if (val & 0x20) {           /* receive dframe */
    if (val & 0x04) {           /* dframe transmitted */
        if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
            del_timer(&cs->dbusytimer);
        if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
            sched_event_D_pci(cs, D_CLEARBUSY);
            if (cs->tx_skb->len) {
                if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
                    hfcpci_fill_dfifo(cs);
                    test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
                    debugl1(cs, "hfcpci_fill_dfifo irq blocked");
                dev_kfree_skb_irq(cs->tx_skb);
        if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
            if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
                hfcpci_fill_dfifo(cs);
                test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
                debugl1(cs, "hfcpci_fill_dfifo irq blocked");
            sched_event_D_pci(cs, D_XMTBUFREADY);
    if (cs->hw.hfcpci.int_s1 && count--) {
        val = cs->hw.hfcpci.int_s1;
        cs->hw.hfcpci.int_s1 = 0;
        if (cs->debug & L1_DEB_ISAC)
            debugl1(cs, "HFC-PCI irq %x loop %d", val, 15 - count);
    spin_unlock_irqrestore(&cs->lock, flags);
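/* The interrupt handler and the fill/receive paths share the hardware through
 * the FLG_LOCK_ATOMIC bit: when the handler finds the bit already set it only
 * accumulates the pending interrupt sources in hw.hfcpci.int_s1, and the saved
 * bits appear to be replayed by the loop near the end of the handler once the
 * lock is free again. */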
/********************************************************************/
/* timer callback for D-chan busy resolution. Currently no function */
/********************************************************************/
hfcpci_dbusy_timer(struct IsdnCardState *cs)
/*************************************/
/* Layer 1 D-channel hardware access */
/*************************************/
HFCPCI_l1hw(struct PStack *st, int pr, void *arg)
    struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
    struct sk_buff *skb = arg;

        case (PH_DATA | REQUEST):
            if (cs->debug & DEB_DLOG_HEX)
                LogFrame(cs, skb->data, skb->len);
            if (cs->debug & DEB_DLOG_VERBOSE)
                dlogframe(cs, skb, 0);
            spin_lock_irqsave(&cs->lock, flags);
                skb_queue_tail(&cs->sq, skb);
#ifdef L2FRAME_DEBUG            /* psa */
                if (cs->debug & L1_DEB_LAPD)
                    Logl2Frame(cs, skb, "PH_DATA Queued", 0);
#ifdef L2FRAME_DEBUG            /* psa */
                if (cs->debug & L1_DEB_LAPD)
                    Logl2Frame(cs, skb, "PH_DATA", 0);
                if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
                    hfcpci_fill_dfifo(cs);
                    test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
                    debugl1(cs, "hfcpci_fill_dfifo blocked");
            spin_unlock_irqrestore(&cs->lock, flags);
        case (PH_PULL | INDICATION):
            spin_lock_irqsave(&cs->lock, flags);
                if (cs->debug & L1_DEB_WARN)
                    debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
                skb_queue_tail(&cs->sq, skb);
                spin_unlock_irqrestore(&cs->lock, flags);
            if (cs->debug & DEB_DLOG_HEX)
                LogFrame(cs, skb->data, skb->len);
            if (cs->debug & DEB_DLOG_VERBOSE)
                dlogframe(cs, skb, 0);
#ifdef L2FRAME_DEBUG            /* psa */
            if (cs->debug & L1_DEB_LAPD)
                Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
            if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
                hfcpci_fill_dfifo(cs);
                test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
                debugl1(cs, "hfcpci_fill_dfifo blocked");
            spin_unlock_irqrestore(&cs->lock, flags);
        case (PH_PULL | REQUEST):
#ifdef L2FRAME_DEBUG            /* psa */
            if (cs->debug & L1_DEB_LAPD)
                debugl1(cs, "-> PH_REQUEST_PULL");
                test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
                st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
                test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
        case (HW_RESET | REQUEST):
            spin_lock_irqsave(&cs->lock, flags);
            Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3);        /* HFC ST 3 */
            Write_hfc(cs, HFCPCI_STATES, 3);    /* HFC ST 2 */
            cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
            Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
            Write_hfc(cs, HFCPCI_STATES, HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
            spin_unlock_irqrestore(&cs->lock, flags);
            l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
        case (HW_ENABLE | REQUEST):
            spin_lock_irqsave(&cs->lock, flags);
            Write_hfc(cs, HFCPCI_STATES, HFCPCI_DO_ACTION);
            spin_unlock_irqrestore(&cs->lock, flags);
        case (HW_DEACTIVATE | REQUEST):
            spin_lock_irqsave(&cs->lock, flags);
            cs->hw.hfcpci.mst_m &= ~HFCPCI_MASTER;
            Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
            spin_unlock_irqrestore(&cs->lock, flags);
        case (HW_INFO3 | REQUEST):
            spin_lock_irqsave(&cs->lock, flags);
            cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
            Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
            spin_unlock_irqrestore(&cs->lock, flags);
        case (HW_TESTLOOP | REQUEST):
            spin_lock_irqsave(&cs->lock, flags);
            switch ((long) arg) {
                    Write_hfc(cs, HFCPCI_B1_SSL, 0x80);         /* tx slot */
                    Write_hfc(cs, HFCPCI_B1_RSL, 0x80);         /* rx slot */
                    cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~7) | 1;
                    Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
                    Write_hfc(cs, HFCPCI_B2_SSL, 0x81);         /* tx slot */
                    Write_hfc(cs, HFCPCI_B2_RSL, 0x81);         /* rx slot */
                    cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~0x38) | 0x08;
                    Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
                    spin_unlock_irqrestore(&cs->lock, flags);
                    if (cs->debug & L1_DEB_WARN)
                        debugl1(cs, "hfcpci_l1hw loop invalid %4lx", (long) arg);
            cs->hw.hfcpci.trm |= 0x80;  /* enable IOM-loop */
            Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
            spin_unlock_irqrestore(&cs->lock, flags);
            if (cs->debug & L1_DEB_WARN)
                debugl1(cs, "hfcpci_l1hw unknown pr %4x", pr);
/***********************************************/
/* called during init setting l1 stack pointer */
/***********************************************/
setstack_hfcpci(struct PStack *st, struct IsdnCardState *cs)
    st->l1.l1hw = HFCPCI_l1hw;
/**************************************/
/* send B-channel data if not blocked */
/**************************************/
hfcpci_send_data(struct BCState *bcs)
    struct IsdnCardState *cs = bcs->cs;

    if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
        hfcpci_fill_fifo(bcs);
        test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
        debugl1(cs, "send_data %d blocked", bcs->channel);
/***************************************************************/
/* activate/deactivate hardware for selected channels and mode */
/***************************************************************/
mode_hfcpci(struct BCState *bcs, int mode, int bc)
    struct IsdnCardState *cs = bcs->cs;

    if (cs->debug & L1_DEB_HSCX)
        debugl1(cs, "HFCPCI bchannel mode %d bchan %d/%d",
                mode, bc, bcs->channel);
        if (cs->chanlimit > 1) {
            cs->hw.hfcpci.bswapped = 0;         /* B1 and B2 normal mode */
            cs->hw.hfcpci.sctrl_e &= ~0x80;
            if (mode != L1_MODE_NULL) {
                cs->hw.hfcpci.bswapped = 1;     /* B1 and B2 exchanged */
                cs->hw.hfcpci.sctrl_e |= 0x80;
                cs->hw.hfcpci.bswapped = 0;     /* B1 and B2 normal mode */
                cs->hw.hfcpci.sctrl_e &= ~0x80;
        cs->hw.hfcpci.bswapped = 0;     /* B1 and B2 normal mode */
        cs->hw.hfcpci.sctrl_e &= ~0x80;
        case (L1_MODE_NULL):
                cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
                cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
                cs->hw.hfcpci.sctrl &= ~SCTRL_B1_ENA;
                cs->hw.hfcpci.sctrl_r &= ~SCTRL_B1_ENA;
                cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
                cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
                cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
                cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
        case (L1_MODE_TRANS):
            hfcpci_clear_fifo_rx(cs, fifo2);
            hfcpci_clear_fifo_tx(cs, fifo2);
                cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
                cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
                cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
                cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
                cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
                cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
                cs->hw.hfcpci.ctmt |= 2;
                cs->hw.hfcpci.conn &= ~0x18;
                cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
                cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
                cs->hw.hfcpci.ctmt |= 1;
                cs->hw.hfcpci.conn &= ~0x03;
        case (L1_MODE_HDLC):
            hfcpci_clear_fifo_rx(cs, fifo2);
            hfcpci_clear_fifo_tx(cs, fifo2);
                cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
                cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
                cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
                cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
                cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
                cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
                cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
                cs->hw.hfcpci.ctmt &= ~2;
                cs->hw.hfcpci.conn &= ~0x18;
                cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
                cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
                cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
                cs->hw.hfcpci.ctmt &= ~1;
                cs->hw.hfcpci.conn &= ~0x03;
        case (L1_MODE_EXTRN):
                cs->hw.hfcpci.conn |= 0x10;
                cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
                cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
                cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
                cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
                cs->hw.hfcpci.conn |= 0x02;
                cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
                cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
                cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
                cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
    Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e);
    Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
    Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
    Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
    Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
    Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
    Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
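/* The bswapped flag set above presumably serves cards limited to a single
 * usable B-channel (chanlimit <= 1): when the other physical channel must be
 * used, the B1 and B2 register/FIFO roles are exchanged via bit 7 of SCTRL_E,
 * and the flag is consulted by the receive, transmit and interrupt paths. */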
/******************************/
/* Layer2 -> Layer 1 Transfer */
/******************************/
hfcpci_l2l1(struct PStack *st, int pr, void *arg)
    struct BCState *bcs = st->l1.bcs;
    struct sk_buff *skb = arg;

        case (PH_DATA | REQUEST):
            spin_lock_irqsave(&bcs->cs->lock, flags);
                skb_queue_tail(&bcs->squeue, skb);
//              test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
                bcs->cs->BC_Send_Data(bcs);
            spin_unlock_irqrestore(&bcs->cs->lock, flags);
        case (PH_PULL | INDICATION):
            spin_lock_irqsave(&bcs->cs->lock, flags);
                spin_unlock_irqrestore(&bcs->cs->lock, flags);
                printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n");
//          test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
            bcs->cs->BC_Send_Data(bcs);
            spin_unlock_irqrestore(&bcs->cs->lock, flags);
        case (PH_PULL | REQUEST):
                test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
                st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
                test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
        case (PH_ACTIVATE | REQUEST):
            spin_lock_irqsave(&bcs->cs->lock, flags);
            test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
            mode_hfcpci(bcs, st->l1.mode, st->l1.bc);
            spin_unlock_irqrestore(&bcs->cs->lock, flags);
            l1_msg_b(st, pr, arg);
        case (PH_DEACTIVATE | REQUEST):
            l1_msg_b(st, pr, arg);
        case (PH_DEACTIVATE | CONFIRM):
            spin_lock_irqsave(&bcs->cs->lock, flags);
            test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
            test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
            mode_hfcpci(bcs, 0, st->l1.bc);
            spin_unlock_irqrestore(&bcs->cs->lock, flags);
            st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
/******************************************/
/* deactivate B-channel access and queues */
/******************************************/
close_hfcpci(struct BCState *bcs)
    mode_hfcpci(bcs, 0, bcs->channel);
    if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
        skb_queue_purge(&bcs->rqueue);
        skb_queue_purge(&bcs->squeue);
            dev_kfree_skb_any(bcs->tx_skb);
            test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
/*************************************/
/* init B-channel queues and control */
/*************************************/
open_hfcpcistate(struct IsdnCardState *cs, struct BCState *bcs)
    if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
        skb_queue_head_init(&bcs->rqueue);
        skb_queue_head_init(&bcs->squeue);
    test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
/*********************************/
/* inits the stack for B-channel */
/*********************************/
setstack_2b(struct PStack *st, struct BCState *bcs)
    bcs->channel = st->l1.bc;
    if (open_hfcpcistate(st->l1.hardware, bcs))
    st->l2.l2l1 = hfcpci_l2l1;
    setstack_manager(st);
/***************************/
/* handle L1 state changes */
/***************************/
hfcpci_bh(struct work_struct *work)
    struct IsdnCardState *cs =
        container_of(work, struct IsdnCardState, tqueue);
//  struct PStack *stptr;

    if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
        if (!cs->hw.hfcpci.nt_mode)
            switch (cs->dc.hfcpci.ph_state) {
                    l1_msg(cs, HW_RESET | INDICATION, NULL);
                    l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
                    l1_msg(cs, HW_RSYNC | INDICATION, NULL);
                    l1_msg(cs, HW_INFO2 | INDICATION, NULL);
                    l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
            spin_lock_irqsave(&cs->lock, flags);
            switch (cs->dc.hfcpci.ph_state) {
                    if (cs->hw.hfcpci.nt_timer < 0) {
                        cs->hw.hfcpci.nt_timer = 0;
                        cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
                        Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
                        /* Clear already pending ints */
                        if (Read_hfc(cs, HFCPCI_INT_S1));
                        Write_hfc(cs, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
                        Write_hfc(cs, HFCPCI_STATES, 4);
                        cs->dc.hfcpci.ph_state = 4;
                        cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_TIMER;
                        Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
                        cs->hw.hfcpci.ctmt &= ~HFCPCI_AUTO_TIMER;
                        cs->hw.hfcpci.ctmt |= HFCPCI_TIM3_125;
                        Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
                        Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
                        cs->hw.hfcpci.nt_timer = NT_T1_COUNT;
                        Write_hfc(cs, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);      /* allow G2 -> G3 transition */
                    cs->hw.hfcpci.nt_timer = 0;
                    cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
                    Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
            spin_unlock_irqrestore(&cs->lock, flags);
    if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
        DChannel_proc_rcv(cs);
    if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
        DChannel_proc_xmt(cs);
/********************************/
/* called for card init message */
/********************************/
inithfcpci(struct IsdnCardState *cs)
    cs->bcs[0].BC_SetStack = setstack_2b;
    cs->bcs[1].BC_SetStack = setstack_2b;
    cs->bcs[0].BC_Close = close_hfcpci;
    cs->bcs[1].BC_Close = close_hfcpci;
    cs->dbusytimer.function = (void *) hfcpci_dbusy_timer;
    cs->dbusytimer.data = (long) cs;
    init_timer(&cs->dbusytimer);
    mode_hfcpci(cs->bcs, 0, 0);
    mode_hfcpci(cs->bcs + 1, 0, 1);
/*******************************************/
/* handle card messages from control layer */
/*******************************************/
hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
    if (cs->debug & L1_DEB_ISAC)
        debugl1(cs, "HFCPCI: card_msg %x", mt);
            spin_lock_irqsave(&cs->lock, flags);
            spin_unlock_irqrestore(&cs->lock, flags);
            release_io_hfcpci(cs);
            spin_lock_irqsave(&cs->lock, flags);
            spin_unlock_irqrestore(&cs->lock, flags);
            msleep(80);         /* Timeout 80ms */
            /* now switch timer interrupt off */
            spin_lock_irqsave(&cs->lock, flags);
            cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
            Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
            /* reinit mode reg */
            Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
            spin_unlock_irqrestore(&cs->lock, flags);
/* this variable is used as card index when more than one card is present */
static struct pci_dev *dev_hfcpci __devinitdata = NULL;
setup_hfcpci(struct IsdnCard *card)
    struct IsdnCardState *cs = card->cs;
    struct pci_dev *tmp_hfcpci = NULL;

#error "not running on big endian machines now"

    strcpy(tmp, hfcpci_revision);
    printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));

    cs->hw.hfcpci.int_s1 = 0;
    cs->dc.hfcpci.ph_state = 0;
    cs->hw.hfcpci.fifo = 255;
    if (cs->typ != ISDN_CTYPE_HFC_PCI)

    while (id_list[i].vendor_id) {
        tmp_hfcpci = hisax_find_pci_device(id_list[i].vendor_id,
                                           id_list[i].device_id,
            dma_addr_t dma_mask = DMA_BIT_MASK(32) & ~0x7fffUL;
            if (pci_enable_device(tmp_hfcpci))
            if (pci_set_dma_mask(tmp_hfcpci, dma_mask)) {
                       "HiSax hfc_pci: No suitable DMA available.\n");
            if (pci_set_consistent_dma_mask(tmp_hfcpci, dma_mask)) {
                       "HiSax hfc_pci: No suitable consistent DMA available.\n");
            pci_set_master(tmp_hfcpci);
            if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[0].start & PCI_BASE_ADDRESS_IO_MASK)))

        printk(KERN_WARNING "HFC-PCI: No PCI card found\n");

        dev_hfcpci = tmp_hfcpci;        /* old device */
        cs->hw.hfcpci.dev = dev_hfcpci;
        cs->irq = dev_hfcpci->irq;
            printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
        cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
        printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);

    if (!cs->hw.hfcpci.pci_io) {
        printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");

    /* Allocate memory for FIFOS */
    cs->hw.hfcpci.fifos = pci_alloc_consistent(cs->hw.hfcpci.dev,
                                               0x8000, &cs->hw.hfcpci.dma);
    if (!cs->hw.hfcpci.fifos) {
        printk(KERN_WARNING "HFC-PCI: Error allocating FIFO memory!\n");
    if (cs->hw.hfcpci.dma & 0x7fff) {
               "HFC-PCI: Error DMA memory not on 32K boundary (%lx)\n",
               (u_long)cs->hw.hfcpci.dma);
        pci_free_consistent(cs->hw.hfcpci.dev, 0x8000,
                            cs->hw.hfcpci.fifos, cs->hw.hfcpci.dma);
    pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u32)cs->hw.hfcpci.dma);
    cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
           "HFC-PCI: defined at mem %p fifo %p(%lx) IRQ %d HZ %d\n",
           cs->hw.hfcpci.pci_io,
           cs->hw.hfcpci.fifos,
           (u_long)cs->hw.hfcpci.dma,

    spin_lock_irqsave(&cs->lock, flags);

    pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO);       /* enable memory mapped ports, disable busmaster */
    cs->hw.hfcpci.int_m2 = 0;   /* disable all interrupts */
    cs->hw.hfcpci.int_m1 = 0;
    Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
    Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
    /* At this point the needed PCI config is done */
    /* fifos are still not enabled */

    INIT_WORK(&cs->tqueue, hfcpci_bh);
    cs->setstack_d = setstack_hfcpci;
    cs->BC_Send_Data = &hfcpci_send_data;
    cs->readisac = NULL;
    cs->writeisac = NULL;
    cs->readisacfifo = NULL;
    cs->writeisacfifo = NULL;
    cs->BC_Read_Reg = NULL;
    cs->BC_Write_Reg = NULL;
    cs->irq_func = &hfcpci_interrupt;
    cs->irq_flags |= IRQF_SHARED;
    cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
    cs->hw.hfcpci.timer.data = (long) cs;
    init_timer(&cs->hw.hfcpci.timer);
    cs->cardmsg = &hfcpci_card_msg;
    cs->auxcmd = &hfcpci_auxcmd;

    spin_unlock_irqrestore(&cs->lock, flags);
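/* The 32 KB FIFO block must start on a 32 KB boundary: the DMA handle is
 * written to PCI config register 0x80 and the chip presumably ignores the low
 * 15 address bits, which is why a handle with (dma & 0x7fff) non-zero is
 * released again and rejected above. */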