/*
 * NOTE(review): web-scrape residue neutralized into a comment. The captured
 * page header read: "[PATCH] g_file_storage: fix obscure race condition",
 * path "[linux/fpc-iii.git] / drivers / isdn / hisax / hfc_pci.c",
 * blob 4866fc32d8d9f1697806ccd938b9edf2d13692eb.
 */
/* $Id: hfc_pci.c,v 1.48.2.4 2004/02/11 13:21:33 keil Exp $
 *
 * low level driver for CCD's hfc-pci based cards
 *
 * Author       Werner Cornelius
 *              based on existing driver for CCD hfc ISA cards
 * Copyright    by Werner Cornelius  <werner@isdn4linux.de>
 *              by Karsten Keil      <keil@isdn4linux.de>
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * For changes and modifications please read
 * Documentation/isdn/HiSax.cert
 */
18 #include <linux/init.h>
19 #include <linux/config.h>
20 #include "hisax.h"
21 #include "hfc_pci.h"
22 #include "isdnl1.h"
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
26 extern const char *CardType[];
28 static const char *hfcpci_revision = "$Revision: 1.48.2.4 $";
/* table entry in the PCI devices list */
typedef struct {
    int vendor_id;
    int device_id;
    char *vendor_name;
    char *card_name;
} PCI_ENTRY;
#define NT_T1_COUNT	20	/* number of 3.125ms interrupts for G2 timeout */
#define CLKDEL_TE	0x0e	/* CLKDEL in TE mode */
#define CLKDEL_NT	0x6c	/* CLKDEL in NT mode */
42 static const PCI_ENTRY id_list[] =
44 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0, "CCD/Billion/Asuscom", "2BD0"},
45 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000, "Billion", "B000"},
46 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006, "Billion", "B006"},
47 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007, "Billion", "B007"},
48 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008, "Billion", "B008"},
49 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009, "Billion", "B009"},
50 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A, "Billion", "B00A"},
51 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, "Billion", "B00B"},
52 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, "Billion", "B00C"},
53 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, "Seyeon", "B100"},
54 {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, "Abocom/Magitek", "2BD1"},
55 {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, "Asuscom/Askey", "675"},
56 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, "German telekom", "T-Concept"},
57 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T, "German telekom", "A1T"},
58 {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575, "Motorola MC145575", "MC145575"},
59 {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0, "Zoltrix", "2BD0"},
60 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E,"Digi International", "Digi DataFire Micro V IOM2 (Europe)"},
61 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,"Digi International", "Digi DataFire Micro V (Europe)"},
62 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,"Digi International", "Digi DataFire Micro V IOM2 (North America)"},
63 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,"Digi International", "Digi DataFire Micro V (North America)"},
64 {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, "Sitecom Europe", "DC-105 ISDN PCI"},
65 {0, 0, NULL, NULL},
69 #ifdef CONFIG_PCI
71 /******************************************/
72 /* free hardware resources used by driver */
73 /******************************************/
74 static void
75 release_io_hfcpci(struct IsdnCardState *cs)
77 printk(KERN_INFO "HiSax: release hfcpci at %p\n",
78 cs->hw.hfcpci.pci_io);
79 cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */
80 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
81 Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */
82 mdelay(10);
83 Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */
84 mdelay(10);
85 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
86 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, 0); /* disable memory mapped ports + busmaster */
87 del_timer(&cs->hw.hfcpci.timer);
88 kfree(cs->hw.hfcpci.share_start);
89 cs->hw.hfcpci.share_start = NULL;
90 iounmap((void *)cs->hw.hfcpci.pci_io);
93 /********************************************************************************/
94 /* function called to reset the HFC PCI chip. A complete software reset of chip */
95 /* and fifos is done. */
96 /********************************************************************************/
97 static void
98 reset_hfcpci(struct IsdnCardState *cs)
100 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
101 cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */
102 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
104 printk(KERN_INFO "HFC_PCI: resetting card\n");
105 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO + PCI_ENA_MASTER); /* enable memory ports + busmaster */
106 Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */
107 mdelay(10);
108 Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */
109 mdelay(10);
110 if (Read_hfc(cs, HFCPCI_STATUS) & 2)
111 printk(KERN_WARNING "HFC-PCI init bit busy\n");
113 cs->hw.hfcpci.fifo_en = 0x30; /* only D fifos enabled */
114 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
116 cs->hw.hfcpci.trm = 0 + HFCPCI_BTRANS_THRESMASK; /* no echo connect , threshold */
117 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
119 Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_TE); /* ST-Bit delay for TE-Mode */
120 cs->hw.hfcpci.sctrl_e = HFCPCI_AUTO_AWAKE;
121 Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e); /* S/T Auto awake */
122 cs->hw.hfcpci.bswapped = 0; /* no exchange */
123 cs->hw.hfcpci.nt_mode = 0; /* we are in TE mode */
124 cs->hw.hfcpci.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
125 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
127 cs->hw.hfcpci.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
128 HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
129 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
131 /* Clear already pending ints */
132 if (Read_hfc(cs, HFCPCI_INT_S1));
134 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 2); /* HFC ST 2 */
135 udelay(10);
136 Write_hfc(cs, HFCPCI_STATES, 2); /* HFC ST 2 */
137 cs->hw.hfcpci.mst_m = HFCPCI_MASTER; /* HFC Master Mode */
139 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
140 cs->hw.hfcpci.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
141 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
142 cs->hw.hfcpci.sctrl_r = 0;
143 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
145 /* Init GCI/IOM2 in master mode */
146 /* Slots 0 and 1 are set for B-chan 1 and 2 */
147 /* D- and monitor/CI channel are not enabled */
148 /* STIO1 is used as output for data, B1+B2 from ST->IOM+HFC */
149 /* STIO2 is used as data input, B1+B2 from IOM->ST */
150 /* ST B-channel send disabled -> continous 1s */
151 /* The IOM slots are always enabled */
152 cs->hw.hfcpci.conn = 0x36; /* set data flow directions */
153 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
154 Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* B1-Slot 0 STIO1 out enabled */
155 Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* B2-Slot 1 STIO1 out enabled */
156 Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* B1-Slot 0 STIO2 in enabled */
157 Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* B2-Slot 1 STIO2 in enabled */
159 /* Finally enable IRQ output */
160 cs->hw.hfcpci.int_m2 = HFCPCI_IRQ_ENABLE;
161 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
162 if (Read_hfc(cs, HFCPCI_INT_S1));
165 /***************************************************/
166 /* Timer function called when kernel timer expires */
167 /***************************************************/
168 static void
169 hfcpci_Timer(struct IsdnCardState *cs)
171 cs->hw.hfcpci.timer.expires = jiffies + 75;
172 /* WD RESET */
173 /* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcpci.ctmt | 0x80);
174 add_timer(&cs->hw.hfcpci.timer);
179 /*********************************/
180 /* schedule a new D-channel task */
181 /*********************************/
182 static void
183 sched_event_D_pci(struct IsdnCardState *cs, int event)
185 test_and_set_bit(event, &cs->event);
186 schedule_work(&cs->tqueue);
189 /*********************************/
190 /* schedule a new b_channel task */
191 /*********************************/
192 static void
193 hfcpci_sched_event(struct BCState *bcs, int event)
195 test_and_set_bit(event, &bcs->event);
196 schedule_work(&bcs->tqueue);
199 /************************************************/
200 /* select a b-channel entry matching and active */
201 /************************************************/
202 static
203 struct BCState *
204 Sel_BCS(struct IsdnCardState *cs, int channel)
206 if (cs->bcs[0].mode && (cs->bcs[0].channel == channel))
207 return (&cs->bcs[0]);
208 else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel))
209 return (&cs->bcs[1]);
210 else
211 return (NULL);
214 /***************************************/
215 /* clear the desired B-channel rx fifo */
216 /***************************************/
217 static void hfcpci_clear_fifo_rx(struct IsdnCardState *cs, int fifo)
218 { u_char fifo_state;
219 bzfifo_type *bzr;
221 if (fifo) {
222 bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
223 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2RX;
224 } else {
225 bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
226 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1RX;
228 if (fifo_state)
229 cs->hw.hfcpci.fifo_en ^= fifo_state;
230 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
231 cs->hw.hfcpci.last_bfifo_cnt[fifo] = 0;
232 bzr->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
233 bzr->za[MAX_B_FRAMES].z2 = bzr->za[MAX_B_FRAMES].z1;
234 bzr->f1 = MAX_B_FRAMES;
235 bzr->f2 = bzr->f1; /* init F pointers to remain constant */
236 if (fifo_state)
237 cs->hw.hfcpci.fifo_en |= fifo_state;
238 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
241 /***************************************/
242 /* clear the desired B-channel tx fifo */
243 /***************************************/
244 static void hfcpci_clear_fifo_tx(struct IsdnCardState *cs, int fifo)
245 { u_char fifo_state;
246 bzfifo_type *bzt;
248 if (fifo) {
249 bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
250 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2TX;
251 } else {
252 bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
253 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1TX;
255 if (fifo_state)
256 cs->hw.hfcpci.fifo_en ^= fifo_state;
257 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
258 bzt->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
259 bzt->za[MAX_B_FRAMES].z2 = bzt->za[MAX_B_FRAMES].z1;
260 bzt->f1 = MAX_B_FRAMES;
261 bzt->f2 = bzt->f1; /* init F pointers to remain constant */
262 if (fifo_state)
263 cs->hw.hfcpci.fifo_en |= fifo_state;
264 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
267 /*********************************************/
268 /* read a complete B-frame out of the buffer */
269 /*********************************************/
270 static struct sk_buff
272 hfcpci_empty_fifo(struct BCState *bcs, bzfifo_type * bz, u_char * bdata, int count)
274 u_char *ptr, *ptr1, new_f2;
275 struct sk_buff *skb;
276 struct IsdnCardState *cs = bcs->cs;
277 int total, maxlen, new_z2;
278 z_type *zp;
280 if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
281 debugl1(cs, "hfcpci_empty_fifo");
282 zp = &bz->za[bz->f2]; /* point to Z-Regs */
283 new_z2 = zp->z2 + count; /* new position in fifo */
284 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
285 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
286 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
287 if ((count > HSCX_BUFMAX + 3) || (count < 4) ||
288 (*(bdata + (zp->z1 - B_SUB_VAL)))) {
289 if (cs->debug & L1_DEB_WARN)
290 debugl1(cs, "hfcpci_empty_fifo: incoming packet invalid length %d or crc", count);
291 #ifdef ERROR_STATISTIC
292 bcs->err_inv++;
293 #endif
294 bz->za[new_f2].z2 = new_z2;
295 bz->f2 = new_f2; /* next buffer */
296 skb = NULL;
297 } else if (!(skb = dev_alloc_skb(count - 3)))
298 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
299 else {
300 total = count;
301 count -= 3;
302 ptr = skb_put(skb, count);
304 if (zp->z2 + count <= B_FIFO_SIZE + B_SUB_VAL)
305 maxlen = count; /* complete transfer */
306 else
307 maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */
309 ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */
310 memcpy(ptr, ptr1, maxlen); /* copy data */
311 count -= maxlen;
313 if (count) { /* rest remaining */
314 ptr += maxlen;
315 ptr1 = bdata; /* start of buffer */
316 memcpy(ptr, ptr1, count); /* rest */
318 bz->za[new_f2].z2 = new_z2;
319 bz->f2 = new_f2; /* next buffer */
322 return (skb);
325 /*******************************/
326 /* D-channel receive procedure */
327 /*******************************/
328 static
330 receive_dmsg(struct IsdnCardState *cs)
332 struct sk_buff *skb;
333 int maxlen;
334 int rcnt, total;
335 int count = 5;
336 u_char *ptr, *ptr1;
337 dfifo_type *df;
338 z_type *zp;
340 df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_rx;
341 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
342 debugl1(cs, "rec_dmsg blocked");
343 return (1);
345 while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
346 zp = &df->za[df->f2 & D_FREG_MASK];
347 rcnt = zp->z1 - zp->z2;
348 if (rcnt < 0)
349 rcnt += D_FIFO_SIZE;
350 rcnt++;
351 if (cs->debug & L1_DEB_ISAC)
352 debugl1(cs, "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)",
353 df->f1, df->f2, zp->z1, zp->z2, rcnt);
355 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
356 (df->data[zp->z1])) {
357 if (cs->debug & L1_DEB_WARN)
358 debugl1(cs, "empty_fifo hfcpci paket inv. len %d or crc %d", rcnt, df->data[zp->z1]);
359 #ifdef ERROR_STATISTIC
360 cs->err_rx++;
361 #endif
362 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */
363 df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + rcnt) & (D_FIFO_SIZE - 1);
364 } else if ((skb = dev_alloc_skb(rcnt - 3))) {
365 total = rcnt;
366 rcnt -= 3;
367 ptr = skb_put(skb, rcnt);
369 if (zp->z2 + rcnt <= D_FIFO_SIZE)
370 maxlen = rcnt; /* complete transfer */
371 else
372 maxlen = D_FIFO_SIZE - zp->z2; /* maximum */
374 ptr1 = df->data + zp->z2; /* start of data */
375 memcpy(ptr, ptr1, maxlen); /* copy data */
376 rcnt -= maxlen;
378 if (rcnt) { /* rest remaining */
379 ptr += maxlen;
380 ptr1 = df->data; /* start of buffer */
381 memcpy(ptr, ptr1, rcnt); /* rest */
383 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */
384 df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + total) & (D_FIFO_SIZE - 1);
386 skb_queue_tail(&cs->rq, skb);
387 sched_event_D_pci(cs, D_RCVBUFREADY);
388 } else
389 printk(KERN_WARNING "HFC-PCI: D receive out of memory\n");
391 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
392 return (1);
395 /*******************************************************************************/
396 /* check for transparent receive data and read max one threshold size if avail */
397 /*******************************************************************************/
398 static int
399 hfcpci_empty_fifo_trans(struct BCState *bcs, bzfifo_type * bz, u_char * bdata)
401 unsigned short *z1r, *z2r;
402 int new_z2, fcnt, maxlen;
403 struct sk_buff *skb;
404 u_char *ptr, *ptr1;
406 z1r = &bz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
407 z2r = z1r + 1;
409 if (!(fcnt = *z1r - *z2r))
410 return (0); /* no data avail */
412 if (fcnt <= 0)
413 fcnt += B_FIFO_SIZE; /* bytes actually buffered */
414 if (fcnt > HFCPCI_BTRANS_THRESHOLD)
415 fcnt = HFCPCI_BTRANS_THRESHOLD; /* limit size */
417 new_z2 = *z2r + fcnt; /* new position in fifo */
418 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
419 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
421 if (!(skb = dev_alloc_skb(fcnt)))
422 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
423 else {
424 ptr = skb_put(skb, fcnt);
425 if (*z2r + fcnt <= B_FIFO_SIZE + B_SUB_VAL)
426 maxlen = fcnt; /* complete transfer */
427 else
428 maxlen = B_FIFO_SIZE + B_SUB_VAL - *z2r; /* maximum */
430 ptr1 = bdata + (*z2r - B_SUB_VAL); /* start of data */
431 memcpy(ptr, ptr1, maxlen); /* copy data */
432 fcnt -= maxlen;
434 if (fcnt) { /* rest remaining */
435 ptr += maxlen;
436 ptr1 = bdata; /* start of buffer */
437 memcpy(ptr, ptr1, fcnt); /* rest */
439 skb_queue_tail(&bcs->rqueue, skb);
440 hfcpci_sched_event(bcs, B_RCVBUFREADY);
443 *z2r = new_z2; /* new position */
444 return (1);
445 } /* hfcpci_empty_fifo_trans */
447 /**********************************/
448 /* B-channel main receive routine */
449 /**********************************/
450 static void
451 main_rec_hfcpci(struct BCState *bcs)
453 struct IsdnCardState *cs = bcs->cs;
454 int rcnt, real_fifo;
455 int receive, count = 5;
456 struct sk_buff *skb;
457 bzfifo_type *bz;
458 u_char *bdata;
459 z_type *zp;
462 if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
463 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
464 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
465 real_fifo = 1;
466 } else {
467 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
468 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b1;
469 real_fifo = 0;
471 Begin:
472 count--;
473 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
474 debugl1(cs, "rec_data %d blocked", bcs->channel);
475 return;
477 if (bz->f1 != bz->f2) {
478 if (cs->debug & L1_DEB_HSCX)
479 debugl1(cs, "hfcpci rec %d f1(%d) f2(%d)",
480 bcs->channel, bz->f1, bz->f2);
481 zp = &bz->za[bz->f2];
483 rcnt = zp->z1 - zp->z2;
484 if (rcnt < 0)
485 rcnt += B_FIFO_SIZE;
486 rcnt++;
487 if (cs->debug & L1_DEB_HSCX)
488 debugl1(cs, "hfcpci rec %d z1(%x) z2(%x) cnt(%d)",
489 bcs->channel, zp->z1, zp->z2, rcnt);
490 if ((skb = hfcpci_empty_fifo(bcs, bz, bdata, rcnt))) {
491 skb_queue_tail(&bcs->rqueue, skb);
492 hfcpci_sched_event(bcs, B_RCVBUFREADY);
494 rcnt = bz->f1 - bz->f2;
495 if (rcnt < 0)
496 rcnt += MAX_B_FRAMES + 1;
497 if (cs->hw.hfcpci.last_bfifo_cnt[real_fifo] > rcnt + 1) {
498 rcnt = 0;
499 hfcpci_clear_fifo_rx(cs, real_fifo);
501 cs->hw.hfcpci.last_bfifo_cnt[real_fifo] = rcnt;
502 if (rcnt > 1)
503 receive = 1;
504 else
505 receive = 0;
506 } else if (bcs->mode == L1_MODE_TRANS)
507 receive = hfcpci_empty_fifo_trans(bcs, bz, bdata);
508 else
509 receive = 0;
510 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
511 if (count && receive)
512 goto Begin;
513 return;
516 /**************************/
517 /* D-channel send routine */
518 /**************************/
519 static void
520 hfcpci_fill_dfifo(struct IsdnCardState *cs)
522 int fcnt;
523 int count, new_z1, maxlen;
524 dfifo_type *df;
525 u_char *src, *dst, new_f1;
527 if (!cs->tx_skb)
528 return;
529 if (cs->tx_skb->len <= 0)
530 return;
532 df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_tx;
534 if (cs->debug & L1_DEB_ISAC)
535 debugl1(cs, "hfcpci_fill_Dfifo f1(%d) f2(%d) z1(f1)(%x)",
536 df->f1, df->f2,
537 df->za[df->f1 & D_FREG_MASK].z1);
538 fcnt = df->f1 - df->f2; /* frame count actually buffered */
539 if (fcnt < 0)
540 fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
541 if (fcnt > (MAX_D_FRAMES - 1)) {
542 if (cs->debug & L1_DEB_ISAC)
543 debugl1(cs, "hfcpci_fill_Dfifo more as 14 frames");
544 #ifdef ERROR_STATISTIC
545 cs->err_tx++;
546 #endif
547 return;
549 /* now determine free bytes in FIFO buffer */
550 count = df->za[df->f2 & D_FREG_MASK].z2 - df->za[df->f1 & D_FREG_MASK].z1 - 1;
551 if (count <= 0)
552 count += D_FIFO_SIZE; /* count now contains available bytes */
554 if (cs->debug & L1_DEB_ISAC)
555 debugl1(cs, "hfcpci_fill_Dfifo count(%ld/%d)",
556 cs->tx_skb->len, count);
557 if (count < cs->tx_skb->len) {
558 if (cs->debug & L1_DEB_ISAC)
559 debugl1(cs, "hfcpci_fill_Dfifo no fifo mem");
560 return;
562 count = cs->tx_skb->len; /* get frame len */
563 new_z1 = (df->za[df->f1 & D_FREG_MASK].z1 + count) & (D_FIFO_SIZE - 1);
564 new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
565 src = cs->tx_skb->data; /* source pointer */
566 dst = df->data + df->za[df->f1 & D_FREG_MASK].z1;
567 maxlen = D_FIFO_SIZE - df->za[df->f1 & D_FREG_MASK].z1; /* end fifo */
568 if (maxlen > count)
569 maxlen = count; /* limit size */
570 memcpy(dst, src, maxlen); /* first copy */
572 count -= maxlen; /* remaining bytes */
573 if (count) {
574 dst = df->data; /* start of buffer */
575 src += maxlen; /* new position */
576 memcpy(dst, src, count);
578 df->za[new_f1 & D_FREG_MASK].z1 = new_z1; /* for next buffer */
579 df->za[df->f1 & D_FREG_MASK].z1 = new_z1; /* new pos actual buffer */
580 df->f1 = new_f1; /* next frame */
582 dev_kfree_skb_any(cs->tx_skb);
583 cs->tx_skb = NULL;
584 return;
587 /**************************/
588 /* B-channel send routine */
589 /**************************/
590 static void
591 hfcpci_fill_fifo(struct BCState *bcs)
593 struct IsdnCardState *cs = bcs->cs;
594 int maxlen, fcnt;
595 int count, new_z1;
596 bzfifo_type *bz;
597 u_char *bdata;
598 u_char new_f1, *src, *dst;
599 unsigned short *z1t, *z2t;
601 if (!bcs->tx_skb)
602 return;
603 if (bcs->tx_skb->len <= 0)
604 return;
606 if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
607 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
608 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b2;
609 } else {
610 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
611 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b1;
614 if (bcs->mode == L1_MODE_TRANS) {
615 z1t = &bz->za[MAX_B_FRAMES].z1;
616 z2t = z1t + 1;
617 if (cs->debug & L1_DEB_HSCX)
618 debugl1(cs, "hfcpci_fill_fifo_trans %d z1(%x) z2(%x)",
619 bcs->channel, *z1t, *z2t);
620 fcnt = *z2t - *z1t;
621 if (fcnt <= 0)
622 fcnt += B_FIFO_SIZE; /* fcnt contains available bytes in fifo */
623 fcnt = B_FIFO_SIZE - fcnt; /* remaining bytes to send */
625 while ((fcnt < 2 * HFCPCI_BTRANS_THRESHOLD) && (bcs->tx_skb)) {
626 if (bcs->tx_skb->len < B_FIFO_SIZE - fcnt) {
627 /* data is suitable for fifo */
628 count = bcs->tx_skb->len;
630 new_z1 = *z1t + count; /* new buffer Position */
631 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
632 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
633 src = bcs->tx_skb->data; /* source pointer */
634 dst = bdata + (*z1t - B_SUB_VAL);
635 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - *z1t; /* end of fifo */
636 if (maxlen > count)
637 maxlen = count; /* limit size */
638 memcpy(dst, src, maxlen); /* first copy */
640 count -= maxlen; /* remaining bytes */
641 if (count) {
642 dst = bdata; /* start of buffer */
643 src += maxlen; /* new position */
644 memcpy(dst, src, count);
646 bcs->tx_cnt -= bcs->tx_skb->len;
647 fcnt += bcs->tx_skb->len;
648 *z1t = new_z1; /* now send data */
649 } else if (cs->debug & L1_DEB_HSCX)
650 debugl1(cs, "hfcpci_fill_fifo_trans %d frame length %d discarded",
651 bcs->channel, bcs->tx_skb->len);
653 if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
654 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
655 u_long flags;
656 spin_lock_irqsave(&bcs->aclock, flags);
657 bcs->ackcnt += bcs->tx_skb->len;
658 spin_unlock_irqrestore(&bcs->aclock, flags);
659 schedule_event(bcs, B_ACKPENDING);
662 dev_kfree_skb_any(bcs->tx_skb);
663 bcs->tx_skb = skb_dequeue(&bcs->squeue); /* fetch next data */
665 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
666 return;
668 if (cs->debug & L1_DEB_HSCX)
669 debugl1(cs, "hfcpci_fill_fifo_hdlc %d f1(%d) f2(%d) z1(f1)(%x)",
670 bcs->channel, bz->f1, bz->f2,
671 bz->za[bz->f1].z1);
673 fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
674 if (fcnt < 0)
675 fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
676 if (fcnt > (MAX_B_FRAMES - 1)) {
677 if (cs->debug & L1_DEB_HSCX)
678 debugl1(cs, "hfcpci_fill_Bfifo more as 14 frames");
679 return;
681 /* now determine free bytes in FIFO buffer */
682 count = bz->za[bz->f2].z2 - bz->za[bz->f1].z1 - 1;
683 if (count <= 0)
684 count += B_FIFO_SIZE; /* count now contains available bytes */
686 if (cs->debug & L1_DEB_HSCX)
687 debugl1(cs, "hfcpci_fill_fifo %d count(%ld/%d),%lx",
688 bcs->channel, bcs->tx_skb->len,
689 count, current->state);
691 if (count < bcs->tx_skb->len) {
692 if (cs->debug & L1_DEB_HSCX)
693 debugl1(cs, "hfcpci_fill_fifo no fifo mem");
694 return;
696 count = bcs->tx_skb->len; /* get frame len */
697 new_z1 = bz->za[bz->f1].z1 + count; /* new buffer Position */
698 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
699 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
701 new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
702 src = bcs->tx_skb->data; /* source pointer */
703 dst = bdata + (bz->za[bz->f1].z1 - B_SUB_VAL);
704 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - bz->za[bz->f1].z1; /* end fifo */
705 if (maxlen > count)
706 maxlen = count; /* limit size */
707 memcpy(dst, src, maxlen); /* first copy */
709 count -= maxlen; /* remaining bytes */
710 if (count) {
711 dst = bdata; /* start of buffer */
712 src += maxlen; /* new position */
713 memcpy(dst, src, count);
715 bcs->tx_cnt -= bcs->tx_skb->len;
716 if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
717 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
718 u_long flags;
719 spin_lock_irqsave(&bcs->aclock, flags);
720 bcs->ackcnt += bcs->tx_skb->len;
721 spin_unlock_irqrestore(&bcs->aclock, flags);
722 schedule_event(bcs, B_ACKPENDING);
725 bz->za[new_f1].z1 = new_z1; /* for next buffer */
726 bz->f1 = new_f1; /* next frame */
728 dev_kfree_skb_any(bcs->tx_skb);
729 bcs->tx_skb = NULL;
730 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
731 return;
734 /**********************************************/
735 /* D-channel l1 state call for leased NT-mode */
736 /**********************************************/
737 static void
738 dch_nt_l2l1(struct PStack *st, int pr, void *arg)
740 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
742 switch (pr) {
743 case (PH_DATA | REQUEST):
744 case (PH_PULL | REQUEST):
745 case (PH_PULL | INDICATION):
746 st->l1.l1hw(st, pr, arg);
747 break;
748 case (PH_ACTIVATE | REQUEST):
749 st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
750 break;
751 case (PH_TESTLOOP | REQUEST):
752 if (1 & (long) arg)
753 debugl1(cs, "PH_TEST_LOOP B1");
754 if (2 & (long) arg)
755 debugl1(cs, "PH_TEST_LOOP B2");
756 if (!(3 & (long) arg))
757 debugl1(cs, "PH_TEST_LOOP DISABLED");
758 st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg);
759 break;
760 default:
761 if (cs->debug)
762 debugl1(cs, "dch_nt_l2l1 msg %04X unhandled", pr);
763 break;
769 /***********************/
770 /* set/reset echo mode */
771 /***********************/
772 static int
773 hfcpci_auxcmd(struct IsdnCardState *cs, isdn_ctrl * ic)
775 u_long flags;
776 int i = *(unsigned int *) ic->parm.num;
778 if ((ic->arg == 98) &&
779 (!(cs->hw.hfcpci.int_m1 & (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC + HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC)))) {
780 spin_lock_irqsave(&cs->lock, flags);
781 Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_NT); /* ST-Bit delay for NT-Mode */
782 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 0); /* HFC ST G0 */
783 udelay(10);
784 cs->hw.hfcpci.sctrl |= SCTRL_MODE_NT;
785 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl); /* set NT-mode */
786 udelay(10);
787 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 1); /* HFC ST G1 */
788 udelay(10);
789 Write_hfc(cs, HFCPCI_STATES, 1 | HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
790 cs->dc.hfcpci.ph_state = 1;
791 cs->hw.hfcpci.nt_mode = 1;
792 cs->hw.hfcpci.nt_timer = 0;
793 cs->stlist->l2.l2l1 = dch_nt_l2l1;
794 spin_unlock_irqrestore(&cs->lock, flags);
795 debugl1(cs, "NT mode activated");
796 return (0);
798 if ((cs->chanlimit > 1) || (cs->hw.hfcpci.bswapped) ||
799 (cs->hw.hfcpci.nt_mode) || (ic->arg != 12))
800 return (-EINVAL);
802 spin_lock_irqsave(&cs->lock, flags);
803 if (i) {
804 cs->logecho = 1;
805 cs->hw.hfcpci.trm |= 0x20; /* enable echo chan */
806 cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_B2REC;
807 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2RX;
808 } else {
809 cs->logecho = 0;
810 cs->hw.hfcpci.trm &= ~0x20; /* disable echo chan */
811 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_B2REC;
812 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2RX;
814 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
815 cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
816 cs->hw.hfcpci.conn |= 0x10; /* B2-IOM -> B2-ST */
817 cs->hw.hfcpci.ctmt &= ~2;
818 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
819 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
820 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
821 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
822 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
823 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
824 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
825 spin_unlock_irqrestore(&cs->lock, flags);
826 return (0);
827 } /* hfcpci_auxcmd */
829 /*****************************/
830 /* E-channel receive routine */
831 /*****************************/
832 static void
833 receive_emsg(struct IsdnCardState *cs)
835 int rcnt;
836 int receive, count = 5;
837 bzfifo_type *bz;
838 u_char *bdata;
839 z_type *zp;
840 u_char *ptr, *ptr1, new_f2;
841 int total, maxlen, new_z2;
842 u_char e_buffer[256];
844 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
845 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
846 Begin:
847 count--;
848 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
849 debugl1(cs, "echo_rec_data blocked");
850 return;
852 if (bz->f1 != bz->f2) {
853 if (cs->debug & L1_DEB_ISAC)
854 debugl1(cs, "hfcpci e_rec f1(%d) f2(%d)",
855 bz->f1, bz->f2);
856 zp = &bz->za[bz->f2];
858 rcnt = zp->z1 - zp->z2;
859 if (rcnt < 0)
860 rcnt += B_FIFO_SIZE;
861 rcnt++;
862 if (cs->debug & L1_DEB_ISAC)
863 debugl1(cs, "hfcpci e_rec z1(%x) z2(%x) cnt(%d)",
864 zp->z1, zp->z2, rcnt);
865 new_z2 = zp->z2 + rcnt; /* new position in fifo */
866 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
867 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
868 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
869 if ((rcnt > 256 + 3) || (count < 4) ||
870 (*(bdata + (zp->z1 - B_SUB_VAL)))) {
871 if (cs->debug & L1_DEB_WARN)
872 debugl1(cs, "hfcpci_empty_echan: incoming packet invalid length %d or crc", rcnt);
873 bz->za[new_f2].z2 = new_z2;
874 bz->f2 = new_f2; /* next buffer */
875 } else {
876 total = rcnt;
877 rcnt -= 3;
878 ptr = e_buffer;
880 if (zp->z2 <= B_FIFO_SIZE + B_SUB_VAL)
881 maxlen = rcnt; /* complete transfer */
882 else
883 maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */
885 ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */
886 memcpy(ptr, ptr1, maxlen); /* copy data */
887 rcnt -= maxlen;
889 if (rcnt) { /* rest remaining */
890 ptr += maxlen;
891 ptr1 = bdata; /* start of buffer */
892 memcpy(ptr, ptr1, rcnt); /* rest */
894 bz->za[new_f2].z2 = new_z2;
895 bz->f2 = new_f2; /* next buffer */
896 if (cs->debug & DEB_DLOG_HEX) {
897 ptr = cs->dlog;
898 if ((total - 3) < MAX_DLOG_SPACE / 3 - 10) {
899 *ptr++ = 'E';
900 *ptr++ = 'C';
901 *ptr++ = 'H';
902 *ptr++ = 'O';
903 *ptr++ = ':';
904 ptr += QuickHex(ptr, e_buffer, total - 3);
905 ptr--;
906 *ptr++ = '\n';
907 *ptr = 0;
908 HiSax_putstatus(cs, NULL, cs->dlog);
909 } else
910 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
914 rcnt = bz->f1 - bz->f2;
915 if (rcnt < 0)
916 rcnt += MAX_B_FRAMES + 1;
917 if (rcnt > 1)
918 receive = 1;
919 else
920 receive = 0;
921 } else
922 receive = 0;
923 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
924 if (count && receive)
925 goto Begin;
926 return;
927 } /* receive_emsg */
929 /*********************/
930 /* Interrupt handler */
931 /*********************/
932 static irqreturn_t
933 hfcpci_interrupt(int intno, void *dev_id, struct pt_regs *regs)
935 u_long flags;
936 struct IsdnCardState *cs = dev_id;
937 u_char exval;
938 struct BCState *bcs;
939 int count = 15;
940 u_char val, stat;
942 if (!(cs->hw.hfcpci.int_m2 & 0x08)) {
943 debugl1(cs, "HFC-PCI: int_m2 %x not initialised", cs->hw.hfcpci.int_m2);
944 return IRQ_NONE; /* not initialised */
946 spin_lock_irqsave(&cs->lock, flags);
947 if (HFCPCI_ANYINT & (stat = Read_hfc(cs, HFCPCI_STATUS))) {
948 val = Read_hfc(cs, HFCPCI_INT_S1);
949 if (cs->debug & L1_DEB_ISAC)
950 debugl1(cs, "HFC-PCI: stat(%02x) s1(%02x)", stat, val);
951 } else {
952 spin_unlock_irqrestore(&cs->lock, flags);
953 return IRQ_NONE;
955 if (cs->debug & L1_DEB_ISAC)
956 debugl1(cs, "HFC-PCI irq %x %s", val,
957 test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ?
958 "locked" : "unlocked");
959 val &= cs->hw.hfcpci.int_m1;
960 if (val & 0x40) { /* state machine irq */
961 exval = Read_hfc(cs, HFCPCI_STATES) & 0xf;
962 if (cs->debug & L1_DEB_ISAC)
963 debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcpci.ph_state,
964 exval);
965 cs->dc.hfcpci.ph_state = exval;
966 sched_event_D_pci(cs, D_L1STATECHANGE);
967 val &= ~0x40;
969 if (val & 0x80) { /* timer irq */
970 if (cs->hw.hfcpci.nt_mode) {
971 if ((--cs->hw.hfcpci.nt_timer) < 0)
972 sched_event_D_pci(cs, D_L1STATECHANGE);
974 val &= ~0x80;
975 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
977 while (val) {
978 if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
979 cs->hw.hfcpci.int_s1 |= val;
980 spin_unlock_irqrestore(&cs->lock, flags);
981 return IRQ_HANDLED;
983 if (cs->hw.hfcpci.int_s1 & 0x18) {
984 exval = val;
985 val = cs->hw.hfcpci.int_s1;
986 cs->hw.hfcpci.int_s1 = exval;
988 if (val & 0x08) {
989 if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
990 if (cs->debug)
991 debugl1(cs, "hfcpci spurious 0x08 IRQ");
992 } else
993 main_rec_hfcpci(bcs);
995 if (val & 0x10) {
996 if (cs->logecho)
997 receive_emsg(cs);
998 else if (!(bcs = Sel_BCS(cs, 1))) {
999 if (cs->debug)
1000 debugl1(cs, "hfcpci spurious 0x10 IRQ");
1001 } else
1002 main_rec_hfcpci(bcs);
1004 if (val & 0x01) {
1005 if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
1006 if (cs->debug)
1007 debugl1(cs, "hfcpci spurious 0x01 IRQ");
1008 } else {
1009 if (bcs->tx_skb) {
1010 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1011 hfcpci_fill_fifo(bcs);
1012 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1013 } else
1014 debugl1(cs, "fill_data %d blocked", bcs->channel);
1015 } else {
1016 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
1017 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1018 hfcpci_fill_fifo(bcs);
1019 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1020 } else
1021 debugl1(cs, "fill_data %d blocked", bcs->channel);
1022 } else {
1023 hfcpci_sched_event(bcs, B_XMTBUFREADY);
1028 if (val & 0x02) {
1029 if (!(bcs = Sel_BCS(cs, 1))) {
1030 if (cs->debug)
1031 debugl1(cs, "hfcpci spurious 0x02 IRQ");
1032 } else {
1033 if (bcs->tx_skb) {
1034 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1035 hfcpci_fill_fifo(bcs);
1036 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1037 } else
1038 debugl1(cs, "fill_data %d blocked", bcs->channel);
1039 } else {
1040 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
1041 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1042 hfcpci_fill_fifo(bcs);
1043 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1044 } else
1045 debugl1(cs, "fill_data %d blocked", bcs->channel);
1046 } else {
1047 hfcpci_sched_event(bcs, B_XMTBUFREADY);
1052 if (val & 0x20) { /* receive dframe */
1053 receive_dmsg(cs);
1055 if (val & 0x04) { /* dframe transmitted */
1056 if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
1057 del_timer(&cs->dbusytimer);
1058 if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
1059 sched_event_D_pci(cs, D_CLEARBUSY);
1060 if (cs->tx_skb) {
1061 if (cs->tx_skb->len) {
1062 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1063 hfcpci_fill_dfifo(cs);
1064 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1065 } else {
1066 debugl1(cs, "hfcpci_fill_dfifo irq blocked");
1068 goto afterXPR;
1069 } else {
1070 dev_kfree_skb_irq(cs->tx_skb);
1071 cs->tx_cnt = 0;
1072 cs->tx_skb = NULL;
1075 if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
1076 cs->tx_cnt = 0;
1077 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1078 hfcpci_fill_dfifo(cs);
1079 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1080 } else {
1081 debugl1(cs, "hfcpci_fill_dfifo irq blocked");
1083 } else
1084 sched_event_D_pci(cs, D_XMTBUFREADY);
1086 afterXPR:
1087 if (cs->hw.hfcpci.int_s1 && count--) {
1088 val = cs->hw.hfcpci.int_s1;
1089 cs->hw.hfcpci.int_s1 = 0;
1090 if (cs->debug & L1_DEB_ISAC)
1091 debugl1(cs, "HFC-PCI irq %x loop %d", val, 15 - count);
1092 } else
1093 val = 0;
1095 spin_unlock_irqrestore(&cs->lock, flags);
1096 return IRQ_HANDLED;
1099 /********************************************************************/
1100 /* timer callback for D-chan busy resolution. Currently no function */
1101 /********************************************************************/
/********************************************************************/
/* timer callback for D-chan busy resolution. Currently no function */
/********************************************************************/
static void
hfcpci_dbusy_timer(struct IsdnCardState *cs)
{
	/* intentionally empty: placeholder kept so dbusytimer has a
	 * valid callback (see inithfcpci) */
}
1107 /*************************************/
1108 /* Layer 1 D-channel hardware access */
1109 /*************************************/
1110 static void
1111 HFCPCI_l1hw(struct PStack *st, int pr, void *arg)
1113 u_long flags;
1114 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
1115 struct sk_buff *skb = arg;
1117 switch (pr) {
1118 case (PH_DATA | REQUEST):
1119 if (cs->debug & DEB_DLOG_HEX)
1120 LogFrame(cs, skb->data, skb->len);
1121 if (cs->debug & DEB_DLOG_VERBOSE)
1122 dlogframe(cs, skb, 0);
1123 spin_lock_irqsave(&cs->lock, flags);
1124 if (cs->tx_skb) {
1125 skb_queue_tail(&cs->sq, skb);
1126 #ifdef L2FRAME_DEBUG /* psa */
1127 if (cs->debug & L1_DEB_LAPD)
1128 Logl2Frame(cs, skb, "PH_DATA Queued", 0);
1129 #endif
1130 } else {
1131 cs->tx_skb = skb;
1132 cs->tx_cnt = 0;
1133 #ifdef L2FRAME_DEBUG /* psa */
1134 if (cs->debug & L1_DEB_LAPD)
1135 Logl2Frame(cs, skb, "PH_DATA", 0);
1136 #endif
1137 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1138 hfcpci_fill_dfifo(cs);
1139 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1140 } else
1141 debugl1(cs, "hfcpci_fill_dfifo blocked");
1144 spin_unlock_irqrestore(&cs->lock, flags);
1145 break;
1146 case (PH_PULL | INDICATION):
1147 spin_lock_irqsave(&cs->lock, flags);
1148 if (cs->tx_skb) {
1149 if (cs->debug & L1_DEB_WARN)
1150 debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
1151 skb_queue_tail(&cs->sq, skb);
1152 spin_unlock_irqrestore(&cs->lock, flags);
1153 break;
1155 if (cs->debug & DEB_DLOG_HEX)
1156 LogFrame(cs, skb->data, skb->len);
1157 if (cs->debug & DEB_DLOG_VERBOSE)
1158 dlogframe(cs, skb, 0);
1159 cs->tx_skb = skb;
1160 cs->tx_cnt = 0;
1161 #ifdef L2FRAME_DEBUG /* psa */
1162 if (cs->debug & L1_DEB_LAPD)
1163 Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
1164 #endif
1165 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1166 hfcpci_fill_dfifo(cs);
1167 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1168 } else
1169 debugl1(cs, "hfcpci_fill_dfifo blocked");
1170 spin_unlock_irqrestore(&cs->lock, flags);
1171 break;
1172 case (PH_PULL | REQUEST):
1173 #ifdef L2FRAME_DEBUG /* psa */
1174 if (cs->debug & L1_DEB_LAPD)
1175 debugl1(cs, "-> PH_REQUEST_PULL");
1176 #endif
1177 if (!cs->tx_skb) {
1178 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1179 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
1180 } else
1181 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1182 break;
1183 case (HW_RESET | REQUEST):
1184 spin_lock_irqsave(&cs->lock, flags);
1185 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3); /* HFC ST 3 */
1186 udelay(6);
1187 Write_hfc(cs, HFCPCI_STATES, 3); /* HFC ST 2 */
1188 cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
1189 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1190 Write_hfc(cs, HFCPCI_STATES, HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
1191 spin_unlock_irqrestore(&cs->lock, flags);
1192 l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
1193 break;
1194 case (HW_ENABLE | REQUEST):
1195 spin_lock_irqsave(&cs->lock, flags);
1196 Write_hfc(cs, HFCPCI_STATES, HFCPCI_DO_ACTION);
1197 spin_unlock_irqrestore(&cs->lock, flags);
1198 break;
1199 case (HW_DEACTIVATE | REQUEST):
1200 spin_lock_irqsave(&cs->lock, flags);
1201 cs->hw.hfcpci.mst_m &= ~HFCPCI_MASTER;
1202 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1203 spin_unlock_irqrestore(&cs->lock, flags);
1204 break;
1205 case (HW_INFO3 | REQUEST):
1206 spin_lock_irqsave(&cs->lock, flags);
1207 cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
1208 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1209 spin_unlock_irqrestore(&cs->lock, flags);
1210 break;
1211 case (HW_TESTLOOP | REQUEST):
1212 spin_lock_irqsave(&cs->lock, flags);
1213 switch ((int) arg) {
1214 case (1):
1215 Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* tx slot */
1216 Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* rx slot */
1217 cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~7) | 1;
1218 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1219 break;
1221 case (2):
1222 Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* tx slot */
1223 Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* rx slot */
1224 cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~0x38) | 0x08;
1225 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1226 break;
1228 default:
1229 spin_unlock_irqrestore(&cs->lock, flags);
1230 if (cs->debug & L1_DEB_WARN)
1231 debugl1(cs, "hfcpci_l1hw loop invalid %4x", (int) arg);
1232 return;
1234 cs->hw.hfcpci.trm |= 0x80; /* enable IOM-loop */
1235 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
1236 spin_unlock_irqrestore(&cs->lock, flags);
1237 break;
1238 default:
1239 if (cs->debug & L1_DEB_WARN)
1240 debugl1(cs, "hfcpci_l1hw unknown pr %4x", pr);
1241 break;
1245 /***********************************************/
1246 /* called during init setting l1 stack pointer */
1247 /***********************************************/
1248 static void
1249 setstack_hfcpci(struct PStack *st, struct IsdnCardState *cs)
1251 st->l1.l1hw = HFCPCI_l1hw;
1254 /**************************************/
1255 /* send B-channel data if not blocked */
1256 /**************************************/
1257 static void
1258 hfcpci_send_data(struct BCState *bcs)
1260 struct IsdnCardState *cs = bcs->cs;
1262 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1263 hfcpci_fill_fifo(bcs);
1264 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1265 } else
1266 debugl1(cs, "send_data %d blocked", bcs->channel);
1269 /***************************************************************/
1270 /* activate/deactivate hardware for selected channels and mode */
1271 /***************************************************************/
1272 static void
1273 mode_hfcpci(struct BCState *bcs, int mode, int bc)
1275 struct IsdnCardState *cs = bcs->cs;
1276 int fifo2;
1278 if (cs->debug & L1_DEB_HSCX)
1279 debugl1(cs, "HFCPCI bchannel mode %d bchan %d/%d",
1280 mode, bc, bcs->channel);
1281 bcs->mode = mode;
1282 bcs->channel = bc;
1283 fifo2 = bc;
1284 if (cs->chanlimit > 1) {
1285 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1286 cs->hw.hfcpci.sctrl_e &= ~0x80;
1287 } else {
1288 if (bc) {
1289 if (mode != L1_MODE_NULL) {
1290 cs->hw.hfcpci.bswapped = 1; /* B1 and B2 exchanged */
1291 cs->hw.hfcpci.sctrl_e |= 0x80;
1292 } else {
1293 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1294 cs->hw.hfcpci.sctrl_e &= ~0x80;
1296 fifo2 = 0;
1297 } else {
1298 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1299 cs->hw.hfcpci.sctrl_e &= ~0x80;
1302 switch (mode) {
1303 case (L1_MODE_NULL):
1304 if (bc) {
1305 cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
1306 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
1307 } else {
1308 cs->hw.hfcpci.sctrl &= ~SCTRL_B1_ENA;
1309 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B1_ENA;
1311 if (fifo2) {
1312 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
1313 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1314 } else {
1315 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
1316 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1318 break;
1319 case (L1_MODE_TRANS):
1320 hfcpci_clear_fifo_rx(cs, fifo2);
1321 hfcpci_clear_fifo_tx(cs, fifo2);
1322 if (bc) {
1323 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1324 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1325 } else {
1326 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1327 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1329 if (fifo2) {
1330 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
1331 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1332 cs->hw.hfcpci.ctmt |= 2;
1333 cs->hw.hfcpci.conn &= ~0x18;
1334 } else {
1335 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
1336 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1337 cs->hw.hfcpci.ctmt |= 1;
1338 cs->hw.hfcpci.conn &= ~0x03;
1340 break;
1341 case (L1_MODE_HDLC):
1342 hfcpci_clear_fifo_rx(cs, fifo2);
1343 hfcpci_clear_fifo_tx(cs, fifo2);
1344 if (bc) {
1345 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1346 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1347 } else {
1348 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1349 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1351 if (fifo2) {
1352 cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
1353 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
1354 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1355 cs->hw.hfcpci.ctmt &= ~2;
1356 cs->hw.hfcpci.conn &= ~0x18;
1357 } else {
1358 cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
1359 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
1360 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1361 cs->hw.hfcpci.ctmt &= ~1;
1362 cs->hw.hfcpci.conn &= ~0x03;
1364 break;
1365 case (L1_MODE_EXTRN):
1366 if (bc) {
1367 cs->hw.hfcpci.conn |= 0x10;
1368 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1369 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1370 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
1371 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1372 } else {
1373 cs->hw.hfcpci.conn |= 0x02;
1374 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1375 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1376 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
1377 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1379 break;
1381 Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e);
1382 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1383 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
1384 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
1385 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
1386 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
1387 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1390 /******************************/
1391 /* Layer2 -> Layer 1 Transfer */
1392 /******************************/
1393 static void
1394 hfcpci_l2l1(struct PStack *st, int pr, void *arg)
1396 struct BCState *bcs = st->l1.bcs;
1397 u_long flags;
1398 struct sk_buff *skb = arg;
1400 switch (pr) {
1401 case (PH_DATA | REQUEST):
1402 spin_lock_irqsave(&bcs->cs->lock, flags);
1403 if (bcs->tx_skb) {
1404 skb_queue_tail(&bcs->squeue, skb);
1405 } else {
1406 bcs->tx_skb = skb;
1407 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
1408 bcs->cs->BC_Send_Data(bcs);
1410 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1411 break;
1412 case (PH_PULL | INDICATION):
1413 spin_lock_irqsave(&bcs->cs->lock, flags);
1414 if (bcs->tx_skb) {
1415 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1416 printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n");
1417 break;
1419 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
1420 bcs->tx_skb = skb;
1421 bcs->cs->BC_Send_Data(bcs);
1422 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1423 break;
1424 case (PH_PULL | REQUEST):
1425 if (!bcs->tx_skb) {
1426 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1427 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
1428 } else
1429 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1430 break;
1431 case (PH_ACTIVATE | REQUEST):
1432 spin_lock_irqsave(&bcs->cs->lock, flags);
1433 test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
1434 mode_hfcpci(bcs, st->l1.mode, st->l1.bc);
1435 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1436 l1_msg_b(st, pr, arg);
1437 break;
1438 case (PH_DEACTIVATE | REQUEST):
1439 l1_msg_b(st, pr, arg);
1440 break;
1441 case (PH_DEACTIVATE | CONFIRM):
1442 spin_lock_irqsave(&bcs->cs->lock, flags);
1443 test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
1444 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1445 mode_hfcpci(bcs, 0, st->l1.bc);
1446 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1447 st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
1448 break;
1452 /******************************************/
1453 /* deactivate B-channel access and queues */
1454 /******************************************/
1455 static void
1456 close_hfcpci(struct BCState *bcs)
1458 mode_hfcpci(bcs, 0, bcs->channel);
1459 if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
1460 skb_queue_purge(&bcs->rqueue);
1461 skb_queue_purge(&bcs->squeue);
1462 if (bcs->tx_skb) {
1463 dev_kfree_skb_any(bcs->tx_skb);
1464 bcs->tx_skb = NULL;
1465 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1470 /*************************************/
1471 /* init B-channel queues and control */
1472 /*************************************/
1473 static int
1474 open_hfcpcistate(struct IsdnCardState *cs, struct BCState *bcs)
1476 if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
1477 skb_queue_head_init(&bcs->rqueue);
1478 skb_queue_head_init(&bcs->squeue);
1480 bcs->tx_skb = NULL;
1481 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1482 bcs->event = 0;
1483 bcs->tx_cnt = 0;
1484 return (0);
1487 /*********************************/
1488 /* inits the stack for B-channel */
1489 /*********************************/
1490 static int
1491 setstack_2b(struct PStack *st, struct BCState *bcs)
1493 bcs->channel = st->l1.bc;
1494 if (open_hfcpcistate(st->l1.hardware, bcs))
1495 return (-1);
1496 st->l1.bcs = bcs;
1497 st->l2.l2l1 = hfcpci_l2l1;
1498 setstack_manager(st);
1499 bcs->st = st;
1500 setstack_l1_B(st);
1501 return (0);
1504 /***************************/
1505 /* handle L1 state changes */
1506 /***************************/
1507 static void
1508 hfcpci_bh(struct IsdnCardState *cs)
1510 u_long flags;
1511 // struct PStack *stptr;
1513 if (!cs)
1514 return;
1515 if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
1516 if (!cs->hw.hfcpci.nt_mode)
1517 switch (cs->dc.hfcpci.ph_state) {
1518 case (0):
1519 l1_msg(cs, HW_RESET | INDICATION, NULL);
1520 break;
1521 case (3):
1522 l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
1523 break;
1524 case (8):
1525 l1_msg(cs, HW_RSYNC | INDICATION, NULL);
1526 break;
1527 case (6):
1528 l1_msg(cs, HW_INFO2 | INDICATION, NULL);
1529 break;
1530 case (7):
1531 l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
1532 break;
1533 default:
1534 break;
1535 } else {
1536 spin_lock_irqsave(&cs->lock, flags);
1537 switch (cs->dc.hfcpci.ph_state) {
1538 case (2):
1539 if (cs->hw.hfcpci.nt_timer < 0) {
1540 cs->hw.hfcpci.nt_timer = 0;
1541 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1542 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1543 /* Clear already pending ints */
1544 if (Read_hfc(cs, HFCPCI_INT_S1));
1545 Write_hfc(cs, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
1546 udelay(10);
1547 Write_hfc(cs, HFCPCI_STATES, 4);
1548 cs->dc.hfcpci.ph_state = 4;
1549 } else {
1550 cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_TIMER;
1551 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1552 cs->hw.hfcpci.ctmt &= ~HFCPCI_AUTO_TIMER;
1553 cs->hw.hfcpci.ctmt |= HFCPCI_TIM3_125;
1554 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
1555 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
1556 cs->hw.hfcpci.nt_timer = NT_T1_COUNT;
1557 Write_hfc(cs, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3); /* allow G2 -> G3 transition */
1559 break;
1560 case (1):
1561 case (3):
1562 case (4):
1563 cs->hw.hfcpci.nt_timer = 0;
1564 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1565 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1566 break;
1567 default:
1568 break;
1570 spin_unlock_irqrestore(&cs->lock, flags);
1573 if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
1574 DChannel_proc_rcv(cs);
1575 if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
1576 DChannel_proc_xmt(cs);
1580 /********************************/
1581 /* called for card init message */
1582 /********************************/
1583 static void __init
1584 inithfcpci(struct IsdnCardState *cs)
1586 cs->bcs[0].BC_SetStack = setstack_2b;
1587 cs->bcs[1].BC_SetStack = setstack_2b;
1588 cs->bcs[0].BC_Close = close_hfcpci;
1589 cs->bcs[1].BC_Close = close_hfcpci;
1590 cs->dbusytimer.function = (void *) hfcpci_dbusy_timer;
1591 cs->dbusytimer.data = (long) cs;
1592 init_timer(&cs->dbusytimer);
1593 mode_hfcpci(cs->bcs, 0, 0);
1594 mode_hfcpci(cs->bcs + 1, 0, 1);
1599 /*******************************************/
1600 /* handle card messages from control layer */
1601 /*******************************************/
1602 static int
1603 hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
1605 u_long flags;
1607 if (cs->debug & L1_DEB_ISAC)
1608 debugl1(cs, "HFCPCI: card_msg %x", mt);
1609 switch (mt) {
1610 case CARD_RESET:
1611 spin_lock_irqsave(&cs->lock, flags);
1612 reset_hfcpci(cs);
1613 spin_unlock_irqrestore(&cs->lock, flags);
1614 return (0);
1615 case CARD_RELEASE:
1616 release_io_hfcpci(cs);
1617 return (0);
1618 case CARD_INIT:
1619 spin_lock_irqsave(&cs->lock, flags);
1620 inithfcpci(cs);
1621 reset_hfcpci(cs);
1622 spin_unlock_irqrestore(&cs->lock, flags);
1623 msleep(80); /* Timeout 80ms */
1624 /* now switch timer interrupt off */
1625 spin_lock_irqsave(&cs->lock, flags);
1626 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1627 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1628 /* reinit mode reg */
1629 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1630 spin_unlock_irqrestore(&cs->lock, flags);
1631 return (0);
1632 case CARD_TEST:
1633 return (0);
1635 return (0);
1639 /* this variable is used as card index when more than one cards are present */
1640 static struct pci_dev *dev_hfcpci __initdata = NULL;
1642 #endif /* CONFIG_PCI */
1644 int __init
1645 setup_hfcpci(struct IsdnCard *card)
1647 u_long flags;
1648 struct IsdnCardState *cs = card->cs;
1649 char tmp[64];
1650 int i;
1651 struct pci_dev *tmp_hfcpci = NULL;
1653 #ifdef __BIG_ENDIAN
1654 #error "not running on big endian machines now"
1655 #endif
1656 strcpy(tmp, hfcpci_revision);
1657 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
1658 #ifdef CONFIG_PCI
1659 cs->hw.hfcpci.int_s1 = 0;
1660 cs->dc.hfcpci.ph_state = 0;
1661 cs->hw.hfcpci.fifo = 255;
1662 if (cs->typ == ISDN_CTYPE_HFC_PCI) {
1663 i = 0;
1664 while (id_list[i].vendor_id) {
1665 tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
1666 id_list[i].device_id,
1667 dev_hfcpci);
1668 i++;
1669 if (tmp_hfcpci) {
1670 if (pci_enable_device(tmp_hfcpci))
1671 continue;
1672 pci_set_master(tmp_hfcpci);
1673 if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[ 0].start & PCI_BASE_ADDRESS_IO_MASK)))
1674 continue;
1675 else
1676 break;
1680 if (tmp_hfcpci) {
1681 i--;
1682 dev_hfcpci = tmp_hfcpci; /* old device */
1683 cs->hw.hfcpci.dev = dev_hfcpci;
1684 cs->irq = dev_hfcpci->irq;
1685 if (!cs->irq) {
1686 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
1687 return (0);
1689 cs->hw.hfcpci.pci_io = (char *) dev_hfcpci->resource[ 1].start;
1690 printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
1691 } else {
1692 printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
1693 return (0);
1695 if (!cs->hw.hfcpci.pci_io) {
1696 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
1697 return (0);
1699 /* Allocate memory for FIFOS */
1700 /* Because the HFC-PCI needs a 32K physical alignment, we */
1701 /* need to allocate the double mem and align the address */
1702 if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
1703 printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
1704 return 0;
1706 cs->hw.hfcpci.fifos = (void *)
1707 (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000;
1708 pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos));
1709 cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
1710 printk(KERN_INFO
1711 "HFC-PCI: defined at mem %#x fifo %#x(%#x) IRQ %d HZ %d\n",
1712 (u_int) cs->hw.hfcpci.pci_io,
1713 (u_int) cs->hw.hfcpci.fifos,
1714 (u_int) virt_to_bus(cs->hw.hfcpci.fifos),
1715 cs->irq, HZ);
1716 spin_lock_irqsave(&cs->lock, flags);
1717 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
1718 cs->hw.hfcpci.int_m2 = 0; /* disable alle interrupts */
1719 cs->hw.hfcpci.int_m1 = 0;
1720 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1721 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
1722 /* At this point the needed PCI config is done */
1723 /* fifos are still not enabled */
1724 INIT_WORK(&cs->tqueue, (void *)(void *) hfcpci_bh, cs);
1725 cs->setstack_d = setstack_hfcpci;
1726 cs->BC_Send_Data = &hfcpci_send_data;
1727 cs->readisac = NULL;
1728 cs->writeisac = NULL;
1729 cs->readisacfifo = NULL;
1730 cs->writeisacfifo = NULL;
1731 cs->BC_Read_Reg = NULL;
1732 cs->BC_Write_Reg = NULL;
1733 cs->irq_func = &hfcpci_interrupt;
1734 cs->irq_flags |= SA_SHIRQ;
1735 cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
1736 cs->hw.hfcpci.timer.data = (long) cs;
1737 init_timer(&cs->hw.hfcpci.timer);
1738 cs->cardmsg = &hfcpci_card_msg;
1739 cs->auxcmd = &hfcpci_auxcmd;
1740 spin_unlock_irqrestore(&cs->lock, flags);
1741 return (1);
1742 } else
1743 return (0); /* no valid card type */
1744 #else
1745 printk(KERN_WARNING "HFC-PCI: NO_PCI_BIOS\n");
1746 return (0);
1747 #endif /* CONFIG_PCI */