Linux 2.6.21.1
drivers/isdn/hisax/hfc_pci.c
1 /* $Id: hfc_pci.c,v 1.48.2.4 2004/02/11 13:21:33 keil Exp $
3 * low level driver for CCD's hfc-pci based cards
5 * Author Werner Cornelius
6 * based on existing driver for CCD hfc ISA cards
7 * Copyright by Werner Cornelius <werner@isdn4linux.de>
8 * by Karsten Keil <keil@isdn4linux.de>
9 *
10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference.
13 * For changes and modifications please read
14 * Documentation/isdn/HiSax.cert
18 #include <linux/init.h>
19 #include "hisax.h"
20 #include "hfc_pci.h"
21 #include "isdnl1.h"
22 #include <linux/pci.h>
23 #include <linux/interrupt.h>
25 extern const char *CardType[];
27 static const char *hfcpci_revision = "$Revision: 1.48.2.4 $";
29 /* table entry in the PCI devices list */
30 typedef struct {
31 int vendor_id;
32 int device_id;
33 char *vendor_name;
34 char *card_name;
35 } PCI_ENTRY;
37 #define NT_T1_COUNT 20 /* number of 3.125ms interrupts for G2 timeout */
38 #define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
39 #define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
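/* Note on the values above: with the 3.125ms auto timer, NT_T1_COUNT = 20 */
/* gives a supervision window of about 20 * 3.125ms = 62.5ms before the    */
/* pending G2 activation is given up in hfcpci_bh() below. The two CLKDEL  */
/* values are simply the ST-bit delay settings loaded for TE and NT mode.  */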
41 static const PCI_ENTRY id_list[] =
43 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0, "CCD/Billion/Asuscom", "2BD0"},
44 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000, "Billion", "B000"},
45 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006, "Billion", "B006"},
46 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007, "Billion", "B007"},
47 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008, "Billion", "B008"},
48 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009, "Billion", "B009"},
49 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A, "Billion", "B00A"},
50 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, "Billion", "B00B"},
51 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, "Billion", "B00C"},
52 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, "Seyeon", "B100"},
53 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700, "Primux II S0", "B700"},
54 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701, "Primux II S0 NT", "B701"},
55 {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, "Abocom/Magitek", "2BD1"},
56 {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, "Asuscom/Askey", "675"},
57 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, "German telekom", "T-Concept"},
58 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T, "German telekom", "A1T"},
59 {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575, "Motorola MC145575", "MC145575"},
60 {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0, "Zoltrix", "2BD0"},
61 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E,"Digi International", "Digi DataFire Micro V IOM2 (Europe)"},
62 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,"Digi International", "Digi DataFire Micro V (Europe)"},
63 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,"Digi International", "Digi DataFire Micro V IOM2 (North America)"},
64 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,"Digi International", "Digi DataFire Micro V (North America)"},
65 {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, "Sitecom Europe", "DC-105 ISDN PCI"},
66 {0, 0, NULL, NULL},
70 #ifdef CONFIG_PCI
72 /******************************************/
73 /* free hardware resources used by driver */
74 /******************************************/
75 static void
76 release_io_hfcpci(struct IsdnCardState *cs)
78 printk(KERN_INFO "HiSax: release hfcpci at %p\n",
79 cs->hw.hfcpci.pci_io);
80 cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */
81 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
82 Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */
83 mdelay(10);
84 Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */
85 mdelay(10);
86 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
87 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, 0); /* disable memory mapped ports + busmaster */
88 del_timer(&cs->hw.hfcpci.timer);
89 kfree(cs->hw.hfcpci.share_start);
90 cs->hw.hfcpci.share_start = NULL;
91 iounmap((void *)cs->hw.hfcpci.pci_io);
94 /********************************************************************************/
95 /* function called to reset the HFC PCI chip. A complete software reset of chip */
96 /* and fifos is done. */
97 /********************************************************************************/
98 static void
99 reset_hfcpci(struct IsdnCardState *cs)
101 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
102 cs->hw.hfcpci.int_m2 = 0; /* interrupt output off ! */
103 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
105 printk(KERN_INFO "HFC_PCI: resetting card\n");
106 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO + PCI_ENA_MASTER); /* enable memory ports + busmaster */
107 Write_hfc(cs, HFCPCI_CIRM, HFCPCI_RESET); /* Reset On */
108 mdelay(10);
109 Write_hfc(cs, HFCPCI_CIRM, 0); /* Reset Off */
110 mdelay(10);
111 if (Read_hfc(cs, HFCPCI_STATUS) & 2)
112 printk(KERN_WARNING "HFC-PCI init bit busy\n");
114 cs->hw.hfcpci.fifo_en = 0x30; /* only D fifos enabled */
115 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
117 cs->hw.hfcpci.trm = 0 + HFCPCI_BTRANS_THRESMASK; /* no echo connect , threshold */
118 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
120 Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_TE); /* ST-Bit delay for TE-Mode */
121 cs->hw.hfcpci.sctrl_e = HFCPCI_AUTO_AWAKE;
122 Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e); /* S/T Auto awake */
123 cs->hw.hfcpci.bswapped = 0; /* no exchange */
124 cs->hw.hfcpci.nt_mode = 0; /* we are in TE mode */
125 cs->hw.hfcpci.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
126 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
128 cs->hw.hfcpci.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
129 HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
130 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
132 /* Clear already pending ints */
133 if (Read_hfc(cs, HFCPCI_INT_S1));
135 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 2); /* HFC ST 2 */
136 udelay(10);
137 Write_hfc(cs, HFCPCI_STATES, 2); /* HFC ST 2 */
138 cs->hw.hfcpci.mst_m = HFCPCI_MASTER; /* HFC Master Mode */
140 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
141 cs->hw.hfcpci.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
142 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
143 cs->hw.hfcpci.sctrl_r = 0;
144 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
146 /* Init GCI/IOM2 in master mode */
147 /* Slots 0 and 1 are set for B-chan 1 and 2 */
148 /* D- and monitor/CI channel are not enabled */
149 /* STIO1 is used as output for data, B1+B2 from ST->IOM+HFC */
150 /* STIO2 is used as data input, B1+B2 from IOM->ST */
151 /* ST B-channel send disabled -> continuous 1s */
152 /* The IOM slots are always enabled */
153 cs->hw.hfcpci.conn = 0x36; /* set data flow directions */
154 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
155 Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* B1-Slot 0 STIO1 out enabled */
156 Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* B2-Slot 1 STIO1 out enabled */
157 Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* B1-Slot 0 STIO2 in enabled */
158 Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* B2-Slot 1 STIO2 in enabled */
160 /* Finally enable IRQ output */
161 cs->hw.hfcpci.int_m2 = HFCPCI_IRQ_ENABLE;
162 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
163 if (Read_hfc(cs, HFCPCI_INT_S1));
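/* After this reset the chip is left in TE mode with only the D-channel    */
/* FIFOs enabled (fifo_en = 0x30); the B-channel FIFOs, SCTRL enables and  */
/* interrupt bits are switched on per channel later in mode_hfcpci(). The  */
/* dummy reads of INT_S1 only clear interrupts that were already pending.  */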
166 /***************************************************/
167 /* Timer function called when kernel timer expires */
168 /***************************************************/
169 static void
170 hfcpci_Timer(struct IsdnCardState *cs)
172 cs->hw.hfcpci.timer.expires = jiffies + 75;
173 /* WD RESET */
174 /* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcpci.ctmt | 0x80);
175 add_timer(&cs->hw.hfcpci.timer); */
180 /*********************************/
181 /* schedule a new D-channel task */
182 /*********************************/
183 static void
184 sched_event_D_pci(struct IsdnCardState *cs, int event)
186 test_and_set_bit(event, &cs->event);
187 schedule_work(&cs->tqueue);
190 /*********************************/
191 /* schedule a new b_channel task */
192 /*********************************/
193 static void
194 hfcpci_sched_event(struct BCState *bcs, int event)
196 test_and_set_bit(event, &bcs->event);
197 schedule_work(&bcs->tqueue);
200 /************************************************/
201 /* select a b-channel entry matching and active */
202 /************************************************/
203 static
204 struct BCState *
205 Sel_BCS(struct IsdnCardState *cs, int channel)
207 if (cs->bcs[0].mode && (cs->bcs[0].channel == channel))
208 return (&cs->bcs[0]);
209 else if (cs->bcs[1].mode && (cs->bcs[1].channel == channel))
210 return (&cs->bcs[1]);
211 else
212 return (NULL);
215 /***************************************/
216 /* clear the desired B-channel rx fifo */
217 /***************************************/
218 static void hfcpci_clear_fifo_rx(struct IsdnCardState *cs, int fifo)
219 { u_char fifo_state;
220 bzfifo_type *bzr;
222 if (fifo) {
223 bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
224 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2RX;
225 } else {
226 bzr = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
227 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1RX;
229 if (fifo_state)
230 cs->hw.hfcpci.fifo_en ^= fifo_state;
231 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
232 cs->hw.hfcpci.last_bfifo_cnt[fifo] = 0;
233 bzr->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
234 bzr->za[MAX_B_FRAMES].z2 = bzr->za[MAX_B_FRAMES].z1;
235 bzr->f1 = MAX_B_FRAMES;
236 bzr->f2 = bzr->f1; /* init F pointers to remain constant */
237 if (fifo_state)
238 cs->hw.hfcpci.fifo_en |= fifo_state;
239 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
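/* Bookkeeping shared by all B FIFOs: Z1/Z2 are the fill and read byte     */
/* pointers and F1/F2 the frame counters of the memory mapped FIFO.        */
/* Clearing a FIFO parks all four pointers on the same value so that it    */
/* reads as empty; the FIFO is switched off in FIFO_EN while its pointers  */
/* are rewritten and switched back on afterwards, as done above.           */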
242 /***************************************/
243 /* clear the desired B-channel tx fifo */
244 /***************************************/
245 static void hfcpci_clear_fifo_tx(struct IsdnCardState *cs, int fifo)
246 { u_char fifo_state;
247 bzfifo_type *bzt;
249 if (fifo) {
250 bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
251 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B2TX;
252 } else {
253 bzt = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
254 fifo_state = cs->hw.hfcpci.fifo_en & HFCPCI_FIFOEN_B1TX;
256 if (fifo_state)
257 cs->hw.hfcpci.fifo_en ^= fifo_state;
258 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
259 bzt->za[MAX_B_FRAMES].z1 = B_FIFO_SIZE + B_SUB_VAL - 1;
260 bzt->za[MAX_B_FRAMES].z2 = bzt->za[MAX_B_FRAMES].z1;
261 bzt->f1 = MAX_B_FRAMES;
262 bzt->f2 = bzt->f1; /* init F pointers to remain constant */
263 if (fifo_state)
264 cs->hw.hfcpci.fifo_en |= fifo_state;
265 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
268 /*********************************************/
269 /* read a complete B-frame out of the buffer */
270 /*********************************************/
271 static struct sk_buff *
273 hfcpci_empty_fifo(struct BCState *bcs, bzfifo_type * bz, u_char * bdata, int count)
275 u_char *ptr, *ptr1, new_f2;
276 struct sk_buff *skb;
277 struct IsdnCardState *cs = bcs->cs;
278 int total, maxlen, new_z2;
279 z_type *zp;
281 if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
282 debugl1(cs, "hfcpci_empty_fifo");
283 zp = &bz->za[bz->f2]; /* point to Z-Regs */
284 new_z2 = zp->z2 + count; /* new position in fifo */
285 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
286 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
287 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
288 if ((count > HSCX_BUFMAX + 3) || (count < 4) ||
289 (*(bdata + (zp->z1 - B_SUB_VAL)))) {
290 if (cs->debug & L1_DEB_WARN)
291 debugl1(cs, "hfcpci_empty_fifo: incoming packet invalid length %d or crc", count);
292 #ifdef ERROR_STATISTIC
293 bcs->err_inv++;
294 #endif
295 bz->za[new_f2].z2 = new_z2;
296 bz->f2 = new_f2; /* next buffer */
297 skb = NULL;
298 } else if (!(skb = dev_alloc_skb(count - 3)))
299 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
300 else {
301 total = count;
302 count -= 3;
303 ptr = skb_put(skb, count);
305 if (zp->z2 + count <= B_FIFO_SIZE + B_SUB_VAL)
306 maxlen = count; /* complete transfer */
307 else
308 maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */
310 ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */
311 memcpy(ptr, ptr1, maxlen); /* copy data */
312 count -= maxlen;
314 if (count) { /* rest remaining */
315 ptr += maxlen;
316 ptr1 = bdata; /* start of buffer */
317 memcpy(ptr, ptr1, count); /* rest */
319 bz->za[new_f2].z2 = new_z2;
320 bz->f2 = new_f2; /* next buffer */
323 return (skb);
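/* The copy above is done in at most two pieces because the B FIFO is a    */
/* ring buffer and a frame may wrap from the end of the data area back to  */
/* its start. The last three stored bytes of a frame are not copied;       */
/* judging by the check on the byte at Z1, they are the HDLC CRC plus a    */
/* status byte written by the chip (non-zero status = bad frame).          */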
326 /*******************************/
327 /* D-channel receive procedure */
328 /*******************************/
329 static int
331 receive_dmsg(struct IsdnCardState *cs)
333 struct sk_buff *skb;
334 int maxlen;
335 int rcnt, total;
336 int count = 5;
337 u_char *ptr, *ptr1;
338 dfifo_type *df;
339 z_type *zp;
341 df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_rx;
342 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
343 debugl1(cs, "rec_dmsg blocked");
344 return (1);
346 while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
347 zp = &df->za[df->f2 & D_FREG_MASK];
348 rcnt = zp->z1 - zp->z2;
349 if (rcnt < 0)
350 rcnt += D_FIFO_SIZE;
351 rcnt++;
352 if (cs->debug & L1_DEB_ISAC)
353 debugl1(cs, "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)",
354 df->f1, df->f2, zp->z1, zp->z2, rcnt);
356 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
357 (df->data[zp->z1])) {
358 if (cs->debug & L1_DEB_WARN)
359 debugl1(cs, "empty_fifo hfcpci packet invalid len %d or crc %d", rcnt, df->data[zp->z1]);
360 #ifdef ERROR_STATISTIC
361 cs->err_rx++;
362 #endif
363 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */
364 df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + rcnt) & (D_FIFO_SIZE - 1);
365 } else if ((skb = dev_alloc_skb(rcnt - 3))) {
366 total = rcnt;
367 rcnt -= 3;
368 ptr = skb_put(skb, rcnt);
370 if (zp->z2 + rcnt <= D_FIFO_SIZE)
371 maxlen = rcnt; /* complete transfer */
372 else
373 maxlen = D_FIFO_SIZE - zp->z2; /* maximum */
375 ptr1 = df->data + zp->z2; /* start of data */
376 memcpy(ptr, ptr1, maxlen); /* copy data */
377 rcnt -= maxlen;
379 if (rcnt) { /* rest remaining */
380 ptr += maxlen;
381 ptr1 = df->data; /* start of buffer */
382 memcpy(ptr, ptr1, rcnt); /* rest */
384 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */
385 df->za[df->f2 & D_FREG_MASK].z2 = (zp->z2 + total) & (D_FIFO_SIZE - 1);
387 skb_queue_tail(&cs->rq, skb);
388 sched_event_D_pci(cs, D_RCVBUFREADY);
389 } else
390 printk(KERN_WARNING "HFC-PCI: D receive out of memory\n");
392 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
393 return (1);
396 /*******************************************************************************/
397 /* check for transparent receive data and read max one threshold size if avail */
398 /*******************************************************************************/
399 static int
400 hfcpci_empty_fifo_trans(struct BCState *bcs, bzfifo_type * bz, u_char * bdata)
402 unsigned short *z1r, *z2r;
403 int new_z2, fcnt, maxlen;
404 struct sk_buff *skb;
405 u_char *ptr, *ptr1;
407 z1r = &bz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
408 z2r = z1r + 1;
410 if (!(fcnt = *z1r - *z2r))
411 return (0); /* no data avail */
413 if (fcnt <= 0)
414 fcnt += B_FIFO_SIZE; /* bytes actually buffered */
415 if (fcnt > HFCPCI_BTRANS_THRESHOLD)
416 fcnt = HFCPCI_BTRANS_THRESHOLD; /* limit size */
418 new_z2 = *z2r + fcnt; /* new position in fifo */
419 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
420 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
422 if (!(skb = dev_alloc_skb(fcnt)))
423 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
424 else {
425 ptr = skb_put(skb, fcnt);
426 if (*z2r + fcnt <= B_FIFO_SIZE + B_SUB_VAL)
427 maxlen = fcnt; /* complete transfer */
428 else
429 maxlen = B_FIFO_SIZE + B_SUB_VAL - *z2r; /* maximum */
431 ptr1 = bdata + (*z2r - B_SUB_VAL); /* start of data */
432 memcpy(ptr, ptr1, maxlen); /* copy data */
433 fcnt -= maxlen;
435 if (fcnt) { /* rest remaining */
436 ptr += maxlen;
437 ptr1 = bdata; /* start of buffer */
438 memcpy(ptr, ptr1, fcnt); /* rest */
440 skb_queue_tail(&bcs->rqueue, skb);
441 hfcpci_sched_event(bcs, B_RCVBUFREADY);
444 *z2r = new_z2; /* new position */
445 return (1);
446 } /* hfcpci_empty_fifo_trans */
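/* Transparent mode has no frame boundaries, so only the Z1/Z2 pointers of */
/* the dummy frame slot MAX_B_FRAMES are used and at most                  */
/* HFCPCI_BTRANS_THRESHOLD bytes are delivered per call; main_rec_hfcpci() */
/* keeps calling this function as long as it reports more data.            */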
448 /**********************************/
449 /* B-channel main receive routine */
450 /**********************************/
451 static void
452 main_rec_hfcpci(struct BCState *bcs)
454 struct IsdnCardState *cs = bcs->cs;
455 int rcnt, real_fifo;
456 int receive, count = 5;
457 struct sk_buff *skb;
458 bzfifo_type *bz;
459 u_char *bdata;
460 z_type *zp;
463 if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
464 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
465 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
466 real_fifo = 1;
467 } else {
468 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b1;
469 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b1;
470 real_fifo = 0;
472 Begin:
473 count--;
474 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
475 debugl1(cs, "rec_data %d blocked", bcs->channel);
476 return;
478 if (bz->f1 != bz->f2) {
479 if (cs->debug & L1_DEB_HSCX)
480 debugl1(cs, "hfcpci rec %d f1(%d) f2(%d)",
481 bcs->channel, bz->f1, bz->f2);
482 zp = &bz->za[bz->f2];
484 rcnt = zp->z1 - zp->z2;
485 if (rcnt < 0)
486 rcnt += B_FIFO_SIZE;
487 rcnt++;
488 if (cs->debug & L1_DEB_HSCX)
489 debugl1(cs, "hfcpci rec %d z1(%x) z2(%x) cnt(%d)",
490 bcs->channel, zp->z1, zp->z2, rcnt);
491 if ((skb = hfcpci_empty_fifo(bcs, bz, bdata, rcnt))) {
492 skb_queue_tail(&bcs->rqueue, skb);
493 hfcpci_sched_event(bcs, B_RCVBUFREADY);
495 rcnt = bz->f1 - bz->f2;
496 if (rcnt < 0)
497 rcnt += MAX_B_FRAMES + 1;
498 if (cs->hw.hfcpci.last_bfifo_cnt[real_fifo] > rcnt + 1) {
499 rcnt = 0;
500 hfcpci_clear_fifo_rx(cs, real_fifo);
502 cs->hw.hfcpci.last_bfifo_cnt[real_fifo] = rcnt;
503 if (rcnt > 1)
504 receive = 1;
505 else
506 receive = 0;
507 } else if (bcs->mode == L1_MODE_TRANS)
508 receive = hfcpci_empty_fifo_trans(bcs, bz, bdata);
509 else
510 receive = 0;
511 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
512 if (count && receive)
513 goto Begin;
514 return;
517 /**************************/
518 /* D-channel send routine */
519 /**************************/
520 static void
521 hfcpci_fill_dfifo(struct IsdnCardState *cs)
523 int fcnt;
524 int count, new_z1, maxlen;
525 dfifo_type *df;
526 u_char *src, *dst, new_f1;
528 if (!cs->tx_skb)
529 return;
530 if (cs->tx_skb->len <= 0)
531 return;
533 df = &((fifo_area *) (cs->hw.hfcpci.fifos))->d_chan.d_tx;
535 if (cs->debug & L1_DEB_ISAC)
536 debugl1(cs, "hfcpci_fill_Dfifo f1(%d) f2(%d) z1(f1)(%x)",
537 df->f1, df->f2,
538 df->za[df->f1 & D_FREG_MASK].z1);
539 fcnt = df->f1 - df->f2; /* frame count actually buffered */
540 if (fcnt < 0)
541 fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
542 if (fcnt > (MAX_D_FRAMES - 1)) {
543 if (cs->debug & L1_DEB_ISAC)
544 debugl1(cs, "hfcpci_fill_Dfifo more than 14 frames");
545 #ifdef ERROR_STATISTIC
546 cs->err_tx++;
547 #endif
548 return;
550 /* now determine free bytes in FIFO buffer */
551 count = df->za[df->f2 & D_FREG_MASK].z2 - df->za[df->f1 & D_FREG_MASK].z1 - 1;
552 if (count <= 0)
553 count += D_FIFO_SIZE; /* count now contains available bytes */
555 if (cs->debug & L1_DEB_ISAC)
556 debugl1(cs, "hfcpci_fill_Dfifo count(%ld/%d)",
557 cs->tx_skb->len, count);
558 if (count < cs->tx_skb->len) {
559 if (cs->debug & L1_DEB_ISAC)
560 debugl1(cs, "hfcpci_fill_Dfifo no fifo mem");
561 return;
563 count = cs->tx_skb->len; /* get frame len */
564 new_z1 = (df->za[df->f1 & D_FREG_MASK].z1 + count) & (D_FIFO_SIZE - 1);
565 new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
566 src = cs->tx_skb->data; /* source pointer */
567 dst = df->data + df->za[df->f1 & D_FREG_MASK].z1;
568 maxlen = D_FIFO_SIZE - df->za[df->f1 & D_FREG_MASK].z1; /* end fifo */
569 if (maxlen > count)
570 maxlen = count; /* limit size */
571 memcpy(dst, src, maxlen); /* first copy */
573 count -= maxlen; /* remaining bytes */
574 if (count) {
575 dst = df->data; /* start of buffer */
576 src += maxlen; /* new position */
577 memcpy(dst, src, count);
579 df->za[new_f1 & D_FREG_MASK].z1 = new_z1; /* for next buffer */
580 df->za[df->f1 & D_FREG_MASK].z1 = new_z1; /* new pos actual buffer */
581 df->f1 = new_f1; /* next frame */
583 dev_kfree_skb_any(cs->tx_skb);
584 cs->tx_skb = NULL;
585 return;
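/* Free space in the D transmit FIFO is the gap between the read pointer   */
/* (Z2 of the oldest queued frame) and the current write pointer Z1, minus */
/* one byte, wrapped modulo D_FIFO_SIZE. A frame is copied only when it    */
/* fits completely; otherwise it stays in cs->tx_skb and is retried later  */
/* (e.g. from the next transmit interrupt).                                */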
588 /**************************/
589 /* B-channel send routine */
590 /**************************/
591 static void
592 hfcpci_fill_fifo(struct BCState *bcs)
594 struct IsdnCardState *cs = bcs->cs;
595 int maxlen, fcnt;
596 int count, new_z1;
597 bzfifo_type *bz;
598 u_char *bdata;
599 u_char new_f1, *src, *dst;
600 unsigned short *z1t, *z2t;
602 if (!bcs->tx_skb)
603 return;
604 if (bcs->tx_skb->len <= 0)
605 return;
607 if ((bcs->channel) && (!cs->hw.hfcpci.bswapped)) {
608 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b2;
609 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b2;
610 } else {
611 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txbz_b1;
612 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.txdat_b1;
615 if (bcs->mode == L1_MODE_TRANS) {
616 z1t = &bz->za[MAX_B_FRAMES].z1;
617 z2t = z1t + 1;
618 if (cs->debug & L1_DEB_HSCX)
619 debugl1(cs, "hfcpci_fill_fifo_trans %d z1(%x) z2(%x)",
620 bcs->channel, *z1t, *z2t);
621 fcnt = *z2t - *z1t;
622 if (fcnt <= 0)
623 fcnt += B_FIFO_SIZE; /* fcnt contains available bytes in fifo */
624 fcnt = B_FIFO_SIZE - fcnt; /* remaining bytes to send */
626 while ((fcnt < 2 * HFCPCI_BTRANS_THRESHOLD) && (bcs->tx_skb)) {
627 if (bcs->tx_skb->len < B_FIFO_SIZE - fcnt) {
628 /* data is suitable for fifo */
629 count = bcs->tx_skb->len;
631 new_z1 = *z1t + count; /* new buffer Position */
632 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
633 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
634 src = bcs->tx_skb->data; /* source pointer */
635 dst = bdata + (*z1t - B_SUB_VAL);
636 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - *z1t; /* end of fifo */
637 if (maxlen > count)
638 maxlen = count; /* limit size */
639 memcpy(dst, src, maxlen); /* first copy */
641 count -= maxlen; /* remaining bytes */
642 if (count) {
643 dst = bdata; /* start of buffer */
644 src += maxlen; /* new position */
645 memcpy(dst, src, count);
647 bcs->tx_cnt -= bcs->tx_skb->len;
648 fcnt += bcs->tx_skb->len;
649 *z1t = new_z1; /* now send data */
650 } else if (cs->debug & L1_DEB_HSCX)
651 debugl1(cs, "hfcpci_fill_fifo_trans %d frame length %d discarded",
652 bcs->channel, bcs->tx_skb->len);
654 if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
655 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
656 u_long flags;
657 spin_lock_irqsave(&bcs->aclock, flags);
658 bcs->ackcnt += bcs->tx_skb->len;
659 spin_unlock_irqrestore(&bcs->aclock, flags);
660 schedule_event(bcs, B_ACKPENDING);
663 dev_kfree_skb_any(bcs->tx_skb);
664 bcs->tx_skb = skb_dequeue(&bcs->squeue); /* fetch next data */
666 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
667 return;
669 if (cs->debug & L1_DEB_HSCX)
670 debugl1(cs, "hfcpci_fill_fifo_hdlc %d f1(%d) f2(%d) z1(f1)(%x)",
671 bcs->channel, bz->f1, bz->f2,
672 bz->za[bz->f1].z1);
674 fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
675 if (fcnt < 0)
676 fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
677 if (fcnt > (MAX_B_FRAMES - 1)) {
678 if (cs->debug & L1_DEB_HSCX)
679 debugl1(cs, "hfcpci_fill_Bfifo more than 14 frames");
680 return;
682 /* now determine free bytes in FIFO buffer */
683 count = bz->za[bz->f2].z2 - bz->za[bz->f1].z1 - 1;
684 if (count <= 0)
685 count += B_FIFO_SIZE; /* count now contains available bytes */
687 if (cs->debug & L1_DEB_HSCX)
688 debugl1(cs, "hfcpci_fill_fifo %d count(%ld/%d),%lx",
689 bcs->channel, bcs->tx_skb->len,
690 count, current->state);
692 if (count < bcs->tx_skb->len) {
693 if (cs->debug & L1_DEB_HSCX)
694 debugl1(cs, "hfcpci_fill_fifo no fifo mem");
695 return;
697 count = bcs->tx_skb->len; /* get frame len */
698 new_z1 = bz->za[bz->f1].z1 + count; /* new buffer Position */
699 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
700 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
702 new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
703 src = bcs->tx_skb->data; /* source pointer */
704 dst = bdata + (bz->za[bz->f1].z1 - B_SUB_VAL);
705 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - bz->za[bz->f1].z1; /* end fifo */
706 if (maxlen > count)
707 maxlen = count; /* limit size */
708 memcpy(dst, src, maxlen); /* first copy */
710 count -= maxlen; /* remaining bytes */
711 if (count) {
712 dst = bdata; /* start of buffer */
713 src += maxlen; /* new position */
714 memcpy(dst, src, count);
716 bcs->tx_cnt -= bcs->tx_skb->len;
717 if (test_bit(FLG_LLI_L1WAKEUP,&bcs->st->lli.flag) &&
718 (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
719 u_long flags;
720 spin_lock_irqsave(&bcs->aclock, flags);
721 bcs->ackcnt += bcs->tx_skb->len;
722 spin_unlock_irqrestore(&bcs->aclock, flags);
723 schedule_event(bcs, B_ACKPENDING);
726 bz->za[new_f1].z1 = new_z1; /* for next buffer */
727 bz->f1 = new_f1; /* next frame */
729 dev_kfree_skb_any(bcs->tx_skb);
730 bcs->tx_skb = NULL;
731 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
732 return;
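/* Two cases above: in transparent mode data is streamed into the FIFO     */
/* until roughly twice the transmit threshold is buffered, each skb being  */
/* freed (and possibly acknowledged) as soon as it has been copied; in     */
/* HDLC mode the logic mirrors hfcpci_fill_dfifo(), allowing at most       */
/* MAX_B_FRAMES - 1 queued frames and copying a frame only when it fits    */
/* into the free FIFO space.                                               */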
735 /**********************************************/
736 /* D-channel l1 state call for leased NT-mode */
737 /**********************************************/
738 static void
739 dch_nt_l2l1(struct PStack *st, int pr, void *arg)
741 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
743 switch (pr) {
744 case (PH_DATA | REQUEST):
745 case (PH_PULL | REQUEST):
746 case (PH_PULL | INDICATION):
747 st->l1.l1hw(st, pr, arg);
748 break;
749 case (PH_ACTIVATE | REQUEST):
750 st->l1.l1l2(st, PH_ACTIVATE | CONFIRM, NULL);
751 break;
752 case (PH_TESTLOOP | REQUEST):
753 if (1 & (long) arg)
754 debugl1(cs, "PH_TEST_LOOP B1");
755 if (2 & (long) arg)
756 debugl1(cs, "PH_TEST_LOOP B2");
757 if (!(3 & (long) arg))
758 debugl1(cs, "PH_TEST_LOOP DISABLED");
759 st->l1.l1hw(st, HW_TESTLOOP | REQUEST, arg);
760 break;
761 default:
762 if (cs->debug)
763 debugl1(cs, "dch_nt_l2l1 msg %04X unhandled", pr);
764 break;
770 /***********************/
771 /* set/reset echo mode */
772 /***********************/
773 static int
774 hfcpci_auxcmd(struct IsdnCardState *cs, isdn_ctrl * ic)
776 u_long flags;
777 int i = *(unsigned int *) ic->parm.num;
779 if ((ic->arg == 98) &&
780 (!(cs->hw.hfcpci.int_m1 & (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC + HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC)))) {
781 spin_lock_irqsave(&cs->lock, flags);
782 Write_hfc(cs, HFCPCI_CLKDEL, CLKDEL_NT); /* ST-Bit delay for NT-Mode */
783 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 0); /* HFC ST G0 */
784 udelay(10);
785 cs->hw.hfcpci.sctrl |= SCTRL_MODE_NT;
786 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl); /* set NT-mode */
787 udelay(10);
788 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 1); /* HFC ST G1 */
789 udelay(10);
790 Write_hfc(cs, HFCPCI_STATES, 1 | HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
791 cs->dc.hfcpci.ph_state = 1;
792 cs->hw.hfcpci.nt_mode = 1;
793 cs->hw.hfcpci.nt_timer = 0;
794 cs->stlist->l2.l2l1 = dch_nt_l2l1;
795 spin_unlock_irqrestore(&cs->lock, flags);
796 debugl1(cs, "NT mode activated");
797 return (0);
799 if ((cs->chanlimit > 1) || (cs->hw.hfcpci.bswapped) ||
800 (cs->hw.hfcpci.nt_mode) || (ic->arg != 12))
801 return (-EINVAL);
803 spin_lock_irqsave(&cs->lock, flags);
804 if (i) {
805 cs->logecho = 1;
806 cs->hw.hfcpci.trm |= 0x20; /* enable echo chan */
807 cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_B2REC;
808 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2RX;
809 } else {
810 cs->logecho = 0;
811 cs->hw.hfcpci.trm &= ~0x20; /* disable echo chan */
812 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_B2REC;
813 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2RX;
815 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
816 cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
817 cs->hw.hfcpci.conn |= 0x10; /* B2-IOM -> B2-ST */
818 cs->hw.hfcpci.ctmt &= ~2;
819 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
820 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
821 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
822 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
823 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
824 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
825 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
826 spin_unlock_irqrestore(&cs->lock, flags);
827 return (0);
828 } /* hfcpci_auxcmd */
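/* As used here, ic->arg == 98 switches the whole card into NT mode and    */
/* rebinds the D-channel stack to dch_nt_l2l1() above (leased line         */
/* operation), while ic->arg == 12 turns E-channel (D-echo) logging on or  */
/* off by routing the echo data through the otherwise unused B2 receive    */
/* FIFO; see receive_emsg() below for the logging side.                    */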
830 /*****************************/
831 /* E-channel receive routine */
832 /*****************************/
833 static void
834 receive_emsg(struct IsdnCardState *cs)
836 int rcnt;
837 int receive, count = 5;
838 bzfifo_type *bz;
839 u_char *bdata;
840 z_type *zp;
841 u_char *ptr, *ptr1, new_f2;
842 int total, maxlen, new_z2;
843 u_char e_buffer[256];
845 bz = &((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxbz_b2;
846 bdata = ((fifo_area *) (cs->hw.hfcpci.fifos))->b_chans.rxdat_b2;
847 Begin:
848 count--;
849 if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
850 debugl1(cs, "echo_rec_data blocked");
851 return;
853 if (bz->f1 != bz->f2) {
854 if (cs->debug & L1_DEB_ISAC)
855 debugl1(cs, "hfcpci e_rec f1(%d) f2(%d)",
856 bz->f1, bz->f2);
857 zp = &bz->za[bz->f2];
859 rcnt = zp->z1 - zp->z2;
860 if (rcnt < 0)
861 rcnt += B_FIFO_SIZE;
862 rcnt++;
863 if (cs->debug & L1_DEB_ISAC)
864 debugl1(cs, "hfcpci e_rec z1(%x) z2(%x) cnt(%d)",
865 zp->z1, zp->z2, rcnt);
866 new_z2 = zp->z2 + rcnt; /* new position in fifo */
867 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
868 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
869 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
870 if ((rcnt > 256 + 3) || (rcnt < 4) ||
871 (*(bdata + (zp->z1 - B_SUB_VAL)))) {
872 if (cs->debug & L1_DEB_WARN)
873 debugl1(cs, "hfcpci_empty_echan: incoming packet invalid length %d or crc", rcnt);
874 bz->za[new_f2].z2 = new_z2;
875 bz->f2 = new_f2; /* next buffer */
876 } else {
877 total = rcnt;
878 rcnt -= 3;
879 ptr = e_buffer;
881 if (zp->z2 + rcnt <= B_FIFO_SIZE + B_SUB_VAL)
882 maxlen = rcnt; /* complete transfer */
883 else
884 maxlen = B_FIFO_SIZE + B_SUB_VAL - zp->z2; /* maximum */
886 ptr1 = bdata + (zp->z2 - B_SUB_VAL); /* start of data */
887 memcpy(ptr, ptr1, maxlen); /* copy data */
888 rcnt -= maxlen;
890 if (rcnt) { /* rest remaining */
891 ptr += maxlen;
892 ptr1 = bdata; /* start of buffer */
893 memcpy(ptr, ptr1, rcnt); /* rest */
895 bz->za[new_f2].z2 = new_z2;
896 bz->f2 = new_f2; /* next buffer */
897 if (cs->debug & DEB_DLOG_HEX) {
898 ptr = cs->dlog;
899 if ((total - 3) < MAX_DLOG_SPACE / 3 - 10) {
900 *ptr++ = 'E';
901 *ptr++ = 'C';
902 *ptr++ = 'H';
903 *ptr++ = 'O';
904 *ptr++ = ':';
905 ptr += QuickHex(ptr, e_buffer, total - 3);
906 ptr--;
907 *ptr++ = '\n';
908 *ptr = 0;
909 HiSax_putstatus(cs, NULL, cs->dlog);
910 } else
911 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
915 rcnt = bz->f1 - bz->f2;
916 if (rcnt < 0)
917 rcnt += MAX_B_FRAMES + 1;
918 if (rcnt > 1)
919 receive = 1;
920 else
921 receive = 0;
922 } else
923 receive = 0;
924 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
925 if (count && receive)
926 goto Begin;
927 return;
928 } /* receive_emsg */
930 /*********************/
931 /* Interrupt handler */
932 /*********************/
933 static irqreturn_t
934 hfcpci_interrupt(int intno, void *dev_id)
936 u_long flags;
937 struct IsdnCardState *cs = dev_id;
938 u_char exval;
939 struct BCState *bcs;
940 int count = 15;
941 u_char val, stat;
943 if (!(cs->hw.hfcpci.int_m2 & 0x08)) {
944 debugl1(cs, "HFC-PCI: int_m2 %x not initialised", cs->hw.hfcpci.int_m2);
945 return IRQ_NONE; /* not initialised */
947 spin_lock_irqsave(&cs->lock, flags);
948 if (HFCPCI_ANYINT & (stat = Read_hfc(cs, HFCPCI_STATUS))) {
949 val = Read_hfc(cs, HFCPCI_INT_S1);
950 if (cs->debug & L1_DEB_ISAC)
951 debugl1(cs, "HFC-PCI: stat(%02x) s1(%02x)", stat, val);
952 } else {
953 spin_unlock_irqrestore(&cs->lock, flags);
954 return IRQ_NONE;
956 if (cs->debug & L1_DEB_ISAC)
957 debugl1(cs, "HFC-PCI irq %x %s", val,
958 test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags) ?
959 "locked" : "unlocked");
960 val &= cs->hw.hfcpci.int_m1;
961 if (val & 0x40) { /* state machine irq */
962 exval = Read_hfc(cs, HFCPCI_STATES) & 0xf;
963 if (cs->debug & L1_DEB_ISAC)
964 debugl1(cs, "ph_state chg %d->%d", cs->dc.hfcpci.ph_state,
965 exval);
966 cs->dc.hfcpci.ph_state = exval;
967 sched_event_D_pci(cs, D_L1STATECHANGE);
968 val &= ~0x40;
970 if (val & 0x80) { /* timer irq */
971 if (cs->hw.hfcpci.nt_mode) {
972 if ((--cs->hw.hfcpci.nt_timer) < 0)
973 sched_event_D_pci(cs, D_L1STATECHANGE);
975 val &= ~0x80;
976 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
978 while (val) {
979 if (test_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
980 cs->hw.hfcpci.int_s1 |= val;
981 spin_unlock_irqrestore(&cs->lock, flags);
982 return IRQ_HANDLED;
984 if (cs->hw.hfcpci.int_s1 & 0x18) {
985 exval = val;
986 val = cs->hw.hfcpci.int_s1;
987 cs->hw.hfcpci.int_s1 = exval;
989 if (val & 0x08) {
990 if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
991 if (cs->debug)
992 debugl1(cs, "hfcpci spurious 0x08 IRQ");
993 } else
994 main_rec_hfcpci(bcs);
996 if (val & 0x10) {
997 if (cs->logecho)
998 receive_emsg(cs);
999 else if (!(bcs = Sel_BCS(cs, 1))) {
1000 if (cs->debug)
1001 debugl1(cs, "hfcpci spurious 0x10 IRQ");
1002 } else
1003 main_rec_hfcpci(bcs);
1005 if (val & 0x01) {
1006 if (!(bcs = Sel_BCS(cs, cs->hw.hfcpci.bswapped ? 1 : 0))) {
1007 if (cs->debug)
1008 debugl1(cs, "hfcpci spurious 0x01 IRQ");
1009 } else {
1010 if (bcs->tx_skb) {
1011 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1012 hfcpci_fill_fifo(bcs);
1013 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1014 } else
1015 debugl1(cs, "fill_data %d blocked", bcs->channel);
1016 } else {
1017 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
1018 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1019 hfcpci_fill_fifo(bcs);
1020 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1021 } else
1022 debugl1(cs, "fill_data %d blocked", bcs->channel);
1023 } else {
1024 hfcpci_sched_event(bcs, B_XMTBUFREADY);
1029 if (val & 0x02) {
1030 if (!(bcs = Sel_BCS(cs, 1))) {
1031 if (cs->debug)
1032 debugl1(cs, "hfcpci spurious 0x02 IRQ");
1033 } else {
1034 if (bcs->tx_skb) {
1035 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1036 hfcpci_fill_fifo(bcs);
1037 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1038 } else
1039 debugl1(cs, "fill_data %d blocked", bcs->channel);
1040 } else {
1041 if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
1042 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1043 hfcpci_fill_fifo(bcs);
1044 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1045 } else
1046 debugl1(cs, "fill_data %d blocked", bcs->channel);
1047 } else {
1048 hfcpci_sched_event(bcs, B_XMTBUFREADY);
1053 if (val & 0x20) { /* receive dframe */
1054 receive_dmsg(cs);
1056 if (val & 0x04) { /* dframe transmitted */
1057 if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
1058 del_timer(&cs->dbusytimer);
1059 if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
1060 sched_event_D_pci(cs, D_CLEARBUSY);
1061 if (cs->tx_skb) {
1062 if (cs->tx_skb->len) {
1063 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1064 hfcpci_fill_dfifo(cs);
1065 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1066 } else {
1067 debugl1(cs, "hfcpci_fill_dfifo irq blocked");
1069 goto afterXPR;
1070 } else {
1071 dev_kfree_skb_irq(cs->tx_skb);
1072 cs->tx_cnt = 0;
1073 cs->tx_skb = NULL;
1076 if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
1077 cs->tx_cnt = 0;
1078 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1079 hfcpci_fill_dfifo(cs);
1080 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1081 } else {
1082 debugl1(cs, "hfcpci_fill_dfifo irq blocked");
1084 } else
1085 sched_event_D_pci(cs, D_XMTBUFREADY);
1087 afterXPR:
1088 if (cs->hw.hfcpci.int_s1 && count--) {
1089 val = cs->hw.hfcpci.int_s1;
1090 cs->hw.hfcpci.int_s1 = 0;
1091 if (cs->debug & L1_DEB_ISAC)
1092 debugl1(cs, "HFC-PCI irq %x loop %d", val, 15 - count);
1093 } else
1094 val = 0;
1096 spin_unlock_irqrestore(&cs->lock, flags);
1097 return IRQ_HANDLED;
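/* If a FIFO routine currently holds FLG_LOCK_ATOMIC, the handler above    */
/* does not touch the FIFOs; it parks the pending bits in int_s1 and       */
/* returns. The stored bits are picked up again by the afterXPR/count loop */
/* on a later pass, so events are deferred rather than lost.               */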
1100 /********************************************************************/
1101 /* timer callback for D-chan busy resolution. Currently no function */
1102 /********************************************************************/
1103 static void
1104 hfcpci_dbusy_timer(struct IsdnCardState *cs)
1108 /*************************************/
1109 /* Layer 1 D-channel hardware access */
1110 /*************************************/
1111 static void
1112 HFCPCI_l1hw(struct PStack *st, int pr, void *arg)
1114 u_long flags;
1115 struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
1116 struct sk_buff *skb = arg;
1118 switch (pr) {
1119 case (PH_DATA | REQUEST):
1120 if (cs->debug & DEB_DLOG_HEX)
1121 LogFrame(cs, skb->data, skb->len);
1122 if (cs->debug & DEB_DLOG_VERBOSE)
1123 dlogframe(cs, skb, 0);
1124 spin_lock_irqsave(&cs->lock, flags);
1125 if (cs->tx_skb) {
1126 skb_queue_tail(&cs->sq, skb);
1127 #ifdef L2FRAME_DEBUG /* psa */
1128 if (cs->debug & L1_DEB_LAPD)
1129 Logl2Frame(cs, skb, "PH_DATA Queued", 0);
1130 #endif
1131 } else {
1132 cs->tx_skb = skb;
1133 cs->tx_cnt = 0;
1134 #ifdef L2FRAME_DEBUG /* psa */
1135 if (cs->debug & L1_DEB_LAPD)
1136 Logl2Frame(cs, skb, "PH_DATA", 0);
1137 #endif
1138 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1139 hfcpci_fill_dfifo(cs);
1140 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1141 } else
1142 debugl1(cs, "hfcpci_fill_dfifo blocked");
1145 spin_unlock_irqrestore(&cs->lock, flags);
1146 break;
1147 case (PH_PULL | INDICATION):
1148 spin_lock_irqsave(&cs->lock, flags);
1149 if (cs->tx_skb) {
1150 if (cs->debug & L1_DEB_WARN)
1151 debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
1152 skb_queue_tail(&cs->sq, skb);
1153 spin_unlock_irqrestore(&cs->lock, flags);
1154 break;
1156 if (cs->debug & DEB_DLOG_HEX)
1157 LogFrame(cs, skb->data, skb->len);
1158 if (cs->debug & DEB_DLOG_VERBOSE)
1159 dlogframe(cs, skb, 0);
1160 cs->tx_skb = skb;
1161 cs->tx_cnt = 0;
1162 #ifdef L2FRAME_DEBUG /* psa */
1163 if (cs->debug & L1_DEB_LAPD)
1164 Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
1165 #endif
1166 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1167 hfcpci_fill_dfifo(cs);
1168 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1169 } else
1170 debugl1(cs, "hfcpci_fill_dfifo blocked");
1171 spin_unlock_irqrestore(&cs->lock, flags);
1172 break;
1173 case (PH_PULL | REQUEST):
1174 #ifdef L2FRAME_DEBUG /* psa */
1175 if (cs->debug & L1_DEB_LAPD)
1176 debugl1(cs, "-> PH_REQUEST_PULL");
1177 #endif
1178 if (!cs->tx_skb) {
1179 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1180 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
1181 } else
1182 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1183 break;
1184 case (HW_RESET | REQUEST):
1185 spin_lock_irqsave(&cs->lock, flags);
1186 Write_hfc(cs, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3); /* HFC ST 3 */
1187 udelay(6);
1188 Write_hfc(cs, HFCPCI_STATES, 3); /* HFC ST 3 */
1189 cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
1190 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1191 Write_hfc(cs, HFCPCI_STATES, HFCPCI_ACTIVATE | HFCPCI_DO_ACTION);
1192 spin_unlock_irqrestore(&cs->lock, flags);
1193 l1_msg(cs, HW_POWERUP | CONFIRM, NULL);
1194 break;
1195 case (HW_ENABLE | REQUEST):
1196 spin_lock_irqsave(&cs->lock, flags);
1197 Write_hfc(cs, HFCPCI_STATES, HFCPCI_DO_ACTION);
1198 spin_unlock_irqrestore(&cs->lock, flags);
1199 break;
1200 case (HW_DEACTIVATE | REQUEST):
1201 spin_lock_irqsave(&cs->lock, flags);
1202 cs->hw.hfcpci.mst_m &= ~HFCPCI_MASTER;
1203 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1204 spin_unlock_irqrestore(&cs->lock, flags);
1205 break;
1206 case (HW_INFO3 | REQUEST):
1207 spin_lock_irqsave(&cs->lock, flags);
1208 cs->hw.hfcpci.mst_m |= HFCPCI_MASTER;
1209 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1210 spin_unlock_irqrestore(&cs->lock, flags);
1211 break;
1212 case (HW_TESTLOOP | REQUEST):
1213 spin_lock_irqsave(&cs->lock, flags);
1214 switch ((long) arg) {
1215 case (1):
1216 Write_hfc(cs, HFCPCI_B1_SSL, 0x80); /* tx slot */
1217 Write_hfc(cs, HFCPCI_B1_RSL, 0x80); /* rx slot */
1218 cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~7) | 1;
1219 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1220 break;
1222 case (2):
1223 Write_hfc(cs, HFCPCI_B2_SSL, 0x81); /* tx slot */
1224 Write_hfc(cs, HFCPCI_B2_RSL, 0x81); /* rx slot */
1225 cs->hw.hfcpci.conn = (cs->hw.hfcpci.conn & ~0x38) | 0x08;
1226 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
1227 break;
1229 default:
1230 spin_unlock_irqrestore(&cs->lock, flags);
1231 if (cs->debug & L1_DEB_WARN)
1232 debugl1(cs, "hfcpci_l1hw loop invalid %4lx", (long) arg);
1233 return;
1235 cs->hw.hfcpci.trm |= 0x80; /* enable IOM-loop */
1236 Write_hfc(cs, HFCPCI_TRM, cs->hw.hfcpci.trm);
1237 spin_unlock_irqrestore(&cs->lock, flags);
1238 break;
1239 default:
1240 if (cs->debug & L1_DEB_WARN)
1241 debugl1(cs, "hfcpci_l1hw unknown pr %4x", pr);
1242 break;
1246 /***********************************************/
1247 /* called during init setting l1 stack pointer */
1248 /***********************************************/
1249 static void
1250 setstack_hfcpci(struct PStack *st, struct IsdnCardState *cs)
1252 st->l1.l1hw = HFCPCI_l1hw;
1255 /**************************************/
1256 /* send B-channel data if not blocked */
1257 /**************************************/
1258 static void
1259 hfcpci_send_data(struct BCState *bcs)
1261 struct IsdnCardState *cs = bcs->cs;
1263 if (!test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
1264 hfcpci_fill_fifo(bcs);
1265 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
1266 } else
1267 debugl1(cs, "send_data %d blocked", bcs->channel);
1270 /***************************************************************/
1271 /* activate/deactivate hardware for selected channels and mode */
1272 /***************************************************************/
1273 static void
1274 mode_hfcpci(struct BCState *bcs, int mode, int bc)
1276 struct IsdnCardState *cs = bcs->cs;
1277 int fifo2;
1279 if (cs->debug & L1_DEB_HSCX)
1280 debugl1(cs, "HFCPCI bchannel mode %d bchan %d/%d",
1281 mode, bc, bcs->channel);
1282 bcs->mode = mode;
1283 bcs->channel = bc;
1284 fifo2 = bc;
1285 if (cs->chanlimit > 1) {
1286 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1287 cs->hw.hfcpci.sctrl_e &= ~0x80;
1288 } else {
1289 if (bc) {
1290 if (mode != L1_MODE_NULL) {
1291 cs->hw.hfcpci.bswapped = 1; /* B1 and B2 exchanged */
1292 cs->hw.hfcpci.sctrl_e |= 0x80;
1293 } else {
1294 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1295 cs->hw.hfcpci.sctrl_e &= ~0x80;
1297 fifo2 = 0;
1298 } else {
1299 cs->hw.hfcpci.bswapped = 0; /* B1 and B2 normal mode */
1300 cs->hw.hfcpci.sctrl_e &= ~0x80;
1303 switch (mode) {
1304 case (L1_MODE_NULL):
1305 if (bc) {
1306 cs->hw.hfcpci.sctrl &= ~SCTRL_B2_ENA;
1307 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B2_ENA;
1308 } else {
1309 cs->hw.hfcpci.sctrl &= ~SCTRL_B1_ENA;
1310 cs->hw.hfcpci.sctrl_r &= ~SCTRL_B1_ENA;
1312 if (fifo2) {
1313 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
1314 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1315 } else {
1316 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
1317 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1319 break;
1320 case (L1_MODE_TRANS):
1321 hfcpci_clear_fifo_rx(cs, fifo2);
1322 hfcpci_clear_fifo_tx(cs, fifo2);
1323 if (bc) {
1324 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1325 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1326 } else {
1327 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1328 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1330 if (fifo2) {
1331 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
1332 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1333 cs->hw.hfcpci.ctmt |= 2;
1334 cs->hw.hfcpci.conn &= ~0x18;
1335 } else {
1336 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
1337 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1338 cs->hw.hfcpci.ctmt |= 1;
1339 cs->hw.hfcpci.conn &= ~0x03;
1341 break;
1342 case (L1_MODE_HDLC):
1343 hfcpci_clear_fifo_rx(cs, fifo2);
1344 hfcpci_clear_fifo_tx(cs, fifo2);
1345 if (bc) {
1346 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1347 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1348 } else {
1349 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1350 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1352 if (fifo2) {
1353 cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
1354 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
1355 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1356 cs->hw.hfcpci.ctmt &= ~2;
1357 cs->hw.hfcpci.conn &= ~0x18;
1358 } else {
1359 cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
1360 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
1361 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1362 cs->hw.hfcpci.ctmt &= ~1;
1363 cs->hw.hfcpci.conn &= ~0x03;
1365 break;
1366 case (L1_MODE_EXTRN):
1367 if (bc) {
1368 cs->hw.hfcpci.conn |= 0x10;
1369 cs->hw.hfcpci.sctrl |= SCTRL_B2_ENA;
1370 cs->hw.hfcpci.sctrl_r |= SCTRL_B2_ENA;
1371 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B2;
1372 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1373 } else {
1374 cs->hw.hfcpci.conn |= 0x02;
1375 cs->hw.hfcpci.sctrl |= SCTRL_B1_ENA;
1376 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1377 cs->hw.hfcpci.fifo_en &= ~HFCPCI_FIFOEN_B1;
1378 cs->hw.hfcpci.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1380 break;
1382 Write_hfc(cs, HFCPCI_SCTRL_E, cs->hw.hfcpci.sctrl_e);
1383 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1384 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
1385 Write_hfc(cs, HFCPCI_SCTRL, cs->hw.hfcpci.sctrl);
1386 Write_hfc(cs, HFCPCI_SCTRL_R, cs->hw.hfcpci.sctrl_r);
1387 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt);
1388 Write_hfc(cs, HFCPCI_CONNECT, cs->hw.hfcpci.conn);
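/* If the card is limited to one B-channel (chanlimit == 1) and the second */
/* channel is requested, bswapped is set and bit 0x80 in SCTRL_E           */
/* apparently exchanges B1 and B2 in the chip, so the driver always uses   */
/* the B1 FIFO and interrupt set internally (fifo2 stays 0 in that case).  */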
1391 /******************************/
1392 /* Layer2 -> Layer 1 Transfer */
1393 /******************************/
1394 static void
1395 hfcpci_l2l1(struct PStack *st, int pr, void *arg)
1397 struct BCState *bcs = st->l1.bcs;
1398 u_long flags;
1399 struct sk_buff *skb = arg;
1401 switch (pr) {
1402 case (PH_DATA | REQUEST):
1403 spin_lock_irqsave(&bcs->cs->lock, flags);
1404 if (bcs->tx_skb) {
1405 skb_queue_tail(&bcs->squeue, skb);
1406 } else {
1407 bcs->tx_skb = skb;
1408 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
1409 bcs->cs->BC_Send_Data(bcs);
1411 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1412 break;
1413 case (PH_PULL | INDICATION):
1414 spin_lock_irqsave(&bcs->cs->lock, flags);
1415 if (bcs->tx_skb) {
1416 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1417 printk(KERN_WARNING "hfc_l2l1: this shouldn't happen\n");
1418 break;
1420 // test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
1421 bcs->tx_skb = skb;
1422 bcs->cs->BC_Send_Data(bcs);
1423 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1424 break;
1425 case (PH_PULL | REQUEST):
1426 if (!bcs->tx_skb) {
1427 test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1428 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
1429 } else
1430 test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
1431 break;
1432 case (PH_ACTIVATE | REQUEST):
1433 spin_lock_irqsave(&bcs->cs->lock, flags);
1434 test_and_set_bit(BC_FLG_ACTIV, &bcs->Flag);
1435 mode_hfcpci(bcs, st->l1.mode, st->l1.bc);
1436 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1437 l1_msg_b(st, pr, arg);
1438 break;
1439 case (PH_DEACTIVATE | REQUEST):
1440 l1_msg_b(st, pr, arg);
1441 break;
1442 case (PH_DEACTIVATE | CONFIRM):
1443 spin_lock_irqsave(&bcs->cs->lock, flags);
1444 test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
1445 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1446 mode_hfcpci(bcs, 0, st->l1.bc);
1447 spin_unlock_irqrestore(&bcs->cs->lock, flags);
1448 st->l1.l1l2(st, PH_DEACTIVATE | CONFIRM, NULL);
1449 break;
1453 /******************************************/
1454 /* deactivate B-channel access and queues */
1455 /******************************************/
1456 static void
1457 close_hfcpci(struct BCState *bcs)
1459 mode_hfcpci(bcs, 0, bcs->channel);
1460 if (test_and_clear_bit(BC_FLG_INIT, &bcs->Flag)) {
1461 skb_queue_purge(&bcs->rqueue);
1462 skb_queue_purge(&bcs->squeue);
1463 if (bcs->tx_skb) {
1464 dev_kfree_skb_any(bcs->tx_skb);
1465 bcs->tx_skb = NULL;
1466 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1471 /*************************************/
1472 /* init B-channel queues and control */
1473 /*************************************/
1474 static int
1475 open_hfcpcistate(struct IsdnCardState *cs, struct BCState *bcs)
1477 if (!test_and_set_bit(BC_FLG_INIT, &bcs->Flag)) {
1478 skb_queue_head_init(&bcs->rqueue);
1479 skb_queue_head_init(&bcs->squeue);
1481 bcs->tx_skb = NULL;
1482 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
1483 bcs->event = 0;
1484 bcs->tx_cnt = 0;
1485 return (0);
1488 /*********************************/
1489 /* inits the stack for B-channel */
1490 /*********************************/
1491 static int
1492 setstack_2b(struct PStack *st, struct BCState *bcs)
1494 bcs->channel = st->l1.bc;
1495 if (open_hfcpcistate(st->l1.hardware, bcs))
1496 return (-1);
1497 st->l1.bcs = bcs;
1498 st->l2.l2l1 = hfcpci_l2l1;
1499 setstack_manager(st);
1500 bcs->st = st;
1501 setstack_l1_B(st);
1502 return (0);
1505 /***************************/
1506 /* handle L1 state changes */
1507 /***************************/
1508 static void
1509 hfcpci_bh(struct work_struct *work)
1511 struct IsdnCardState *cs =
1512 container_of(work, struct IsdnCardState, tqueue);
1513 u_long flags;
1514 // struct PStack *stptr;
1516 if (!cs)
1517 return;
1518 if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
1519 if (!cs->hw.hfcpci.nt_mode)
1520 switch (cs->dc.hfcpci.ph_state) {
1521 case (0):
1522 l1_msg(cs, HW_RESET | INDICATION, NULL);
1523 break;
1524 case (3):
1525 l1_msg(cs, HW_DEACTIVATE | INDICATION, NULL);
1526 break;
1527 case (8):
1528 l1_msg(cs, HW_RSYNC | INDICATION, NULL);
1529 break;
1530 case (6):
1531 l1_msg(cs, HW_INFO2 | INDICATION, NULL);
1532 break;
1533 case (7):
1534 l1_msg(cs, HW_INFO4_P8 | INDICATION, NULL);
1535 break;
1536 default:
1537 break;
1538 } else {
1539 spin_lock_irqsave(&cs->lock, flags);
1540 switch (cs->dc.hfcpci.ph_state) {
1541 case (2):
1542 if (cs->hw.hfcpci.nt_timer < 0) {
1543 cs->hw.hfcpci.nt_timer = 0;
1544 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1545 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1546 /* Clear already pending ints */
1547 if (Read_hfc(cs, HFCPCI_INT_S1));
1548 Write_hfc(cs, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
1549 udelay(10);
1550 Write_hfc(cs, HFCPCI_STATES, 4);
1551 cs->dc.hfcpci.ph_state = 4;
1552 } else {
1553 cs->hw.hfcpci.int_m1 |= HFCPCI_INTS_TIMER;
1554 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1555 cs->hw.hfcpci.ctmt &= ~HFCPCI_AUTO_TIMER;
1556 cs->hw.hfcpci.ctmt |= HFCPCI_TIM3_125;
1557 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
1558 Write_hfc(cs, HFCPCI_CTMT, cs->hw.hfcpci.ctmt | HFCPCI_CLTIMER);
1559 cs->hw.hfcpci.nt_timer = NT_T1_COUNT;
1560 Write_hfc(cs, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3); /* allow G2 -> G3 transition */
1562 break;
1563 case (1):
1564 case (3):
1565 case (4):
1566 cs->hw.hfcpci.nt_timer = 0;
1567 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1568 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1569 break;
1570 default:
1571 break;
1573 spin_unlock_irqrestore(&cs->lock, flags);
1576 if (test_and_clear_bit(D_RCVBUFREADY, &cs->event))
1577 DChannel_proc_rcv(cs);
1578 if (test_and_clear_bit(D_XMTBUFREADY, &cs->event))
1579 DChannel_proc_xmt(cs);
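/* NT mode summary of the state handling above: entering state 2 (G2)      */
/* arms the T1 supervision timer (NT_T1_COUNT ticks of the 3.125ms auto    */
/* timer, about 62.5ms) and allows the G2 -> G3 transition; if the timer   */
/* runs out before G3 is reached the state machine is forced to state 4    */
/* and the timer interrupt is masked again. States 1, 3 and 4 just stop    */
/* the timer.                                                              */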
1583 /********************************/
1584 /* called for card init message */
1585 /********************************/
1586 static void
1587 inithfcpci(struct IsdnCardState *cs)
1589 cs->bcs[0].BC_SetStack = setstack_2b;
1590 cs->bcs[1].BC_SetStack = setstack_2b;
1591 cs->bcs[0].BC_Close = close_hfcpci;
1592 cs->bcs[1].BC_Close = close_hfcpci;
1593 cs->dbusytimer.function = (void *) hfcpci_dbusy_timer;
1594 cs->dbusytimer.data = (long) cs;
1595 init_timer(&cs->dbusytimer);
1596 mode_hfcpci(cs->bcs, 0, 0);
1597 mode_hfcpci(cs->bcs + 1, 0, 1);
1602 /*******************************************/
1603 /* handle card messages from control layer */
1604 /*******************************************/
1605 static int
1606 hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
1608 u_long flags;
1610 if (cs->debug & L1_DEB_ISAC)
1611 debugl1(cs, "HFCPCI: card_msg %x", mt);
1612 switch (mt) {
1613 case CARD_RESET:
1614 spin_lock_irqsave(&cs->lock, flags);
1615 reset_hfcpci(cs);
1616 spin_unlock_irqrestore(&cs->lock, flags);
1617 return (0);
1618 case CARD_RELEASE:
1619 release_io_hfcpci(cs);
1620 return (0);
1621 case CARD_INIT:
1622 spin_lock_irqsave(&cs->lock, flags);
1623 inithfcpci(cs);
1624 reset_hfcpci(cs);
1625 spin_unlock_irqrestore(&cs->lock, flags);
1626 msleep(80); /* Timeout 80ms */
1627 /* now switch timer interrupt off */
1628 spin_lock_irqsave(&cs->lock, flags);
1629 cs->hw.hfcpci.int_m1 &= ~HFCPCI_INTS_TIMER;
1630 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1631 /* reinit mode reg */
1632 Write_hfc(cs, HFCPCI_MST_MODE, cs->hw.hfcpci.mst_m);
1633 spin_unlock_irqrestore(&cs->lock, flags);
1634 return (0);
1635 case CARD_TEST:
1636 return (0);
1638 return (0);
1642 /* this variable is used as card index when more than one card is present */
1643 static struct pci_dev *dev_hfcpci __devinitdata = NULL;
1645 #endif /* CONFIG_PCI */
1647 int __devinit
1648 setup_hfcpci(struct IsdnCard *card)
1650 u_long flags;
1651 struct IsdnCardState *cs = card->cs;
1652 char tmp[64];
1653 int i;
1654 struct pci_dev *tmp_hfcpci = NULL;
1656 #ifdef __BIG_ENDIAN
1657 #error "not running on big endian machines now"
1658 #endif
1659 strcpy(tmp, hfcpci_revision);
1660 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
1661 #ifdef CONFIG_PCI
1662 cs->hw.hfcpci.int_s1 = 0;
1663 cs->dc.hfcpci.ph_state = 0;
1664 cs->hw.hfcpci.fifo = 255;
1665 if (cs->typ == ISDN_CTYPE_HFC_PCI) {
1666 i = 0;
1667 while (id_list[i].vendor_id) {
1668 tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
1669 id_list[i].device_id,
1670 dev_hfcpci);
1671 i++;
1672 if (tmp_hfcpci) {
1673 if (pci_enable_device(tmp_hfcpci))
1674 continue;
1675 pci_set_master(tmp_hfcpci);
1676 if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[0].start & PCI_BASE_ADDRESS_IO_MASK)))
1677 continue;
1678 else
1679 break;
1683 if (tmp_hfcpci) {
1684 i--;
1685 dev_hfcpci = tmp_hfcpci; /* old device */
1686 cs->hw.hfcpci.dev = dev_hfcpci;
1687 cs->irq = dev_hfcpci->irq;
1688 if (!cs->irq) {
1689 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
1690 return (0);
1692 cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
1693 printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
1694 } else {
1695 printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
1696 return (0);
1698 if (!cs->hw.hfcpci.pci_io) {
1699 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
1700 return (0);
1702 /* Allocate memory for FIFOS */
1703 /* Because the HFC-PCI needs a 32K physical alignment, we */
1704 /* need to allocate double the memory and align the address */
1705 if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
1706 printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
1707 return 0;
1709 cs->hw.hfcpci.fifos = (void *)
1710 (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000;
1711 pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos));
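/* Alignment trick: the chip needs its FIFO area on a 32K physical         */
/* boundary, so 64K are allocated and the pointer is rounded up to the     */
/* next 32K boundary, e.g. share_start = 0x...c100 gives fifos =           */
/* (0x...c100 & ~0x7FFF) + 0x8000 = 0x...10000, which always stays inside  */
/* the 64K allocation. The bus address of that area is then written to     */
/* PCI config dword 0x80 so the chip knows where its FIFOs live.           */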
1712 cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
1713 printk(KERN_INFO
1714 "HFC-PCI: defined at mem %p fifo %p(%#x) IRQ %d HZ %d\n",
1715 cs->hw.hfcpci.pci_io,
1716 cs->hw.hfcpci.fifos,
1717 (u_int) virt_to_bus(cs->hw.hfcpci.fifos),
1718 cs->irq, HZ);
1719 spin_lock_irqsave(&cs->lock, flags);
1720 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
1721 cs->hw.hfcpci.int_m2 = 0; /* disable all interrupts */
1722 cs->hw.hfcpci.int_m1 = 0;
1723 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1724 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
1725 /* At this point the needed PCI config is done */
1726 /* fifos are still not enabled */
1727 INIT_WORK(&cs->tqueue, hfcpci_bh);
1728 cs->setstack_d = setstack_hfcpci;
1729 cs->BC_Send_Data = &hfcpci_send_data;
1730 cs->readisac = NULL;
1731 cs->writeisac = NULL;
1732 cs->readisacfifo = NULL;
1733 cs->writeisacfifo = NULL;
1734 cs->BC_Read_Reg = NULL;
1735 cs->BC_Write_Reg = NULL;
1736 cs->irq_func = &hfcpci_interrupt;
1737 cs->irq_flags |= IRQF_SHARED;
1738 cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
1739 cs->hw.hfcpci.timer.data = (long) cs;
1740 init_timer(&cs->hw.hfcpci.timer);
1741 cs->cardmsg = &hfcpci_card_msg;
1742 cs->auxcmd = &hfcpci_auxcmd;
1743 spin_unlock_irqrestore(&cs->lock, flags);
1744 return (1);
1745 } else
1746 return (0); /* no valid card type */
1747 #else
1748 printk(KERN_WARNING "HFC-PCI: NO_PCI_BIOS\n");
1749 return (0);
1750 #endif /* CONFIG_PCI */