drivers/isdn/hardware/mISDN/hfcpci.c
1 /*
3 * hfcpci.c low level driver for CCD's hfc-pci based cards
5 * Author Werner Cornelius (werner@isdn4linux.de)
6 * based on existing driver for CCD hfc ISA cards
7 * type approval valid for HFC-S PCI A based card
9 * Copyright 1999 by Werner Cornelius (werner@isdn-development.de)
10 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 * Module options:
28 * debug:
29 * NOTE: only one debug value must be given for all cards
30 * See hfc_pci.h for debug flags.
32 * poll:
33 * NOTE: only one poll value must be given for all cards
34 * Give the number of samples for each fifo process.
35 * By default 128 is used. Decrease to reduce delay, increase to
36 * reduce cpu load. If unsure, don't mess with it!
37 * A value of 128 will use the controller's interrupt. Other values will
38 * use the kernel timer, because the controller will not allow lower values
39 * than 128.
40 * Also note that the value depends on the kernel timer frequency.
41 * If the kernel uses a frequency of 1000 Hz, steps of 8 samples are possible.
42 * If the kernel uses 100 Hz, steps of 80 samples are possible.
43 * If the kernel uses 300 Hz, steps of about 26 samples are possible.
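 * (These step sizes are simply the 8 kHz sample rate divided by HZ,
 * the granularity of the kernel timer: 8000/1000 = 8, 8000/100 = 80
 * and 8000/300 = about 26.7 samples per timer tick.  Typical usage,
 * assuming the module is built as hfcpci.ko:
 * modprobe hfcpci poll=128 debug=0)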
47 #include <linux/interrupt.h>
48 #include <linux/module.h>
49 #include <linux/pci.h>
50 #include <linux/delay.h>
51 #include <linux/mISDNhw.h>
52 #include <linux/slab.h>
54 #include "hfc_pci.h"
56 static const char *hfcpci_revision = "2.0";
58 static int HFC_cnt;
59 static uint debug;
60 static uint poll, tics;
61 static struct timer_list hfc_tl;
62 static unsigned long hfc_jiffies;
64 MODULE_AUTHOR("Karsten Keil");
65 MODULE_LICENSE("GPL");
66 module_param(debug, uint, S_IRUGO | S_IWUSR);
67 module_param(poll, uint, S_IRUGO | S_IWUSR);
69 enum {
70 HFC_CCD_2BD0,
71 HFC_CCD_B000,
72 HFC_CCD_B006,
73 HFC_CCD_B007,
74 HFC_CCD_B008,
75 HFC_CCD_B009,
76 HFC_CCD_B00A,
77 HFC_CCD_B00B,
78 HFC_CCD_B00C,
79 HFC_CCD_B100,
80 HFC_CCD_B700,
81 HFC_CCD_B701,
82 HFC_ASUS_0675,
83 HFC_BERKOM_A1T,
84 HFC_BERKOM_TCONCEPT,
85 HFC_ANIGMA_MC145575,
86 HFC_ZOLTRIX_2BD0,
87 HFC_DIGI_DF_M_IOM2_E,
88 HFC_DIGI_DF_M_E,
89 HFC_DIGI_DF_M_IOM2_A,
90 HFC_DIGI_DF_M_A,
91 HFC_ABOCOM_2BD1,
92 HFC_SITECOM_DC105V2,
95 struct hfcPCI_hw {
96 unsigned char cirm;
97 unsigned char ctmt;
98 unsigned char clkdel;
99 unsigned char states;
100 unsigned char conn;
101 unsigned char mst_m;
102 unsigned char int_m1;
103 unsigned char int_m2;
104 unsigned char sctrl;
105 unsigned char sctrl_r;
106 unsigned char sctrl_e;
107 unsigned char trm;
108 unsigned char fifo_en;
109 unsigned char bswapped;
110 unsigned char protocol;
111 int nt_timer;
112 unsigned char __iomem *pci_io; /* start of PCI IO memory */
113 dma_addr_t dmahandle;
114 void *fifos; /* FIFO memory */
115 int last_bfifo_cnt[2];
116 /* marker saving last b-fifo frame count */
117 struct timer_list timer;
120 #define HFC_CFG_MASTER 1
121 #define HFC_CFG_SLAVE 2
122 #define HFC_CFG_PCM 3
123 #define HFC_CFG_2HFC 4
124 #define HFC_CFG_SLAVEHFC 5
125 #define HFC_CFG_NEG_F0 6
126 #define HFC_CFG_SW_DD_DU 7
128 #define FLG_HFC_TIMER_T1 16
129 #define FLG_HFC_TIMER_T3 17
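/*
 * The HFC_CFG_* values above are bit numbers in hc->cfg and are only
 * accessed through test_bit()/set_bit(); FLG_HFC_TIMER_T1 and
 * FLG_HFC_TIMER_T3 are driver private bits in dch->Flags that track
 * the NT mode T1/T3 supervision timers.
 */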
131 #define NT_T1_COUNT 1120 /* number of 3.125ms interrupts (3.5s) */
132 #define NT_T3_COUNT 31 /* number of 3.125ms interrupts (97 ms) */
133 #define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
134 #define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
137 struct hfc_pci {
138 u_char subtype;
139 u_char chanlimit;
140 u_char initdone;
141 u_long cfg;
142 u_int irq;
143 u_int irqcnt;
144 struct pci_dev *pdev;
145 struct hfcPCI_hw hw;
146 spinlock_t lock; /* card lock */
147 struct dchannel dch;
148 struct bchannel bch[2];
151 /* Interface functions */
152 static void
153 enable_hwirq(struct hfc_pci *hc)
155 hc->hw.int_m2 |= HFCPCI_IRQ_ENABLE;
156 Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
159 static void
160 disable_hwirq(struct hfc_pci *hc)
162 hc->hw.int_m2 &= ~((u_char)HFCPCI_IRQ_ENABLE);
163 Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
167 * free hardware resources used by driver
169 static void
170 release_io_hfcpci(struct hfc_pci *hc)
172 /* disable memory mapped ports + busmaster */
173 pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
174 del_timer(&hc->hw.timer);
175 pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle);
176 iounmap(hc->hw.pci_io);
180 * set mode (NT or TE)
182 static void
183 hfcpci_setmode(struct hfc_pci *hc)
185 if (hc->hw.protocol == ISDN_P_NT_S0) {
186 hc->hw.clkdel = CLKDEL_NT; /* ST-Bit delay for NT-Mode */
187 hc->hw.sctrl |= SCTRL_MODE_NT; /* NT-MODE */
188 hc->hw.states = 1; /* G1 */
189 } else {
190 hc->hw.clkdel = CLKDEL_TE; /* ST-Bit delay for TE-Mode */
191 hc->hw.sctrl &= ~SCTRL_MODE_NT; /* TE-MODE */
192 hc->hw.states = 2; /* F2 */
194 Write_hfc(hc, HFCPCI_CLKDEL, hc->hw.clkdel);
195 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | hc->hw.states);
196 udelay(10);
197 Write_hfc(hc, HFCPCI_STATES, hc->hw.states | 0x40); /* Deactivate */
198 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
202 * function called to reset the HFC PCI chip. A complete software reset of chip
203 * and fifos is done.
205 static void
206 reset_hfcpci(struct hfc_pci *hc)
208 u_char val;
209 int cnt = 0;
211 printk(KERN_DEBUG "reset_hfcpci: entered\n");
212 val = Read_hfc(hc, HFCPCI_CHIP_ID);
213 printk(KERN_INFO "HFC_PCI: resetting HFC ChipId(%x)\n", val);
214 /* enable memory mapped ports, disable busmaster */
215 pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
216 disable_hwirq(hc);
217 /* enable memory ports + busmaster */
218 pci_write_config_word(hc->pdev, PCI_COMMAND,
219 PCI_ENA_MEMIO + PCI_ENA_MASTER);
220 val = Read_hfc(hc, HFCPCI_STATUS);
221 printk(KERN_DEBUG "HFC-PCI status(%x) before reset\n", val);
222 hc->hw.cirm = HFCPCI_RESET; /* Reset On */
223 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
224 set_current_state(TASK_UNINTERRUPTIBLE);
225 mdelay(10); /* Timeout 10ms */
226 hc->hw.cirm = 0; /* Reset Off */
227 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
228 val = Read_hfc(hc, HFCPCI_STATUS);
229 printk(KERN_DEBUG "HFC-PCI status(%x) after reset\n", val);
230 while (cnt < 50000) { /* max 50000 us */
231 udelay(5);
232 cnt += 5;
233 val = Read_hfc(hc, HFCPCI_STATUS);
234 if (!(val & 2))
235 break;
237 printk(KERN_DEBUG "HFC-PCI status(%x) after %dus\n", val, cnt);
239 hc->hw.fifo_en = 0x30; /* only D fifos enabled */
241 hc->hw.bswapped = 0; /* no exchange */
242 hc->hw.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
243 hc->hw.trm = HFCPCI_BTRANS_THRESMASK; /* no echo connect , threshold */
244 hc->hw.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! */
245 hc->hw.sctrl_r = 0;
246 hc->hw.sctrl_e = HFCPCI_AUTO_AWAKE; /* S/T Auto awake */
247 hc->hw.mst_m = 0;
248 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
249 hc->hw.mst_m |= HFCPCI_MASTER; /* HFC Master Mode */
250 if (test_bit(HFC_CFG_NEG_F0, &hc->cfg))
251 hc->hw.mst_m |= HFCPCI_F0_NEGATIV;
252 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
253 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
254 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
255 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
257 hc->hw.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
258 HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
259 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
261 /* Clear already pending ints */
262 val = Read_hfc(hc, HFCPCI_INT_S1);
264 /* set NT/TE mode */
265 hfcpci_setmode(hc);
267 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
268 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
271 * Init GCI/IOM2 in master mode
272 * Slots 0 and 1 are set for B-chan 1 and 2
273 * D- and monitor/CI channel are not enabled
274 * STIO1 is used as output for data, B1+B2 from ST->IOM+HFC
275 * STIO2 is used as data input, B1+B2 from IOM->ST
276 * ST B-channel send disabled -> continuous 1s
277 * The IOM slots are always enabled
279 if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
280 /* set data flow directions: connect B1,B2: HFC to/from PCM */
281 hc->hw.conn = 0x09;
282 } else {
283 hc->hw.conn = 0x36; /* set data flow directions */
284 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
285 Write_hfc(hc, HFCPCI_B1_SSL, 0xC0);
286 Write_hfc(hc, HFCPCI_B2_SSL, 0xC1);
287 Write_hfc(hc, HFCPCI_B1_RSL, 0xC0);
288 Write_hfc(hc, HFCPCI_B2_RSL, 0xC1);
289 } else {
290 Write_hfc(hc, HFCPCI_B1_SSL, 0x80);
291 Write_hfc(hc, HFCPCI_B2_SSL, 0x81);
292 Write_hfc(hc, HFCPCI_B1_RSL, 0x80);
293 Write_hfc(hc, HFCPCI_B2_RSL, 0x81);
296 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
297 val = Read_hfc(hc, HFCPCI_INT_S2);
301 * Timer function called when kernel timer expires
303 static void
304 hfcpci_Timer(struct hfc_pci *hc)
306 hc->hw.timer.expires = jiffies + 75;
307 /* WD RESET */
309 * WriteReg(hc, HFCD_DATA, HFCD_CTMT, hc->hw.ctmt | 0x80);
310 * add_timer(&hc->hw.timer);
316 * select the B-channel entry that matches 'channel' and is active
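 * 'channel' is a bit mask (1 = B1, 2 = B2); a bchannel is returned
 * only if its nr matches the mask and FLG_ACTIVE is set, otherwise
 * NULL.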
318 static struct bchannel *
319 Sel_BCS(struct hfc_pci *hc, int channel)
321 if (test_bit(FLG_ACTIVE, &hc->bch[0].Flags) &&
322 (hc->bch[0].nr & channel))
323 return &hc->bch[0];
324 else if (test_bit(FLG_ACTIVE, &hc->bch[1].Flags) &&
325 (hc->bch[1].nr & channel))
326 return &hc->bch[1];
327 else
328 return NULL;
332 * clear the desired B-channel rx fifo
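 * The fifo bookkeeping lives in the shared memory window: f1/f2 are
 * frame counters, za[].z1/z2 are byte pointers into the data area
 * (offset by B_SUB_VAL).  The fifo is taken out of fifo_en while it is
 * reinitialised; setting f1 == f2 and z1 == z2 marks it as empty.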
334 static void
335 hfcpci_clear_fifo_rx(struct hfc_pci *hc, int fifo)
337 u_char fifo_state;
338 struct bzfifo *bzr;
340 if (fifo) {
341 bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
342 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2RX;
343 } else {
344 bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
345 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1RX;
347 if (fifo_state)
348 hc->hw.fifo_en ^= fifo_state;
349 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
350 hc->hw.last_bfifo_cnt[fifo] = 0;
351 bzr->f1 = MAX_B_FRAMES;
352 bzr->f2 = bzr->f1; /* init F pointers to remain constant */
353 bzr->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
354 bzr->za[MAX_B_FRAMES].z2 = cpu_to_le16(
355 le16_to_cpu(bzr->za[MAX_B_FRAMES].z1));
356 if (fifo_state)
357 hc->hw.fifo_en |= fifo_state;
358 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
362 * clear the desired B-channel tx fifo
364 static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo)
366 u_char fifo_state;
367 struct bzfifo *bzt;
369 if (fifo) {
370 bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
371 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2TX;
372 } else {
373 bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
374 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1TX;
376 if (fifo_state)
377 hc->hw.fifo_en ^= fifo_state;
378 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
379 if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
380 printk(KERN_DEBUG "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) "
381 "z1(%x) z2(%x) state(%x)\n",
382 fifo, bzt->f1, bzt->f2,
383 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
384 le16_to_cpu(bzt->za[MAX_B_FRAMES].z2),
385 fifo_state);
386 bzt->f2 = MAX_B_FRAMES;
387 bzt->f1 = bzt->f2; /* init F pointers to remain constant */
388 bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
389 bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 2);
390 if (fifo_state)
391 hc->hw.fifo_en |= fifo_state;
392 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
393 if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
394 printk(KERN_DEBUG
395 "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) z1(%x) z2(%x)\n",
396 fifo, bzt->f1, bzt->f2,
397 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
398 le16_to_cpu(bzt->za[MAX_B_FRAMES].z2));
402 * read a complete B-frame out of the buffer
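 * The frame starts at z2 of the current F entry and is 'count' bytes
 * long, the last three bytes being CRC and status.  Frames with an
 * impossible length or a non-zero status byte at z1 (CRC error) are
 * only acknowledged by advancing f2/z2; good frames are copied into an
 * skb in at most two chunks, because the data area is a ring buffer.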
404 static void
405 hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
406 u_char *bdata, int count)
408 u_char *ptr, *ptr1, new_f2;
409 int maxlen, new_z2;
410 struct zt *zp;
412 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
413 printk(KERN_DEBUG "hfcpci_empty_fifo\n");
414 zp = &bz->za[bz->f2]; /* point to Z-Regs */
415 new_z2 = le16_to_cpu(zp->z2) + count; /* new position in fifo */
416 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
417 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
418 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
419 if ((count > MAX_DATA_SIZE + 3) || (count < 4) ||
420 (*(bdata + (le16_to_cpu(zp->z1) - B_SUB_VAL)))) {
421 if (bch->debug & DEBUG_HW)
422 printk(KERN_DEBUG "hfcpci_empty_fifo: incoming packet "
423 "invalid length %d or crc\n", count);
424 #ifdef ERROR_STATISTIC
425 bch->err_inv++;
426 #endif
427 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
428 bz->f2 = new_f2; /* next buffer */
429 } else {
430 bch->rx_skb = mI_alloc_skb(count - 3, GFP_ATOMIC);
431 if (!bch->rx_skb) {
432 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
433 return;
435 count -= 3;
436 ptr = skb_put(bch->rx_skb, count);
438 if (le16_to_cpu(zp->z2) + count <= B_FIFO_SIZE + B_SUB_VAL)
439 maxlen = count; /* complete transfer */
440 else
441 maxlen = B_FIFO_SIZE + B_SUB_VAL -
442 le16_to_cpu(zp->z2); /* maximum */
444 ptr1 = bdata + (le16_to_cpu(zp->z2) - B_SUB_VAL);
445 /* start of data */
446 memcpy(ptr, ptr1, maxlen); /* copy data */
447 count -= maxlen;
449 if (count) { /* rest remaining */
450 ptr += maxlen;
451 ptr1 = bdata; /* start of buffer */
452 memcpy(ptr, ptr1, count); /* rest */
454 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
455 bz->f2 = new_f2; /* next buffer */
456 recv_Bchannel(bch, MISDN_ID_ANY);
461 * D-channel receive procedure
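 * The D rx fifo uses the same F/Z scheme as the B fifos, but without
 * the B_SUB_VAL offset.  At most five frames are handled per call;
 * frames with an invalid length or a non-zero CRC/status byte at z1
 * are dropped by just advancing f2 and z2, good ones are copied
 * (possibly in two chunks because of the D_FIFO_SIZE wrap) and passed
 * to recv_Dchannel().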
463 static int
464 receive_dmsg(struct hfc_pci *hc)
466 struct dchannel *dch = &hc->dch;
467 int maxlen;
468 int rcnt, total;
469 int count = 5;
470 u_char *ptr, *ptr1;
471 struct dfifo *df;
472 struct zt *zp;
474 df = &((union fifo_area *)(hc->hw.fifos))->d_chan.d_rx;
475 while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
476 zp = &df->za[df->f2 & D_FREG_MASK];
477 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
478 if (rcnt < 0)
479 rcnt += D_FIFO_SIZE;
480 rcnt++;
481 if (dch->debug & DEBUG_HW_DCHANNEL)
482 printk(KERN_DEBUG
483 "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)\n",
484 df->f1, df->f2,
485 le16_to_cpu(zp->z1),
486 le16_to_cpu(zp->z2),
487 rcnt);
489 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
490 (df->data[le16_to_cpu(zp->z1)])) {
491 if (dch->debug & DEBUG_HW)
492 printk(KERN_DEBUG
493 "empty_fifo hfcpci packet inv. len "
494 "%d or crc %d\n",
495 rcnt,
496 df->data[le16_to_cpu(zp->z1)]);
497 #ifdef ERROR_STATISTIC
498 cs->err_rx++;
499 #endif
500 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
501 (MAX_D_FRAMES + 1); /* next buffer */
502 df->za[df->f2 & D_FREG_MASK].z2 =
503 cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) &
504 (D_FIFO_SIZE - 1));
505 } else {
506 dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
507 if (!dch->rx_skb) {
508 printk(KERN_WARNING
509 "HFC-PCI: D receive out of memory\n");
510 break;
512 total = rcnt;
513 rcnt -= 3;
514 ptr = skb_put(dch->rx_skb, rcnt);
516 if (le16_to_cpu(zp->z2) + rcnt <= D_FIFO_SIZE)
517 maxlen = rcnt; /* complete transfer */
518 else
519 maxlen = D_FIFO_SIZE - le16_to_cpu(zp->z2);
520 /* maximum */
522 ptr1 = df->data + le16_to_cpu(zp->z2);
523 /* start of data */
524 memcpy(ptr, ptr1, maxlen); /* copy data */
525 rcnt -= maxlen;
527 if (rcnt) { /* rest remaining */
528 ptr += maxlen;
529 ptr1 = df->data; /* start of buffer */
530 memcpy(ptr, ptr1, rcnt); /* rest */
532 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
533 (MAX_D_FRAMES + 1); /* next buffer */
534 df->za[df->f2 & D_FREG_MASK].z2 = cpu_to_le16((
535 le16_to_cpu(zp->z2) + total) & (D_FIFO_SIZE - 1));
536 recv_Dchannel(dch);
539 return 1;
543 * check for transparent receive data and read at most one 'poll' size if available
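 * In transparent mode there are no frame boundaries: fcnt_rx (z1 - z2
 * modulo B_FIFO_SIZE) is the number of bytes waiting in the rx fifo
 * and is delivered as one skb, while an oversized backlog is simply
 * flushed.  fcnt_tx, the number of bytes still queued in the matching
 * tx fifo, is handed to recv_Bchannel() as the id argument.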
545 static void
546 hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz,
547 struct bzfifo *txbz, u_char *bdata)
549 __le16 *z1r, *z2r, *z1t, *z2t;
550 int new_z2, fcnt_rx, fcnt_tx, maxlen;
551 u_char *ptr, *ptr1;
553 z1r = &rxbz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
554 z2r = z1r + 1;
555 z1t = &txbz->za[MAX_B_FRAMES].z1;
556 z2t = z1t + 1;
558 fcnt_rx = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
559 if (!fcnt_rx)
560 return; /* no data avail */
562 if (fcnt_rx <= 0)
563 fcnt_rx += B_FIFO_SIZE; /* bytes actually buffered */
564 new_z2 = le16_to_cpu(*z2r) + fcnt_rx; /* new position in fifo */
565 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
566 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
568 if (fcnt_rx > MAX_DATA_SIZE) { /* flush, if oversized */
569 *z2r = cpu_to_le16(new_z2); /* new position */
570 return;
573 fcnt_tx = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
574 if (fcnt_tx <= 0)
575 fcnt_tx += B_FIFO_SIZE;
576 /* fcnt_tx contains available bytes in tx-fifo */
577 fcnt_tx = B_FIFO_SIZE - fcnt_tx;
578 /* remaining bytes to send (bytes in tx-fifo) */
580 bch->rx_skb = mI_alloc_skb(fcnt_rx, GFP_ATOMIC);
581 if (bch->rx_skb) {
582 ptr = skb_put(bch->rx_skb, fcnt_rx);
583 if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL)
584 maxlen = fcnt_rx; /* complete transfer */
585 else
586 maxlen = B_FIFO_SIZE + B_SUB_VAL - le16_to_cpu(*z2r);
587 /* maximum */
589 ptr1 = bdata + (le16_to_cpu(*z2r) - B_SUB_VAL);
590 /* start of data */
591 memcpy(ptr, ptr1, maxlen); /* copy data */
592 fcnt_rx -= maxlen;
594 if (fcnt_rx) { /* rest remaining */
595 ptr += maxlen;
596 ptr1 = bdata; /* start of buffer */
597 memcpy(ptr, ptr1, fcnt_rx); /* rest */
599 recv_Bchannel(bch, fcnt_tx); /* bch, id */
600 } else
601 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
603 *z2r = cpu_to_le16(new_z2); /* new position */
607 * B-channel main receive routine
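 * For HDLC channels complete frames are read while f1 != f2, at most
 * five per call via the count/Begin loop.  last_bfifo_cnt remembers
 * the frame backlog from the previous call; if it exceeds the current
 * backlog by more than one the fifo is assumed to have lost sync and
 * is cleared.  Transparent channels are drained by
 * hfcpci_empty_fifo_trans() instead.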
609 static void
610 main_rec_hfcpci(struct bchannel *bch)
612 struct hfc_pci *hc = bch->hw;
613 int rcnt, real_fifo;
614 int receive = 0, count = 5;
615 struct bzfifo *txbz, *rxbz;
616 u_char *bdata;
617 struct zt *zp;
619 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
620 rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
621 txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
622 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2;
623 real_fifo = 1;
624 } else {
625 rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
626 txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
627 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b1;
628 real_fifo = 0;
630 Begin:
631 count--;
632 if (rxbz->f1 != rxbz->f2) {
633 if (bch->debug & DEBUG_HW_BCHANNEL)
634 printk(KERN_DEBUG "hfcpci rec ch(%x) f1(%d) f2(%d)\n",
635 bch->nr, rxbz->f1, rxbz->f2);
636 zp = &rxbz->za[rxbz->f2];
638 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
639 if (rcnt < 0)
640 rcnt += B_FIFO_SIZE;
641 rcnt++;
642 if (bch->debug & DEBUG_HW_BCHANNEL)
643 printk(KERN_DEBUG
644 "hfcpci rec ch(%x) z1(%x) z2(%x) cnt(%d)\n",
645 bch->nr, le16_to_cpu(zp->z1),
646 le16_to_cpu(zp->z2), rcnt);
647 hfcpci_empty_bfifo(bch, rxbz, bdata, rcnt);
648 rcnt = rxbz->f1 - rxbz->f2;
649 if (rcnt < 0)
650 rcnt += MAX_B_FRAMES + 1;
651 if (hc->hw.last_bfifo_cnt[real_fifo] > rcnt + 1) {
652 rcnt = 0;
653 hfcpci_clear_fifo_rx(hc, real_fifo);
655 hc->hw.last_bfifo_cnt[real_fifo] = rcnt;
656 if (rcnt > 1)
657 receive = 1;
658 else
659 receive = 0;
660 } else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
661 hfcpci_empty_fifo_trans(bch, rxbz, txbz, bdata);
662 return;
663 } else
664 receive = 0;
665 if (count && receive)
666 goto Begin;
671 * D-channel send routine
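 * Free space in the D tx fifo is z2(f2) - z1(f1) - 1 modulo
 * D_FIFO_SIZE and at most MAX_D_FRAMES - 1 frames may be queued.  The
 * frame is copied in up to two chunks (ring buffer wrap), then z1 is
 * written for both the current and the next F entry and f1 is
 * advanced to close the frame.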
673 static void
674 hfcpci_fill_dfifo(struct hfc_pci *hc)
676 struct dchannel *dch = &hc->dch;
677 int fcnt;
678 int count, new_z1, maxlen;
679 struct dfifo *df;
680 u_char *src, *dst, new_f1;
682 if ((dch->debug & DEBUG_HW_DCHANNEL) && !(dch->debug & DEBUG_HW_DFIFO))
683 printk(KERN_DEBUG "%s\n", __func__);
685 if (!dch->tx_skb)
686 return;
687 count = dch->tx_skb->len - dch->tx_idx;
688 if (count <= 0)
689 return;
690 df = &((union fifo_area *) (hc->hw.fifos))->d_chan.d_tx;
692 if (dch->debug & DEBUG_HW_DFIFO)
693 printk(KERN_DEBUG "%s:f1(%d) f2(%d) z1(f1)(%x)\n", __func__,
694 df->f1, df->f2,
695 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1));
696 fcnt = df->f1 - df->f2; /* frame count actually buffered */
697 if (fcnt < 0)
698 fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
699 if (fcnt > (MAX_D_FRAMES - 1)) {
700 if (dch->debug & DEBUG_HW_DCHANNEL)
701 printk(KERN_DEBUG
702 "hfcpci_fill_Dfifo more than 14 frames\n");
703 #ifdef ERROR_STATISTIC
704 cs->err_tx++;
705 #endif
706 return;
708 /* now determine free bytes in FIFO buffer */
709 maxlen = le16_to_cpu(df->za[df->f2 & D_FREG_MASK].z2) -
710 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) - 1;
711 if (maxlen <= 0)
712 maxlen += D_FIFO_SIZE; /* count now contains available bytes */
714 if (dch->debug & DEBUG_HW_DCHANNEL)
715 printk(KERN_DEBUG "hfcpci_fill_Dfifo count(%d/%d)\n",
716 count, maxlen);
717 if (count > maxlen) {
718 if (dch->debug & DEBUG_HW_DCHANNEL)
719 printk(KERN_DEBUG "hfcpci_fill_Dfifo no fifo mem\n");
720 return;
722 new_z1 = (le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) + count) &
723 (D_FIFO_SIZE - 1);
724 new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
725 src = dch->tx_skb->data + dch->tx_idx; /* source pointer */
726 dst = df->data + le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
727 maxlen = D_FIFO_SIZE - le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
728 /* end fifo */
729 if (maxlen > count)
730 maxlen = count; /* limit size */
731 memcpy(dst, src, maxlen); /* first copy */
733 count -= maxlen; /* remaining bytes */
734 if (count) {
735 dst = df->data; /* start of buffer */
736 src += maxlen; /* new position */
737 memcpy(dst, src, count);
739 df->za[new_f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
740 /* for next buffer */
741 df->za[df->f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
742 /* new pos actual buffer */
743 df->f1 = new_f1; /* next frame */
744 dch->tx_idx = dch->tx_skb->len;
748 * B-channel send routine
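 * Transparent mode tops the tx fifo up to at most 2 * poll bytes per
 * call and, if FLG_FILLEMPTY is set and the fifo ran empty, first pads
 * it with HFCPCI_FILLEMPTY bytes of the 0x2a fill pattern to avoid an
 * underrun.  HDLC mode writes one complete frame and then advances the
 * F counter.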
750 static void
751 hfcpci_fill_fifo(struct bchannel *bch)
753 struct hfc_pci *hc = bch->hw;
754 int maxlen, fcnt;
755 int count, new_z1;
756 struct bzfifo *bz;
757 u_char *bdata;
758 u_char new_f1, *src, *dst;
759 __le16 *z1t, *z2t;
761 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
762 printk(KERN_DEBUG "%s\n", __func__);
763 if ((!bch->tx_skb) || bch->tx_skb->len <= 0)
764 return;
765 count = bch->tx_skb->len - bch->tx_idx;
766 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
767 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
768 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2;
769 } else {
770 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
771 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b1;
774 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
775 z1t = &bz->za[MAX_B_FRAMES].z1;
776 z2t = z1t + 1;
777 if (bch->debug & DEBUG_HW_BCHANNEL)
778 printk(KERN_DEBUG "hfcpci_fill_fifo_trans ch(%x) "
779 "cnt(%d) z1(%x) z2(%x)\n", bch->nr, count,
780 le16_to_cpu(*z1t), le16_to_cpu(*z2t));
781 fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
782 if (fcnt <= 0)
783 fcnt += B_FIFO_SIZE;
784 /* fcnt contains available bytes in fifo */
785 fcnt = B_FIFO_SIZE - fcnt;
786 /* remaining bytes to send (bytes in fifo) */
788 /* "fill fifo if empty" feature */
789 if (test_bit(FLG_FILLEMPTY, &bch->Flags) && !fcnt) {
790 /* printk(KERN_DEBUG "%s: buffer empty, so we have "
791 "underrun\n", __func__); */
792 /* fill buffer, to prevent future underrun */
793 count = HFCPCI_FILLEMPTY;
794 new_z1 = le16_to_cpu(*z1t) + count;
795 /* new buffer Position */
796 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
797 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
798 dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
799 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
800 /* end of fifo */
801 if (bch->debug & DEBUG_HW_BFIFO)
802 printk(KERN_DEBUG "hfcpci_FFt fillempty "
803 "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n",
804 fcnt, maxlen, new_z1, dst);
805 fcnt += count;
806 if (maxlen > count)
807 maxlen = count; /* limit size */
808 memset(dst, 0x2a, maxlen); /* first copy */
809 count -= maxlen; /* remaining bytes */
810 if (count) {
811 dst = bdata; /* start of buffer */
812 memset(dst, 0x2a, count);
814 *z1t = cpu_to_le16(new_z1); /* now send data */
817 next_t_frame:
818 count = bch->tx_skb->len - bch->tx_idx;
819 /* maximum fill shall be poll*2 */
820 if (count > (poll << 1) - fcnt)
821 count = (poll << 1) - fcnt;
822 if (count <= 0)
823 return;
824 /* data is suitable for fifo */
825 new_z1 = le16_to_cpu(*z1t) + count;
826 /* new buffer Position */
827 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
828 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
829 src = bch->tx_skb->data + bch->tx_idx;
830 /* source pointer */
831 dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
832 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
833 /* end of fifo */
834 if (bch->debug & DEBUG_HW_BFIFO)
835 printk(KERN_DEBUG "hfcpci_FFt fcnt(%d) "
836 "maxl(%d) nz1(%x) dst(%p)\n",
837 fcnt, maxlen, new_z1, dst);
838 fcnt += count;
839 bch->tx_idx += count;
840 if (maxlen > count)
841 maxlen = count; /* limit size */
842 memcpy(dst, src, maxlen); /* first copy */
843 count -= maxlen; /* remaining bytes */
844 if (count) {
845 dst = bdata; /* start of buffer */
846 src += maxlen; /* new position */
847 memcpy(dst, src, count);
849 *z1t = cpu_to_le16(new_z1); /* now send data */
850 if (bch->tx_idx < bch->tx_skb->len)
851 return;
852 /* send a confirm on transparent channels; on HDLC the skb is just freed */
853 if (test_bit(FLG_TRANSPARENT, &bch->Flags))
854 confirm_Bsend(bch);
855 dev_kfree_skb(bch->tx_skb);
856 if (get_next_bframe(bch))
857 goto next_t_frame;
858 return;
860 if (bch->debug & DEBUG_HW_BCHANNEL)
861 printk(KERN_DEBUG
862 "%s: ch(%x) f1(%d) f2(%d) z1(f1)(%x)\n",
863 __func__, bch->nr, bz->f1, bz->f2,
864 bz->za[bz->f1].z1);
865 fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
866 if (fcnt < 0)
867 fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
868 if (fcnt > (MAX_B_FRAMES - 1)) {
869 if (bch->debug & DEBUG_HW_BCHANNEL)
870 printk(KERN_DEBUG
871 "hfcpci_fill_Bfifo more than 14 frames\n");
872 return;
874 /* now determine free bytes in FIFO buffer */
875 maxlen = le16_to_cpu(bz->za[bz->f2].z2) -
876 le16_to_cpu(bz->za[bz->f1].z1) - 1;
877 if (maxlen <= 0)
878 maxlen += B_FIFO_SIZE; /* count now contains available bytes */
880 if (bch->debug & DEBUG_HW_BCHANNEL)
881 printk(KERN_DEBUG "hfcpci_fill_fifo ch(%x) count(%d/%d)\n",
882 bch->nr, count, maxlen);
884 if (maxlen < count) {
885 if (bch->debug & DEBUG_HW_BCHANNEL)
886 printk(KERN_DEBUG "hfcpci_fill_fifo no fifo mem\n");
887 return;
889 new_z1 = le16_to_cpu(bz->za[bz->f1].z1) + count;
890 /* new buffer Position */
891 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
892 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
894 new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
895 src = bch->tx_skb->data + bch->tx_idx; /* source pointer */
896 dst = bdata + (le16_to_cpu(bz->za[bz->f1].z1) - B_SUB_VAL);
897 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(bz->za[bz->f1].z1);
898 /* end fifo */
899 if (maxlen > count)
900 maxlen = count; /* limit size */
901 memcpy(dst, src, maxlen); /* first copy */
903 count -= maxlen; /* remaining bytes */
904 if (count) {
905 dst = bdata; /* start of buffer */
906 src += maxlen; /* new position */
907 memcpy(dst, src, count);
909 bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */
910 bz->f1 = new_f1; /* next frame */
911 dev_kfree_skb(bch->tx_skb);
912 get_next_bframe(bch);
918 * handle L1 state changes TE
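 * The S/T state reported by the chip is translated into mISDN layer 1
 * events: F0 -> HW_RESET_IND, F3 -> HW_DEACT_IND, F5/F8 -> ANYSIGNAL,
 * F6 -> INFO2 and F7 (activated) -> INFO4_P8.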
921 static void
922 ph_state_te(struct dchannel *dch)
924 if (dch->debug)
925 printk(KERN_DEBUG "%s: TE newstate %x\n",
926 __func__, dch->state);
927 switch (dch->state) {
928 case 0:
929 l1_event(dch->l1, HW_RESET_IND);
930 break;
931 case 3:
932 l1_event(dch->l1, HW_DEACT_IND);
933 break;
934 case 5:
935 case 8:
936 l1_event(dch->l1, ANYSIGNAL);
937 break;
938 case 6:
939 l1_event(dch->l1, INFO2);
940 break;
941 case 7:
942 l1_event(dch->l1, INFO4_P8);
943 break;
948 * handle L1 state changes NT
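 * In NT mode the chip reports G states.  In G2 (pending activation)
 * the T1 supervision timer (NT_T1_COUNT 3.125 ms ticks) is started and
 * the G2 -> G3 transition is allowed; once T1 expires (nt_timer < 0)
 * the state machine is forced to G4.  In G3 the port is activated
 * right away via handle_nt_timer3() if FLG_L2_ACTIVATED is not set,
 * otherwise the T3 timer (NT_T3_COUNT ticks) is started.  G1
 * deactivates, G4 just stops the timers.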
951 static void
952 handle_nt_timer3(struct dchannel *dch) {
953 struct hfc_pci *hc = dch->hw;
955 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
956 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
957 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
958 hc->hw.nt_timer = 0;
959 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
960 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
961 hc->hw.mst_m |= HFCPCI_MASTER;
962 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
963 _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
964 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
967 static void
968 ph_state_nt(struct dchannel *dch)
970 struct hfc_pci *hc = dch->hw;
972 if (dch->debug)
973 printk(KERN_DEBUG "%s: NT newstate %x\n",
974 __func__, dch->state);
975 switch (dch->state) {
976 case 2:
977 if (hc->hw.nt_timer < 0) {
978 hc->hw.nt_timer = 0;
979 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
980 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
981 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
982 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
983 /* Clear already pending ints */
984 (void) Read_hfc(hc, HFCPCI_INT_S1);
985 Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
986 udelay(10);
987 Write_hfc(hc, HFCPCI_STATES, 4);
988 dch->state = 4;
989 } else if (hc->hw.nt_timer == 0) {
990 hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
991 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
992 hc->hw.nt_timer = NT_T1_COUNT;
993 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
994 hc->hw.ctmt |= HFCPCI_TIM3_125;
995 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
996 HFCPCI_CLTIMER);
997 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
998 test_and_set_bit(FLG_HFC_TIMER_T1, &dch->Flags);
999 /* allow G2 -> G3 transition */
1000 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
1001 } else {
1002 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
1004 break;
1005 case 1:
1006 hc->hw.nt_timer = 0;
1007 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
1008 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1009 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1010 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1011 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
1012 hc->hw.mst_m &= ~HFCPCI_MASTER;
1013 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1014 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1015 _queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
1016 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1017 break;
1018 case 4:
1019 hc->hw.nt_timer = 0;
1020 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
1021 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1022 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1023 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1024 break;
1025 case 3:
1026 if (!test_and_set_bit(FLG_HFC_TIMER_T3, &dch->Flags)) {
1027 if (!test_and_clear_bit(FLG_L2_ACTIVATED,
1028 &dch->Flags)) {
1029 handle_nt_timer3(dch);
1030 break;
1032 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
1033 hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
1034 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1035 hc->hw.nt_timer = NT_T3_COUNT;
1036 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
1037 hc->hw.ctmt |= HFCPCI_TIM3_125;
1038 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
1039 HFCPCI_CLTIMER);
1041 break;
1045 static void
1046 ph_state(struct dchannel *dch)
1048 struct hfc_pci *hc = dch->hw;
1050 if (hc->hw.protocol == ISDN_P_NT_S0) {
1051 if (test_bit(FLG_HFC_TIMER_T3, &dch->Flags) &&
1052 hc->hw.nt_timer < 0)
1053 handle_nt_timer3(dch);
1054 else
1055 ph_state_nt(dch);
1056 } else
1057 ph_state_te(dch);
1061 * Layer 1 callback function
1063 static int
1064 hfc_l1callback(struct dchannel *dch, u_int cmd)
1066 struct hfc_pci *hc = dch->hw;
1068 switch (cmd) {
1069 case INFO3_P8:
1070 case INFO3_P10:
1071 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1072 hc->hw.mst_m |= HFCPCI_MASTER;
1073 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1074 break;
1075 case HW_RESET_REQ:
1076 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3);
1077 /* HFC ST 3 */
1078 udelay(6);
1079 Write_hfc(hc, HFCPCI_STATES, 3); /* HFC ST 2 */
1080 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1081 hc->hw.mst_m |= HFCPCI_MASTER;
1082 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1083 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1084 HFCPCI_DO_ACTION);
1085 l1_event(dch->l1, HW_POWERUP_IND);
1086 break;
1087 case HW_DEACT_REQ:
1088 hc->hw.mst_m &= ~HFCPCI_MASTER;
1089 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1090 skb_queue_purge(&dch->squeue);
1091 if (dch->tx_skb) {
1092 dev_kfree_skb(dch->tx_skb);
1093 dch->tx_skb = NULL;
1095 dch->tx_idx = 0;
1096 if (dch->rx_skb) {
1097 dev_kfree_skb(dch->rx_skb);
1098 dch->rx_skb = NULL;
1100 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1101 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1102 del_timer(&dch->timer);
1103 break;
1104 case HW_POWERUP_REQ:
1105 Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION);
1106 break;
1107 case PH_ACTIVATE_IND:
1108 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
1109 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1110 GFP_ATOMIC);
1111 break;
1112 case PH_DEACTIVATE_IND:
1113 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
1114 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1115 GFP_ATOMIC);
1116 break;
1117 default:
1118 if (dch->debug & DEBUG_HW)
1119 printk(KERN_DEBUG "%s: unknown command %x\n",
1120 __func__, cmd);
1121 return -1;
1123 return 0;
1127 * Interrupt handler
1129 static inline void
1130 tx_birq(struct bchannel *bch)
1132 if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
1133 hfcpci_fill_fifo(bch);
1134 else {
1135 if (bch->tx_skb)
1136 dev_kfree_skb(bch->tx_skb);
1137 if (get_next_bframe(bch))
1138 hfcpci_fill_fifo(bch);
1142 static inline void
1143 tx_dirq(struct dchannel *dch)
1145 if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len)
1146 hfcpci_fill_dfifo(dch->hw);
1147 else {
1148 if (dch->tx_skb)
1149 dev_kfree_skb(dch->tx_skb);
1150 if (get_next_dframe(dch))
1151 hfcpci_fill_dfifo(dch->hw);
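/*
 * The main interrupt handler below reads INT_S1 and dispatches on the
 * bits enabled in int_m1: 0x40 state machine change, 0x80 timer (also
 * used for the NT T1/T3 countdown), 0x08/0x10 B1/B2 receive,
 * 0x01/0x02 B1/B2 transmit, 0x20 D receive and 0x04 D transmit.
 */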
1155 static irqreturn_t
1156 hfcpci_int(int intno, void *dev_id)
1158 struct hfc_pci *hc = dev_id;
1159 u_char exval;
1160 struct bchannel *bch;
1161 u_char val, stat;
1163 spin_lock(&hc->lock);
1164 if (!(hc->hw.int_m2 & 0x08)) {
1165 spin_unlock(&hc->lock);
1166 return IRQ_NONE; /* not initialised */
1168 stat = Read_hfc(hc, HFCPCI_STATUS);
1169 if (HFCPCI_ANYINT & stat) {
1170 val = Read_hfc(hc, HFCPCI_INT_S1);
1171 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1172 printk(KERN_DEBUG
1173 "HFC-PCI: stat(%02x) s1(%02x)\n", stat, val);
1174 } else {
1175 /* shared */
1176 spin_unlock(&hc->lock);
1177 return IRQ_NONE;
1179 hc->irqcnt++;
1181 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1182 printk(KERN_DEBUG "HFC-PCI irq %x\n", val);
1183 val &= hc->hw.int_m1;
1184 if (val & 0x40) { /* state machine irq */
1185 exval = Read_hfc(hc, HFCPCI_STATES) & 0xf;
1186 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1187 printk(KERN_DEBUG "ph_state chg %d->%d\n",
1188 hc->dch.state, exval);
1189 hc->dch.state = exval;
1190 schedule_event(&hc->dch, FLG_PHCHANGE);
1191 val &= ~0x40;
1193 if (val & 0x80) { /* timer irq */
1194 if (hc->hw.protocol == ISDN_P_NT_S0) {
1195 if ((--hc->hw.nt_timer) < 0)
1196 schedule_event(&hc->dch, FLG_PHCHANGE);
1198 val &= ~0x80;
1199 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER);
1201 if (val & 0x08) { /* B1 rx */
1202 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1203 if (bch)
1204 main_rec_hfcpci(bch);
1205 else if (hc->dch.debug)
1206 printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n");
1208 if (val & 0x10) { /* B2 rx */
1209 bch = Sel_BCS(hc, 2);
1210 if (bch)
1211 main_rec_hfcpci(bch);
1212 else if (hc->dch.debug)
1213 printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n");
1215 if (val & 0x01) { /* B1 tx */
1216 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1217 if (bch)
1218 tx_birq(bch);
1219 else if (hc->dch.debug)
1220 printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n");
1222 if (val & 0x02) { /* B2 tx */
1223 bch = Sel_BCS(hc, 2);
1224 if (bch)
1225 tx_birq(bch);
1226 else if (hc->dch.debug)
1227 printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n");
1229 if (val & 0x20) /* D rx */
1230 receive_dmsg(hc);
1231 if (val & 0x04) { /* D tx */
1232 if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags))
1233 del_timer(&hc->dch.timer);
1234 tx_dirq(&hc->dch);
1236 spin_unlock(&hc->lock);
1237 return IRQ_HANDLED;
1241 * timer callback for D-chan busy resolution. Currently no function
1243 static void
1244 hfcpci_dbusy_timer(struct hfc_pci *hc)
1249 * activate/deactivate hardware for selected channels and mode
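 * 'bc' carries the channel number in bits 0-7 and, for PCM use, the
 * receive slot in bits 8-15, the transmit slot in bits 16-23 and a
 * non-zero marker in bits 24-31.  'protocol' is ISDN_P_B_RAW
 * (transparent), ISDN_P_B_HDLC, ISDN_P_NONE (off) or -1 for the
 * initial setup.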
1251 static int
1252 mode_hfcpci(struct bchannel *bch, int bc, int protocol)
1254 struct hfc_pci *hc = bch->hw;
1255 int fifo2;
1256 u_char rx_slot = 0, tx_slot = 0, pcm_mode;
1258 if (bch->debug & DEBUG_HW_BCHANNEL)
1259 printk(KERN_DEBUG
1260 "HFCPCI bchannel protocol %x-->%x ch %x-->%x\n",
1261 bch->state, protocol, bch->nr, bc);
1263 fifo2 = bc;
1264 pcm_mode = (bc>>24) & 0xff;
1265 if (pcm_mode) { /* PCM SLOT USE */
1266 if (!test_bit(HFC_CFG_PCM, &hc->cfg))
1267 printk(KERN_WARNING
1268 "%s: pcm channel id without HFC_CFG_PCM\n",
1269 __func__);
1270 rx_slot = (bc>>8) & 0xff;
1271 tx_slot = (bc>>16) & 0xff;
1272 bc = bc & 0xff;
1273 } else if (test_bit(HFC_CFG_PCM, &hc->cfg) && (protocol > ISDN_P_NONE))
1274 printk(KERN_WARNING "%s: no pcm channel id but HFC_CFG_PCM\n",
1275 __func__);
1276 if (hc->chanlimit > 1) {
1277 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1278 hc->hw.sctrl_e &= ~0x80;
1279 } else {
1280 if (bc & 2) {
1281 if (protocol != ISDN_P_NONE) {
1282 hc->hw.bswapped = 1; /* B1 and B2 exchanged */
1283 hc->hw.sctrl_e |= 0x80;
1284 } else {
1285 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1286 hc->hw.sctrl_e &= ~0x80;
1288 fifo2 = 1;
1289 } else {
1290 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1291 hc->hw.sctrl_e &= ~0x80;
1294 switch (protocol) {
1295 case (-1): /* used for init */
1296 bch->state = -1;
1297 bch->nr = bc;
1298 case (ISDN_P_NONE):
1299 if (bch->state == ISDN_P_NONE)
1300 return 0;
1301 if (bc & 2) {
1302 hc->hw.sctrl &= ~SCTRL_B2_ENA;
1303 hc->hw.sctrl_r &= ~SCTRL_B2_ENA;
1304 } else {
1305 hc->hw.sctrl &= ~SCTRL_B1_ENA;
1306 hc->hw.sctrl_r &= ~SCTRL_B1_ENA;
1308 if (fifo2 & 2) {
1309 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
1310 hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS +
1311 HFCPCI_INTS_B2REC);
1312 } else {
1313 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
1314 hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS +
1315 HFCPCI_INTS_B1REC);
1317 #ifdef REVERSE_BITORDER
1318 if (bch->nr & 2)
1319 hc->hw.cirm &= 0x7f;
1320 else
1321 hc->hw.cirm &= 0xbf;
1322 #endif
1323 bch->state = ISDN_P_NONE;
1324 bch->nr = bc;
1325 test_and_clear_bit(FLG_HDLC, &bch->Flags);
1326 test_and_clear_bit(FLG_TRANSPARENT, &bch->Flags);
1327 break;
1328 case (ISDN_P_B_RAW):
1329 bch->state = protocol;
1330 bch->nr = bc;
1331 hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
1332 hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
1333 if (bc & 2) {
1334 hc->hw.sctrl |= SCTRL_B2_ENA;
1335 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1336 #ifdef REVERSE_BITORDER
1337 hc->hw.cirm |= 0x80;
1338 #endif
1339 } else {
1340 hc->hw.sctrl |= SCTRL_B1_ENA;
1341 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1342 #ifdef REVERSE_BITORDER
1343 hc->hw.cirm |= 0x40;
1344 #endif
1346 if (fifo2 & 2) {
1347 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1348 if (!tics)
1349 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
1350 HFCPCI_INTS_B2REC);
1351 hc->hw.ctmt |= 2;
1352 hc->hw.conn &= ~0x18;
1353 } else {
1354 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1355 if (!tics)
1356 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
1357 HFCPCI_INTS_B1REC);
1358 hc->hw.ctmt |= 1;
1359 hc->hw.conn &= ~0x03;
1361 test_and_set_bit(FLG_TRANSPARENT, &bch->Flags);
1362 break;
1363 case (ISDN_P_B_HDLC):
1364 bch->state = protocol;
1365 bch->nr = bc;
1366 hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0);
1367 hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0);
1368 if (bc & 2) {
1369 hc->hw.sctrl |= SCTRL_B2_ENA;
1370 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1371 } else {
1372 hc->hw.sctrl |= SCTRL_B1_ENA;
1373 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1375 if (fifo2 & 2) {
1376 hc->hw.last_bfifo_cnt[1] = 0;
1377 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1378 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
1379 HFCPCI_INTS_B2REC);
1380 hc->hw.ctmt &= ~2;
1381 hc->hw.conn &= ~0x18;
1382 } else {
1383 hc->hw.last_bfifo_cnt[0] = 0;
1384 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1385 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
1386 HFCPCI_INTS_B1REC);
1387 hc->hw.ctmt &= ~1;
1388 hc->hw.conn &= ~0x03;
1390 test_and_set_bit(FLG_HDLC, &bch->Flags);
1391 break;
1392 default:
1393 printk(KERN_DEBUG "prot not known %x\n", protocol);
1394 return -ENOPROTOOPT;
1396 if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
1397 if ((protocol == ISDN_P_NONE) ||
1398 (protocol == -1)) { /* init case */
1399 rx_slot = 0;
1400 tx_slot = 0;
1401 } else {
1402 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
1403 rx_slot |= 0xC0;
1404 tx_slot |= 0xC0;
1405 } else {
1406 rx_slot |= 0x80;
1407 tx_slot |= 0x80;
1410 if (bc & 2) {
1411 hc->hw.conn &= 0xc7;
1412 hc->hw.conn |= 0x08;
1413 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL 0x%x\n",
1414 __func__, tx_slot);
1415 printk(KERN_DEBUG "%s: Write_hfc: B2_RSL 0x%x\n",
1416 __func__, rx_slot);
1417 Write_hfc(hc, HFCPCI_B2_SSL, tx_slot);
1418 Write_hfc(hc, HFCPCI_B2_RSL, rx_slot);
1419 } else {
1420 hc->hw.conn &= 0xf8;
1421 hc->hw.conn |= 0x01;
1422 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL 0x%x\n",
1423 __func__, tx_slot);
1424 printk(KERN_DEBUG "%s: Write_hfc: B1_RSL 0x%x\n",
1425 __func__, rx_slot);
1426 Write_hfc(hc, HFCPCI_B1_SSL, tx_slot);
1427 Write_hfc(hc, HFCPCI_B1_RSL, rx_slot);
1430 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
1431 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1432 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1433 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
1434 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1435 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1436 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1437 #ifdef REVERSE_BITORDER
1438 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1439 #endif
1440 return 0;
1443 static int
1444 set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
1446 struct hfc_pci *hc = bch->hw;
1448 if (bch->debug & DEBUG_HW_BCHANNEL)
1449 printk(KERN_DEBUG
1450 "HFCPCI bchannel test rx protocol %x-->%x ch %x-->%x\n",
1451 bch->state, protocol, bch->nr, chan);
1452 if (bch->nr != chan) {
1453 printk(KERN_DEBUG
1454 "HFCPCI rxtest wrong channel parameter %x/%x\n",
1455 bch->nr, chan);
1456 return -EINVAL;
1458 switch (protocol) {
1459 case (ISDN_P_B_RAW):
1460 bch->state = protocol;
1461 hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
1462 if (chan & 2) {
1463 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1464 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1465 if (!tics)
1466 hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1467 hc->hw.ctmt |= 2;
1468 hc->hw.conn &= ~0x18;
1469 #ifdef REVERSE_BITORDER
1470 hc->hw.cirm |= 0x80;
1471 #endif
1472 } else {
1473 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1474 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1475 if (!tics)
1476 hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1477 hc->hw.ctmt |= 1;
1478 hc->hw.conn &= ~0x03;
1479 #ifdef REVERSE_BITORDER
1480 hc->hw.cirm |= 0x40;
1481 #endif
1483 break;
1484 case (ISDN_P_B_HDLC):
1485 bch->state = protocol;
1486 hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0);
1487 if (chan & 2) {
1488 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1489 hc->hw.last_bfifo_cnt[1] = 0;
1490 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1491 hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1492 hc->hw.ctmt &= ~2;
1493 hc->hw.conn &= ~0x18;
1494 } else {
1495 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1496 hc->hw.last_bfifo_cnt[0] = 0;
1497 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1498 hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1499 hc->hw.ctmt &= ~1;
1500 hc->hw.conn &= ~0x03;
1502 break;
1503 default:
1504 printk(KERN_DEBUG "prot not known %x\n", protocol);
1505 return -ENOPROTOOPT;
1507 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1508 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1509 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1510 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1511 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1512 #ifdef REVERSE_BITORDER
1513 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1514 #endif
1515 return 0;
1518 static void
1519 deactivate_bchannel(struct bchannel *bch)
1521 struct hfc_pci *hc = bch->hw;
1522 u_long flags;
1524 spin_lock_irqsave(&hc->lock, flags);
1525 mISDN_clear_bchannel(bch);
1526 mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1527 spin_unlock_irqrestore(&hc->lock, flags);
1531 * Layer 1 B-channel hardware access
1533 static int
1534 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
1536 int ret = 0;
1538 switch (cq->op) {
1539 case MISDN_CTRL_GETOP:
1540 cq->op = MISDN_CTRL_FILL_EMPTY;
1541 break;
1542 case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */
1543 test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
1544 if (debug & DEBUG_HW_OPEN)
1545 printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d "
1546 "off=%d)\n", __func__, bch->nr, !!cq->p1);
1547 break;
1548 default:
1549 printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op);
1550 ret = -EINVAL;
1551 break;
1553 return ret;
1555 static int
1556 hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1558 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1559 struct hfc_pci *hc = bch->hw;
1560 int ret = -EINVAL;
1561 u_long flags;
1563 if (bch->debug & DEBUG_HW)
1564 printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);
1565 switch (cmd) {
1566 case HW_TESTRX_RAW:
1567 spin_lock_irqsave(&hc->lock, flags);
1568 ret = set_hfcpci_rxtest(bch, ISDN_P_B_RAW, (int)(long)arg);
1569 spin_unlock_irqrestore(&hc->lock, flags);
1570 break;
1571 case HW_TESTRX_HDLC:
1572 spin_lock_irqsave(&hc->lock, flags);
1573 ret = set_hfcpci_rxtest(bch, ISDN_P_B_HDLC, (int)(long)arg);
1574 spin_unlock_irqrestore(&hc->lock, flags);
1575 break;
1576 case HW_TESTRX_OFF:
1577 spin_lock_irqsave(&hc->lock, flags);
1578 mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1579 spin_unlock_irqrestore(&hc->lock, flags);
1580 ret = 0;
1581 break;
1582 case CLOSE_CHANNEL:
1583 test_and_clear_bit(FLG_OPEN, &bch->Flags);
1584 if (test_bit(FLG_ACTIVE, &bch->Flags))
1585 deactivate_bchannel(bch);
1586 ch->protocol = ISDN_P_NONE;
1587 ch->peer = NULL;
1588 module_put(THIS_MODULE);
1589 ret = 0;
1590 break;
1591 case CONTROL_CHANNEL:
1592 ret = channel_bctrl(bch, arg);
1593 break;
1594 default:
1595 printk(KERN_WARNING "%s: unknown prim(%x)\n",
1596 __func__, cmd);
1598 return ret;
1602 * Layer2 -> Layer 1 Dchannel data
1604 static int
1605 hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
1607 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
1608 struct dchannel *dch = container_of(dev, struct dchannel, dev);
1609 struct hfc_pci *hc = dch->hw;
1610 int ret = -EINVAL;
1611 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1612 unsigned int id;
1613 u_long flags;
1615 switch (hh->prim) {
1616 case PH_DATA_REQ:
1617 spin_lock_irqsave(&hc->lock, flags);
1618 ret = dchannel_senddata(dch, skb);
1619 if (ret > 0) { /* direct TX */
1620 id = hh->id; /* skb can be freed */
1621 hfcpci_fill_dfifo(dch->hw);
1622 ret = 0;
1623 spin_unlock_irqrestore(&hc->lock, flags);
1624 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1625 } else
1626 spin_unlock_irqrestore(&hc->lock, flags);
1627 return ret;
1628 case PH_ACTIVATE_REQ:
1629 spin_lock_irqsave(&hc->lock, flags);
1630 if (hc->hw.protocol == ISDN_P_NT_S0) {
1631 ret = 0;
1632 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1633 hc->hw.mst_m |= HFCPCI_MASTER;
1634 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1635 if (test_bit(FLG_ACTIVE, &dch->Flags)) {
1636 spin_unlock_irqrestore(&hc->lock, flags);
1637 _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
1638 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1639 break;
1641 test_and_set_bit(FLG_L2_ACTIVATED, &dch->Flags);
1642 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1643 HFCPCI_DO_ACTION | 1);
1644 } else
1645 ret = l1_event(dch->l1, hh->prim);
1646 spin_unlock_irqrestore(&hc->lock, flags);
1647 break;
1648 case PH_DEACTIVATE_REQ:
1649 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1650 spin_lock_irqsave(&hc->lock, flags);
1651 if (hc->hw.protocol == ISDN_P_NT_S0) {
1652 /* prepare deactivation */
1653 Write_hfc(hc, HFCPCI_STATES, 0x40);
1654 skb_queue_purge(&dch->squeue);
1655 if (dch->tx_skb) {
1656 dev_kfree_skb(dch->tx_skb);
1657 dch->tx_skb = NULL;
1659 dch->tx_idx = 0;
1660 if (dch->rx_skb) {
1661 dev_kfree_skb(dch->rx_skb);
1662 dch->rx_skb = NULL;
1664 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1665 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1666 del_timer(&dch->timer);
1667 #ifdef FIXME
1668 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
1669 dchannel_sched_event(&hc->dch, D_CLEARBUSY);
1670 #endif
1671 hc->hw.mst_m &= ~HFCPCI_MASTER;
1672 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1673 ret = 0;
1674 } else {
1675 ret = l1_event(dch->l1, hh->prim);
1677 spin_unlock_irqrestore(&hc->lock, flags);
1678 break;
1680 if (!ret)
1681 dev_kfree_skb(skb);
1682 return ret;
1686 * Layer2 -> Layer 1 Bchannel data
1688 static int
1689 hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
1691 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1692 struct hfc_pci *hc = bch->hw;
1693 int ret = -EINVAL;
1694 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1695 unsigned int id;
1696 u_long flags;
1698 switch (hh->prim) {
1699 case PH_DATA_REQ:
1700 spin_lock_irqsave(&hc->lock, flags);
1701 ret = bchannel_senddata(bch, skb);
1702 if (ret > 0) { /* direct TX */
1703 id = hh->id; /* skb can be freed */
1704 hfcpci_fill_fifo(bch);
1705 ret = 0;
1706 spin_unlock_irqrestore(&hc->lock, flags);
1707 if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
1708 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1709 } else
1710 spin_unlock_irqrestore(&hc->lock, flags);
1711 return ret;
1712 case PH_ACTIVATE_REQ:
1713 spin_lock_irqsave(&hc->lock, flags);
1714 if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
1715 ret = mode_hfcpci(bch, bch->nr, ch->protocol);
1716 else
1717 ret = 0;
1718 spin_unlock_irqrestore(&hc->lock, flags);
1719 if (!ret)
1720 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
1721 NULL, GFP_KERNEL);
1722 break;
1723 case PH_DEACTIVATE_REQ:
1724 deactivate_bchannel(bch);
1725 _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
1726 NULL, GFP_KERNEL);
1727 ret = 0;
1728 break;
1730 if (!ret)
1731 dev_kfree_skb(skb);
1732 return ret;
1736 * called for card init message
1739 static void
1740 inithfcpci(struct hfc_pci *hc)
1742 printk(KERN_DEBUG "inithfcpci: entered\n");
1743 hc->dch.timer.function = (void *) hfcpci_dbusy_timer;
1744 hc->dch.timer.data = (long) &hc->dch;
1745 init_timer(&hc->dch.timer);
1746 hc->chanlimit = 2;
1747 mode_hfcpci(&hc->bch[0], 1, -1);
1748 mode_hfcpci(&hc->bch[1], 2, -1);
1752 static int
1753 init_card(struct hfc_pci *hc)
1755 int cnt = 3;
1756 u_long flags;
1758 printk(KERN_DEBUG "init_card: entered\n");
1761 spin_lock_irqsave(&hc->lock, flags);
1762 disable_hwirq(hc);
1763 spin_unlock_irqrestore(&hc->lock, flags);
1764 if (request_irq(hc->irq, hfcpci_int, IRQF_SHARED, "HFC PCI", hc)) {
1765 printk(KERN_WARNING
1766 "mISDN: couldn't get interrupt %d\n", hc->irq);
1767 return -EIO;
1769 spin_lock_irqsave(&hc->lock, flags);
1770 reset_hfcpci(hc);
1771 while (cnt) {
1772 inithfcpci(hc);
1774 * Finally enable IRQ output
1775 * this is only allowed, if an IRQ routine is already
1776 * established for this HFC, so don't do that earlier
1778 enable_hwirq(hc);
1779 spin_unlock_irqrestore(&hc->lock, flags);
1780 /* Timeout 80ms */
1781 current->state = TASK_UNINTERRUPTIBLE;
1782 schedule_timeout((80*HZ)/1000);
1783 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
1784 hc->irq, hc->irqcnt);
1785 /* now switch timer interrupt off */
1786 spin_lock_irqsave(&hc->lock, flags);
1787 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1788 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1789 /* reinit mode reg */
1790 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1791 if (!hc->irqcnt) {
1792 printk(KERN_WARNING
1793 "HFC PCI: IRQ(%d) getting no interrupts "
1794 "during init %d\n", hc->irq, 4 - cnt);
1795 if (cnt == 1)
1796 break;
1797 else {
1798 reset_hfcpci(hc);
1799 cnt--;
1801 } else {
1802 spin_unlock_irqrestore(&hc->lock, flags);
1803 hc->initdone = 1;
1804 return 0;
1807 disable_hwirq(hc);
1808 spin_unlock_irqrestore(&hc->lock, flags);
1809 free_irq(hc->irq, hc);
1810 return -EIO;
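/*
 * channel_ctrl() implements the card wide CONTROL_CHANNEL operations:
 * MISDN_CTRL_LOOP loops B1 (cq->channel == 1) or B2 (== 2) back via
 * the PCM/IOM interface and cq->channel == 0 disables the loop again,
 * MISDN_CTRL_CONNECT cross connects B1 and B2 through the PCM slots,
 * MISDN_CTRL_DISCONNECT tears that connection down and disables the
 * IOM loop.
 */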
1813 static int
1814 channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
1816 int ret = 0;
1817 u_char slot;
1819 switch (cq->op) {
1820 case MISDN_CTRL_GETOP:
1821 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
1822 MISDN_CTRL_DISCONNECT;
1823 break;
1824 case MISDN_CTRL_LOOP:
1825 /* channel 0 disables the loop */
1826 if (cq->channel < 0 || cq->channel > 2) {
1827 ret = -EINVAL;
1828 break;
1830 if (cq->channel & 1) {
1831 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1832 slot = 0xC0;
1833 else
1834 slot = 0x80;
1835 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1836 __func__, slot);
1837 Write_hfc(hc, HFCPCI_B1_SSL, slot);
1838 Write_hfc(hc, HFCPCI_B1_RSL, slot);
1839 hc->hw.conn = (hc->hw.conn & ~7) | 6;
1840 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1842 if (cq->channel & 2) {
1843 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1844 slot = 0xC1;
1845 else
1846 slot = 0x81;
1847 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1848 __func__, slot);
1849 Write_hfc(hc, HFCPCI_B2_SSL, slot);
1850 Write_hfc(hc, HFCPCI_B2_RSL, slot);
1851 hc->hw.conn = (hc->hw.conn & ~0x38) | 0x30;
1852 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1854 if (cq->channel & 3)
1855 hc->hw.trm |= 0x80; /* enable IOM-loop */
1856 else {
1857 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1858 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1859 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1861 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1862 break;
1863 case MISDN_CTRL_CONNECT:
1864 if (cq->channel == cq->p1) {
1865 ret = -EINVAL;
1866 break;
1868 if (cq->channel < 1 || cq->channel > 2 ||
1869 cq->p1 < 1 || cq->p1 > 2) {
1870 ret = -EINVAL;
1871 break;
1873 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1874 slot = 0xC0;
1875 else
1876 slot = 0x80;
1877 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1878 __func__, slot);
1879 Write_hfc(hc, HFCPCI_B1_SSL, slot);
1880 Write_hfc(hc, HFCPCI_B2_RSL, slot);
1881 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1882 slot = 0xC1;
1883 else
1884 slot = 0x81;
1885 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1886 __func__, slot);
1887 Write_hfc(hc, HFCPCI_B2_SSL, slot);
1888 Write_hfc(hc, HFCPCI_B1_RSL, slot);
1889 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x36;
1890 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1891 hc->hw.trm |= 0x80;
1892 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1893 break;
1894 case MISDN_CTRL_DISCONNECT:
1895 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1896 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1897 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1898 break;
1899 default:
1900 printk(KERN_WARNING "%s: unknown Op %x\n",
1901 __func__, cq->op);
1902 ret = -EINVAL;
1903 break;
1905 return ret;
1908 static int
1909 open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch,
1910 struct channel_req *rq)
1912 int err = 0;
1914 if (debug & DEBUG_HW_OPEN)
1915 printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
1916 hc->dch.dev.id, __builtin_return_address(0));
1917 if (rq->protocol == ISDN_P_NONE)
1918 return -EINVAL;
1919 if (rq->adr.channel == 1) {
1920 /* TODO: E-Channel */
1921 return -EINVAL;
1923 if (!hc->initdone) {
1924 if (rq->protocol == ISDN_P_TE_S0) {
1925 err = create_l1(&hc->dch, hfc_l1callback);
1926 if (err)
1927 return err;
1929 hc->hw.protocol = rq->protocol;
1930 ch->protocol = rq->protocol;
1931 err = init_card(hc);
1932 if (err)
1933 return err;
1934 } else {
1935 if (rq->protocol != ch->protocol) {
1936 if (hc->hw.protocol == ISDN_P_TE_S0)
1937 l1_event(hc->dch.l1, CLOSE_CHANNEL);
1938 if (rq->protocol == ISDN_P_TE_S0) {
1939 err = create_l1(&hc->dch, hfc_l1callback);
1940 if (err)
1941 return err;
1943 hc->hw.protocol = rq->protocol;
1944 ch->protocol = rq->protocol;
1945 hfcpci_setmode(hc);
1949 if (((ch->protocol == ISDN_P_NT_S0) && (hc->dch.state == 3)) ||
1950 ((ch->protocol == ISDN_P_TE_S0) && (hc->dch.state == 7))) {
1951 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
1952 0, NULL, GFP_KERNEL);
1954 rq->ch = ch;
1955 if (!try_module_get(THIS_MODULE))
1956 printk(KERN_WARNING "%s:cannot get module\n", __func__);
1957 return 0;
1960 static int
1961 open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
1963 struct bchannel *bch;
1965 if (rq->adr.channel > 2)
1966 return -EINVAL;
1967 if (rq->protocol == ISDN_P_NONE)
1968 return -EINVAL;
1969 bch = &hc->bch[rq->adr.channel - 1];
1970 if (test_and_set_bit(FLG_OPEN, &bch->Flags))
1971 return -EBUSY; /* a B-channel can only be opened once */
1972 test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
1973 bch->ch.protocol = rq->protocol;
1974 rq->ch = &bch->ch; /* TODO: E-channel */
1975 if (!try_module_get(THIS_MODULE))
1976 printk(KERN_WARNING "%s: cannot get module\n", __func__);
1977 return 0;
1978 }
1980 /*
1981 * device control function
1982 */
1983 static int
1984 hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1985 {
1986 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
1987 struct dchannel *dch = container_of(dev, struct dchannel, dev);
1988 struct hfc_pci *hc = dch->hw;
1989 struct channel_req *rq;
1990 int err = 0;
1992 if (dch->debug & DEBUG_HW)
1993 printk(KERN_DEBUG "%s: cmd:%x %p\n",
1994 __func__, cmd, arg);
1995 switch (cmd) {
1996 case OPEN_CHANNEL:
1997 rq = arg;
1998 if ((rq->protocol == ISDN_P_TE_S0) ||
1999 (rq->protocol == ISDN_P_NT_S0))
2000 err = open_dchannel(hc, ch, rq);
2001 else
2002 err = open_bchannel(hc, rq);
2003 break;
2004 case CLOSE_CHANNEL:
2005 if (debug & DEBUG_HW_OPEN)
2006 printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
2007 __func__, hc->dch.dev.id,
2008 __builtin_return_address(0));
2009 module_put(THIS_MODULE);
2010 break;
2011 case CONTROL_CHANNEL:
2012 err = channel_ctrl(hc, arg);
2013 break;
2014 default:
2015 if (dch->debug & DEBUG_HW)
2016 printk(KERN_DEBUG "%s: unknown command %x\n",
2017 __func__, cmd);
2018 return -EINVAL;
2019 }
2020 return err;
2021 }
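/*
 * setup_hw - low level hardware setup: check IRQ and IO window,
 * allocate the 32 KiB DMA memory for the fifos, announce it to the
 * controller and map the register window.
 */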
2023 static int
2024 setup_hw(struct hfc_pci *hc)
2025 {
2026 void *buffer;
2028 printk(KERN_INFO "mISDN: HFC-PCI driver %s\n", hfcpci_revision);
2029 hc->hw.cirm = 0;
2030 hc->dch.state = 0;
2031 pci_set_master(hc->pdev);
2032 if (!hc->irq) {
2033 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
2034 return 1;
2035 }
2036 hc->hw.pci_io =
2037 (char __iomem *)(unsigned long)hc->pdev->resource[1].start;
2039 if (!hc->hw.pci_io) {
2040 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
2041 return 1;
2042 }
2043 /* Allocate memory for FIFOS */
2044 /* the memory needs to be on a 32k boundary within the first 4G */
2045 pci_set_dma_mask(hc->pdev, 0xFFFF8000);
2046 buffer = pci_alloc_consistent(hc->pdev, 0x8000, &hc->hw.dmahandle);
2047 /* We silently assume the address is okay if nonzero */
2048 if (!buffer) {
2049 printk(KERN_WARNING
2050 "HFC-PCI: Error allocating memory for FIFO!\n");
2051 return 1;
2052 }
2053 hc->hw.fifos = buffer;
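/*
 * announce the fifo memory to the controller; PCI config offset 0x80
 * is the memory window base address (MWBA) register of the HFC-PCI
 */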
2054 pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
2055 hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
2056 printk(KERN_INFO
2057 "HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n",
2058 (u_long) hc->hw.pci_io, (u_long) hc->hw.fifos,
2059 (u_long) hc->hw.dmahandle, hc->irq, HZ);
2060 /* enable memory mapped ports, disable busmaster */
2061 pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
2062 hc->hw.int_m2 = 0;
2063 disable_hwirq(hc);
2064 hc->hw.int_m1 = 0;
2065 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
2066 /* At this point the needed PCI config is done */
2067 /* fifos are still not enabled */
2068 hc->hw.timer.function = (void *) hfcpci_Timer;
2069 hc->hw.timer.data = (long) hc;
2070 init_timer(&hc->hw.timer);
2071 /* default PCM master */
2072 test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
2073 return 0;
2074 }
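/*
 * release_card - disable interrupts, reset both B-channels, free IRQ,
 * IO mapping and fifo memory and unregister the device from mISDN.
 */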
2076 static void
2077 release_card(struct hfc_pci *hc) {
2078 u_long flags;
2080 spin_lock_irqsave(&hc->lock, flags);
2081 hc->hw.int_m2 = 0; /* interrupt output off ! */
2082 disable_hwirq(hc);
2083 mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE);
2084 mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE);
2085 if (hc->dch.timer.function != NULL) {
2086 del_timer(&hc->dch.timer);
2087 hc->dch.timer.function = NULL;
2088 }
2089 spin_unlock_irqrestore(&hc->lock, flags);
2090 if (hc->hw.protocol == ISDN_P_TE_S0)
2091 l1_event(hc->dch.l1, CLOSE_CHANNEL);
2092 if (hc->initdone)
2093 free_irq(hc->irq, hc);
2094 release_io_hfcpci(hc); /* must release after free_irq! */
2095 mISDN_unregister_device(&hc->dch.dev);
2096 mISDN_freebchannel(&hc->bch[1]);
2097 mISDN_freebchannel(&hc->bch[0]);
2098 mISDN_freedchannel(&hc->dch);
2099 pci_set_drvdata(hc->pdev, NULL);
2100 kfree(hc);
2101 }
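/*
 * setup_card - initialise the D- and B-channel structures, run the
 * hardware setup and register the new device with the mISDN core.
 */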
2103 static int
2104 setup_card(struct hfc_pci *card)
2105 {
2106 int err = -EINVAL;
2107 u_int i;
2108 char name[MISDN_MAX_IDLEN];
2110 card->dch.debug = debug;
2111 spin_lock_init(&card->lock);
2112 mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state);
2113 card->dch.hw = card;
2114 card->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
2115 card->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
2116 (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
2117 card->dch.dev.D.send = hfcpci_l2l1D;
2118 card->dch.dev.D.ctrl = hfc_dctrl;
2119 card->dch.dev.nrbchan = 2;
2120 for (i = 0; i < 2; i++) {
2121 card->bch[i].nr = i + 1;
2122 set_channelmap(i + 1, card->dch.dev.channelmap);
2123 card->bch[i].debug = debug;
2124 mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM);
2125 card->bch[i].hw = card;
2126 card->bch[i].ch.send = hfcpci_l2l1B;
2127 card->bch[i].ch.ctrl = hfc_bctrl;
2128 card->bch[i].ch.nr = i + 1;
2129 list_add(&card->bch[i].ch.list, &card->dch.dev.bchannels);
2130 }
2131 err = setup_hw(card);
2132 if (err)
2133 goto error;
2134 snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1);
2135 err = mISDN_register_device(&card->dch.dev, &card->pdev->dev, name);
2136 if (err)
2137 goto error;
2138 HFC_cnt++;
2139 printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt);
2140 return 0;
2141 error:
2142 mISDN_freebchannel(&card->bch[1]);
2143 mISDN_freebchannel(&card->bch[0]);
2144 mISDN_freedchannel(&card->dch);
2145 kfree(card);
2146 return err;
2147 }
2149 /* private data in the PCI devices list */
2150 struct _hfc_map {
2151 u_int subtype;
2152 u_int flag;
2153 char *name;
2154 };
2156 static const struct _hfc_map hfc_map[] =
2157 {
2158 {HFC_CCD_2BD0, 0, "CCD/Billion/Asuscom 2BD0"},
2159 {HFC_CCD_B000, 0, "Billion B000"},
2160 {HFC_CCD_B006, 0, "Billion B006"},
2161 {HFC_CCD_B007, 0, "Billion B007"},
2162 {HFC_CCD_B008, 0, "Billion B008"},
2163 {HFC_CCD_B009, 0, "Billion B009"},
2164 {HFC_CCD_B00A, 0, "Billion B00A"},
2165 {HFC_CCD_B00B, 0, "Billion B00B"},
2166 {HFC_CCD_B00C, 0, "Billion B00C"},
2167 {HFC_CCD_B100, 0, "Seyeon B100"},
2168 {HFC_CCD_B700, 0, "Primux II S0 B700"},
2169 {HFC_CCD_B701, 0, "Primux II S0 NT B701"},
2170 {HFC_ABOCOM_2BD1, 0, "Abocom/Magitek 2BD1"},
2171 {HFC_ASUS_0675, 0, "Asuscom/Askey 675"},
2172 {HFC_BERKOM_TCONCEPT, 0, "German telekom T-Concept"},
2173 {HFC_BERKOM_A1T, 0, "German telekom A1T"},
2174 {HFC_ANIGMA_MC145575, 0, "Motorola MC145575"},
2175 {HFC_ZOLTRIX_2BD0, 0, "Zoltrix 2BD0"},
2176 {HFC_DIGI_DF_M_IOM2_E, 0,
2177 "Digi International DataFire Micro V IOM2 (Europe)"},
2178 {HFC_DIGI_DF_M_E, 0,
2179 "Digi International DataFire Micro V (Europe)"},
2180 {HFC_DIGI_DF_M_IOM2_A, 0,
2181 "Digi International DataFire Micro V IOM2 (North America)"},
2182 {HFC_DIGI_DF_M_A, 0,
2183 "Digi International DataFire Micro V (North America)"},
2184 {HFC_SITECOM_DC105V2, 0, "Sitecom Connectivity DC-105 ISDN TA"},
2185 {},
2186 };
2188 static struct pci_device_id hfc_ids[] =
2189 {
2190 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0),
2191 (unsigned long) &hfc_map[0] },
2192 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B000),
2193 (unsigned long) &hfc_map[1] },
2194 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B006),
2195 (unsigned long) &hfc_map[2] },
2196 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B007),
2197 (unsigned long) &hfc_map[3] },
2198 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B008),
2199 (unsigned long) &hfc_map[4] },
2200 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B009),
2201 (unsigned long) &hfc_map[5] },
2202 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00A),
2203 (unsigned long) &hfc_map[6] },
2204 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00B),
2205 (unsigned long) &hfc_map[7] },
2206 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B00C),
2207 (unsigned long) &hfc_map[8] },
2208 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B100),
2209 (unsigned long) &hfc_map[9] },
2210 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B700),
2211 (unsigned long) &hfc_map[10] },
2212 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_B701),
2213 (unsigned long) &hfc_map[11] },
2214 { PCI_VDEVICE(ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1),
2215 (unsigned long) &hfc_map[12] },
2216 { PCI_VDEVICE(ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675),
2217 (unsigned long) &hfc_map[13] },
2218 { PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT),
2219 (unsigned long) &hfc_map[14] },
2220 { PCI_VDEVICE(BERKOM, PCI_DEVICE_ID_BERKOM_A1T),
2221 (unsigned long) &hfc_map[15] },
2222 { PCI_VDEVICE(ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575),
2223 (unsigned long) &hfc_map[16] },
2224 { PCI_VDEVICE(ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0),
2225 (unsigned long) &hfc_map[17] },
2226 { PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E),
2227 (unsigned long) &hfc_map[18] },
2228 { PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_E),
2229 (unsigned long) &hfc_map[19] },
2230 { PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A),
2231 (unsigned long) &hfc_map[20] },
2232 { PCI_VDEVICE(DIGI, PCI_DEVICE_ID_DIGI_DF_M_A),
2233 (unsigned long) &hfc_map[21] },
2234 { PCI_VDEVICE(SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2),
2235 (unsigned long) &hfc_map[22] },
2236 {},
2237 };
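/*
 * hfc_probe - PCI probe callback: allocate the card structure, enable
 * the PCI device and set up the found adapter.
 */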
2239 static int __devinit
2240 hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2241 {
2242 int err = -ENOMEM;
2243 struct hfc_pci *card;
2244 struct _hfc_map *m = (struct _hfc_map *)ent->driver_data;
2246 card = kzalloc(sizeof(struct hfc_pci), GFP_ATOMIC);
2247 if (!card) {
2248 printk(KERN_ERR "No kmem for HFC card\n");
2249 return err;
2250 }
2251 card->pdev = pdev;
2252 card->subtype = m->subtype;
2253 err = pci_enable_device(pdev);
2254 if (err) {
2255 kfree(card);
2256 return err;
2257 }
2259 printk(KERN_INFO "mISDN_hfcpci: found adapter %s at %s\n",
2260 m->name, pci_name(pdev));
2262 card->irq = pdev->irq;
2263 pci_set_drvdata(pdev, card);
2264 err = setup_card(card);
2265 if (err)
2266 pci_set_drvdata(pdev, NULL);
2267 return err;
2268 }
2270 static void __devexit
2271 hfc_remove_pci(struct pci_dev *pdev)
2272 {
2273 struct hfc_pci *card = pci_get_drvdata(pdev);
2275 if (card)
2276 release_card(card);
2277 else
2278 if (debug)
2279 printk(KERN_DEBUG "%s: drvdata already removed\n",
2280 __func__);
2281 }
2284 static struct pci_driver hfc_driver = {
2285 .name = "hfcpci",
2286 .probe = hfc_probe,
2287 .remove = __devexit_p(hfc_remove_pci),
2288 .id_table = hfc_ids,
2289 };
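/*
 * Timer driven polling, used when a non-default poll value is given:
 * feed rx/tx of all transparent (ISDN_P_B_RAW) B-channels of every
 * card and re-arm the global poll timer.
 */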
2291 static int
2292 _hfcpci_softirq(struct device *dev, void *arg)
2293 {
2294 struct hfc_pci *hc = dev_get_drvdata(dev);
2295 struct bchannel *bch;
2296 if (hc == NULL)
2297 return 0;
2299 if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
2300 spin_lock(&hc->lock);
2301 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
2302 if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
2303 main_rec_hfcpci(bch);
2304 tx_birq(bch);
2305 }
2306 bch = Sel_BCS(hc, hc->hw.bswapped ? 1 : 2);
2307 if (bch && bch->state == ISDN_P_B_RAW) { /* B2 rx&tx */
2308 main_rec_hfcpci(bch);
2309 tx_birq(bch);
2310 }
2311 spin_unlock(&hc->lock);
2312 }
2313 return 0;
2314 }
2316 static void
2317 hfcpci_softirq(void *arg)
2318 {
2319 (void) driver_for_each_device(&hfc_driver.driver, NULL, arg,
2320 _hfcpci_softirq);
2322 /* if next event would be in the past ... */
2323 if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
2324 hfc_jiffies = jiffies + 1;
2325 else
2326 hfc_jiffies += tics;
2327 hfc_tl.expires = hfc_jiffies;
2328 add_timer(&hfc_tl);
2329 }
2331 static int __init
2332 HFC_init(void)
2333 {
2334 int err;
2336 if (!poll)
2337 poll = HFCPCI_BTRANS_THRESHOLD;
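/*
 * A non-default poll value is rounded to whole kernel timer ticks:
 * tics = poll * HZ / 8000, poll = tics * 8000 / HZ; e.g. with HZ=250
 * a requested poll of 120 becomes tics=3 and an effective poll of 96.
 */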
2339 if (poll != HFCPCI_BTRANS_THRESHOLD) {
2340 tics = (poll * HZ) / 8000;
2341 if (tics < 1)
2342 tics = 1;
2343 poll = (tics * 8000) / HZ;
2344 if (poll > 256 || poll < 8) {
2345 printk(KERN_ERR "%s: Wrong poll value %d not in range "
2346 "of 8..256.\n", __func__, poll);
2347 err = -EINVAL;
2348 return err;
2349 }
2350 }
2351 if (poll != HFCPCI_BTRANS_THRESHOLD) {
2352 printk(KERN_INFO "%s: Using alternative poll value of %d\n",
2353 __func__, poll);
2354 hfc_tl.function = (void *)hfcpci_softirq;
2355 hfc_tl.data = 0;
2356 init_timer(&hfc_tl);
2357 hfc_tl.expires = jiffies + tics;
2358 hfc_jiffies = hfc_tl.expires;
2359 add_timer(&hfc_tl);
2360 } else
2361 tics = 0; /* indicate the use of controller's timer */
2363 err = pci_register_driver(&hfc_driver);
2364 if (err) {
2365 if (timer_pending(&hfc_tl))
2366 del_timer(&hfc_tl);
2367 }
2369 return err;
2370 }
2372 static void __exit
2373 HFC_cleanup(void)
2374 {
2375 if (timer_pending(&hfc_tl))
2376 del_timer(&hfc_tl);
2378 pci_unregister_driver(&hfc_driver);
2379 }
2381 module_init(HFC_init);
2382 module_exit(HFC_cleanup);
2384 MODULE_DEVICE_TABLE(pci, hfc_ids);