/*
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008  by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/mISDNhw.h>
23 dchannel_bh(struct work_struct
*ws
)
25 struct dchannel
*dch
= container_of(ws
, struct dchannel
, workq
);
29 if (test_and_clear_bit(FLG_RECVQUEUE
, &dch
->Flags
)) {
30 while ((skb
= skb_dequeue(&dch
->rqueue
))) {
31 if (likely(dch
->dev
.D
.peer
)) {
32 err
= dch
->dev
.D
.recv(dch
->dev
.D
.peer
, skb
);
39 if (test_and_clear_bit(FLG_PHCHANGE
, &dch
->Flags
)) {
46 bchannel_bh(struct work_struct
*ws
)
48 struct bchannel
*bch
= container_of(ws
, struct bchannel
, workq
);
52 if (test_and_clear_bit(FLG_RECVQUEUE
, &bch
->Flags
)) {
53 while ((skb
= skb_dequeue(&bch
->rqueue
))) {
55 if (likely(bch
->ch
.peer
)) {
56 err
= bch
->ch
.recv(bch
->ch
.peer
, skb
);
66 mISDN_initdchannel(struct dchannel
*ch
, int maxlen
, void *phf
)
68 test_and_set_bit(FLG_HDLC
, &ch
->Flags
);
75 skb_queue_head_init(&ch
->squeue
);
76 skb_queue_head_init(&ch
->rqueue
);
77 INIT_LIST_HEAD(&ch
->dev
.bchannels
);
78 INIT_WORK(&ch
->workq
, dchannel_bh
);
81 EXPORT_SYMBOL(mISDN_initdchannel
);
84 mISDN_initbchannel(struct bchannel
*ch
, unsigned short maxlen
,
85 unsigned short minlen
)
89 ch
->next_minlen
= minlen
;
90 ch
->init_minlen
= minlen
;
92 ch
->next_maxlen
= maxlen
;
93 ch
->init_maxlen
= maxlen
;
98 skb_queue_head_init(&ch
->rqueue
);
101 INIT_WORK(&ch
->workq
, bchannel_bh
);
104 EXPORT_SYMBOL(mISDN_initbchannel
);
107 mISDN_freedchannel(struct dchannel
*ch
)
110 dev_kfree_skb(ch
->tx_skb
);
114 dev_kfree_skb(ch
->rx_skb
);
117 skb_queue_purge(&ch
->squeue
);
118 skb_queue_purge(&ch
->rqueue
);
119 flush_work(&ch
->workq
);
122 EXPORT_SYMBOL(mISDN_freedchannel
);
125 mISDN_clear_bchannel(struct bchannel
*ch
)
128 dev_kfree_skb(ch
->tx_skb
);
133 dev_kfree_skb(ch
->rx_skb
);
137 dev_kfree_skb(ch
->next_skb
);
140 test_and_clear_bit(FLG_TX_BUSY
, &ch
->Flags
);
141 test_and_clear_bit(FLG_TX_NEXT
, &ch
->Flags
);
142 test_and_clear_bit(FLG_ACTIVE
, &ch
->Flags
);
143 test_and_clear_bit(FLG_FILLEMPTY
, &ch
->Flags
);
144 test_and_clear_bit(FLG_TX_EMPTY
, &ch
->Flags
);
145 test_and_clear_bit(FLG_RX_OFF
, &ch
->Flags
);
147 ch
->minlen
= ch
->init_minlen
;
148 ch
->next_minlen
= ch
->init_minlen
;
149 ch
->maxlen
= ch
->init_maxlen
;
150 ch
->next_maxlen
= ch
->init_maxlen
;
151 skb_queue_purge(&ch
->rqueue
);
154 EXPORT_SYMBOL(mISDN_clear_bchannel
);
157 mISDN_freebchannel(struct bchannel
*ch
)
159 cancel_work_sync(&ch
->workq
);
160 mISDN_clear_bchannel(ch
);
162 EXPORT_SYMBOL(mISDN_freebchannel
);
165 mISDN_ctrl_bchannel(struct bchannel
*bch
, struct mISDN_ctrl_req
*cq
)
170 case MISDN_CTRL_GETOP
:
171 cq
->op
= MISDN_CTRL_RX_BUFFER
| MISDN_CTRL_FILL_EMPTY
|
174 case MISDN_CTRL_FILL_EMPTY
:
176 memset(bch
->fill
, cq
->p2
& 0xff, MISDN_BCH_FILL_SIZE
);
177 test_and_set_bit(FLG_FILLEMPTY
, &bch
->Flags
);
179 test_and_clear_bit(FLG_FILLEMPTY
, &bch
->Flags
);
182 case MISDN_CTRL_RX_OFF
:
183 /* read back dropped byte count */
184 cq
->p2
= bch
->dropcnt
;
186 test_and_set_bit(FLG_RX_OFF
, &bch
->Flags
);
188 test_and_clear_bit(FLG_RX_OFF
, &bch
->Flags
);
191 case MISDN_CTRL_RX_BUFFER
:
192 if (cq
->p2
> MISDN_CTRL_RX_SIZE_IGNORE
)
193 bch
->next_maxlen
= cq
->p2
;
194 if (cq
->p1
> MISDN_CTRL_RX_SIZE_IGNORE
)
195 bch
->next_minlen
= cq
->p1
;
196 /* we return the old values */
197 cq
->p1
= bch
->minlen
;
198 cq
->p2
= bch
->maxlen
;
201 pr_info("mISDN unhandled control %x operation\n", cq
->op
);
207 EXPORT_SYMBOL(mISDN_ctrl_bchannel
);
210 get_sapi_tei(u_char
*p
)
216 return sapi
| (tei
<< 8);
220 recv_Dchannel(struct dchannel
*dch
)
222 struct mISDNhead
*hh
;
224 if (dch
->rx_skb
->len
< 2) { /* at least 2 for sapi / tei */
225 dev_kfree_skb(dch
->rx_skb
);
229 hh
= mISDN_HEAD_P(dch
->rx_skb
);
230 hh
->prim
= PH_DATA_IND
;
231 hh
->id
= get_sapi_tei(dch
->rx_skb
->data
);
232 skb_queue_tail(&dch
->rqueue
, dch
->rx_skb
);
234 schedule_event(dch
, FLG_RECVQUEUE
);
236 EXPORT_SYMBOL(recv_Dchannel
);
239 recv_Echannel(struct dchannel
*ech
, struct dchannel
*dch
)
241 struct mISDNhead
*hh
;
243 if (ech
->rx_skb
->len
< 2) { /* at least 2 for sapi / tei */
244 dev_kfree_skb(ech
->rx_skb
);
248 hh
= mISDN_HEAD_P(ech
->rx_skb
);
249 hh
->prim
= PH_DATA_E_IND
;
250 hh
->id
= get_sapi_tei(ech
->rx_skb
->data
);
251 skb_queue_tail(&dch
->rqueue
, ech
->rx_skb
);
253 schedule_event(dch
, FLG_RECVQUEUE
);
255 EXPORT_SYMBOL(recv_Echannel
);
258 recv_Bchannel(struct bchannel
*bch
, unsigned int id
, bool force
)
260 struct mISDNhead
*hh
;
262 /* if allocation did fail upper functions still may call us */
263 if (unlikely(!bch
->rx_skb
))
265 if (unlikely(!bch
->rx_skb
->len
)) {
266 /* we have no data to send - this may happen after recovery
267 * from overflow or too small allocation.
268 * We need to free the buffer here */
269 dev_kfree_skb(bch
->rx_skb
);
272 if (test_bit(FLG_TRANSPARENT
, &bch
->Flags
) &&
273 (bch
->rx_skb
->len
< bch
->minlen
) && !force
)
275 hh
= mISDN_HEAD_P(bch
->rx_skb
);
276 hh
->prim
= PH_DATA_IND
;
278 if (bch
->rcount
>= 64) {
280 "B%d receive queue overflow - flushing!\n",
282 skb_queue_purge(&bch
->rqueue
);
285 skb_queue_tail(&bch
->rqueue
, bch
->rx_skb
);
287 schedule_event(bch
, FLG_RECVQUEUE
);
290 EXPORT_SYMBOL(recv_Bchannel
);
293 recv_Dchannel_skb(struct dchannel
*dch
, struct sk_buff
*skb
)
295 skb_queue_tail(&dch
->rqueue
, skb
);
296 schedule_event(dch
, FLG_RECVQUEUE
);
298 EXPORT_SYMBOL(recv_Dchannel_skb
);
301 recv_Bchannel_skb(struct bchannel
*bch
, struct sk_buff
*skb
)
303 if (bch
->rcount
>= 64) {
304 printk(KERN_WARNING
"B-channel %p receive queue overflow, "
306 skb_queue_purge(&bch
->rqueue
);
310 skb_queue_tail(&bch
->rqueue
, skb
);
311 schedule_event(bch
, FLG_RECVQUEUE
);
313 EXPORT_SYMBOL(recv_Bchannel_skb
);
316 confirm_Dsend(struct dchannel
*dch
)
320 skb
= _alloc_mISDN_skb(PH_DATA_CNF
, mISDN_HEAD_ID(dch
->tx_skb
),
321 0, NULL
, GFP_ATOMIC
);
323 printk(KERN_ERR
"%s: no skb id %x\n", __func__
,
324 mISDN_HEAD_ID(dch
->tx_skb
));
327 skb_queue_tail(&dch
->rqueue
, skb
);
328 schedule_event(dch
, FLG_RECVQUEUE
);
332 get_next_dframe(struct dchannel
*dch
)
335 dch
->tx_skb
= skb_dequeue(&dch
->squeue
);
341 test_and_clear_bit(FLG_TX_BUSY
, &dch
->Flags
);
344 EXPORT_SYMBOL(get_next_dframe
);
347 confirm_Bsend(struct bchannel
*bch
)
351 if (bch
->rcount
>= 64) {
352 printk(KERN_WARNING
"B-channel %p receive queue overflow, "
354 skb_queue_purge(&bch
->rqueue
);
357 skb
= _alloc_mISDN_skb(PH_DATA_CNF
, mISDN_HEAD_ID(bch
->tx_skb
),
358 0, NULL
, GFP_ATOMIC
);
360 printk(KERN_ERR
"%s: no skb id %x\n", __func__
,
361 mISDN_HEAD_ID(bch
->tx_skb
));
365 skb_queue_tail(&bch
->rqueue
, skb
);
366 schedule_event(bch
, FLG_RECVQUEUE
);
370 get_next_bframe(struct bchannel
*bch
)
373 if (test_bit(FLG_TX_NEXT
, &bch
->Flags
)) {
374 bch
->tx_skb
= bch
->next_skb
;
376 bch
->next_skb
= NULL
;
377 test_and_clear_bit(FLG_TX_NEXT
, &bch
->Flags
);
378 /* confirm imediately to allow next data */
382 test_and_clear_bit(FLG_TX_NEXT
, &bch
->Flags
);
383 printk(KERN_WARNING
"B TX_NEXT without skb\n");
387 test_and_clear_bit(FLG_TX_BUSY
, &bch
->Flags
);
390 EXPORT_SYMBOL(get_next_bframe
);
393 queue_ch_frame(struct mISDNchannel
*ch
, u_int pr
, int id
, struct sk_buff
*skb
)
395 struct mISDNhead
*hh
;
398 _queue_data(ch
, pr
, id
, 0, NULL
, GFP_ATOMIC
);
401 hh
= mISDN_HEAD_P(skb
);
404 if (!ch
->recv(ch
->peer
, skb
))
410 EXPORT_SYMBOL(queue_ch_frame
);
413 dchannel_senddata(struct dchannel
*ch
, struct sk_buff
*skb
)
417 printk(KERN_WARNING
"%s: skb too small\n", __func__
);
420 if (skb
->len
> ch
->maxlen
) {
421 printk(KERN_WARNING
"%s: skb too large(%d/%d)\n",
422 __func__
, skb
->len
, ch
->maxlen
);
425 /* HW lock must be obtained */
426 if (test_and_set_bit(FLG_TX_BUSY
, &ch
->Flags
)) {
427 skb_queue_tail(&ch
->squeue
, skb
);
436 EXPORT_SYMBOL(dchannel_senddata
);
439 bchannel_senddata(struct bchannel
*ch
, struct sk_buff
*skb
)
444 printk(KERN_WARNING
"%s: skb too small\n", __func__
);
447 if (skb
->len
> ch
->maxlen
) {
448 printk(KERN_WARNING
"%s: skb too large(%d/%d)\n",
449 __func__
, skb
->len
, ch
->maxlen
);
452 /* HW lock must be obtained */
453 /* check for pending next_skb */
456 "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
457 __func__
, skb
->len
, ch
->next_skb
->len
);
460 if (test_and_set_bit(FLG_TX_BUSY
, &ch
->Flags
)) {
461 test_and_set_bit(FLG_TX_NEXT
, &ch
->Flags
);
472 EXPORT_SYMBOL(bchannel_senddata
);
474 /* The function allocates a new receive skb on demand with a size for the
475 * requirements of the current protocol. It returns the tailroom of the
476 * receive skb or an error.
479 bchannel_get_rxbuf(struct bchannel
*bch
, int reqlen
)
484 len
= skb_tailroom(bch
->rx_skb
);
486 pr_warning("B%d no space for %d (only %d) bytes\n",
487 bch
->nr
, reqlen
, len
);
488 if (test_bit(FLG_TRANSPARENT
, &bch
->Flags
)) {
489 /* send what we have now and try a new buffer */
490 recv_Bchannel(bch
, 0, true);
492 /* on HDLC we have to drop too big frames */
499 /* update current min/max length first */
500 if (unlikely(bch
->maxlen
!= bch
->next_maxlen
))
501 bch
->maxlen
= bch
->next_maxlen
;
502 if (unlikely(bch
->minlen
!= bch
->next_minlen
))
503 bch
->minlen
= bch
->next_minlen
;
504 if (unlikely(reqlen
> bch
->maxlen
))
506 if (test_bit(FLG_TRANSPARENT
, &bch
->Flags
)) {
507 if (reqlen
>= bch
->minlen
) {
510 len
= 2 * bch
->minlen
;
511 if (len
> bch
->maxlen
)
515 /* with HDLC we do not know the length yet */
518 bch
->rx_skb
= mI_alloc_skb(len
, GFP_ATOMIC
);
520 pr_warning("B%d receive no memory for %d bytes\n",
526 EXPORT_SYMBOL(bchannel_get_rxbuf
);