// SPDX-License-Identifier: GPL-2.0-only
/*
 * Author Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
 */
10 #include <linux/module.h>
11 #include <linux/mISDNhw.h>
14 dchannel_bh(struct work_struct
*ws
)
16 struct dchannel
*dch
= container_of(ws
, struct dchannel
, workq
);
20 if (test_and_clear_bit(FLG_RECVQUEUE
, &dch
->Flags
)) {
21 while ((skb
= skb_dequeue(&dch
->rqueue
))) {
22 if (likely(dch
->dev
.D
.peer
)) {
23 err
= dch
->dev
.D
.recv(dch
->dev
.D
.peer
, skb
);
30 if (test_and_clear_bit(FLG_PHCHANGE
, &dch
->Flags
)) {
37 bchannel_bh(struct work_struct
*ws
)
39 struct bchannel
*bch
= container_of(ws
, struct bchannel
, workq
);
43 if (test_and_clear_bit(FLG_RECVQUEUE
, &bch
->Flags
)) {
44 while ((skb
= skb_dequeue(&bch
->rqueue
))) {
46 if (likely(bch
->ch
.peer
)) {
47 err
= bch
->ch
.recv(bch
->ch
.peer
, skb
);
57 mISDN_initdchannel(struct dchannel
*ch
, int maxlen
, void *phf
)
59 test_and_set_bit(FLG_HDLC
, &ch
->Flags
);
66 skb_queue_head_init(&ch
->squeue
);
67 skb_queue_head_init(&ch
->rqueue
);
68 INIT_LIST_HEAD(&ch
->dev
.bchannels
);
69 INIT_WORK(&ch
->workq
, dchannel_bh
);
72 EXPORT_SYMBOL(mISDN_initdchannel
);
75 mISDN_initbchannel(struct bchannel
*ch
, unsigned short maxlen
,
76 unsigned short minlen
)
80 ch
->next_minlen
= minlen
;
81 ch
->init_minlen
= minlen
;
83 ch
->next_maxlen
= maxlen
;
84 ch
->init_maxlen
= maxlen
;
89 skb_queue_head_init(&ch
->rqueue
);
92 INIT_WORK(&ch
->workq
, bchannel_bh
);
95 EXPORT_SYMBOL(mISDN_initbchannel
);
98 mISDN_freedchannel(struct dchannel
*ch
)
101 dev_kfree_skb(ch
->tx_skb
);
105 dev_kfree_skb(ch
->rx_skb
);
108 skb_queue_purge(&ch
->squeue
);
109 skb_queue_purge(&ch
->rqueue
);
110 flush_work(&ch
->workq
);
113 EXPORT_SYMBOL(mISDN_freedchannel
);
116 mISDN_clear_bchannel(struct bchannel
*ch
)
119 dev_kfree_skb(ch
->tx_skb
);
124 dev_kfree_skb(ch
->rx_skb
);
128 dev_kfree_skb(ch
->next_skb
);
131 test_and_clear_bit(FLG_TX_BUSY
, &ch
->Flags
);
132 test_and_clear_bit(FLG_TX_NEXT
, &ch
->Flags
);
133 test_and_clear_bit(FLG_ACTIVE
, &ch
->Flags
);
134 test_and_clear_bit(FLG_FILLEMPTY
, &ch
->Flags
);
135 test_and_clear_bit(FLG_TX_EMPTY
, &ch
->Flags
);
136 test_and_clear_bit(FLG_RX_OFF
, &ch
->Flags
);
138 ch
->minlen
= ch
->init_minlen
;
139 ch
->next_minlen
= ch
->init_minlen
;
140 ch
->maxlen
= ch
->init_maxlen
;
141 ch
->next_maxlen
= ch
->init_maxlen
;
142 skb_queue_purge(&ch
->rqueue
);
145 EXPORT_SYMBOL(mISDN_clear_bchannel
);
148 mISDN_freebchannel(struct bchannel
*ch
)
150 cancel_work_sync(&ch
->workq
);
151 mISDN_clear_bchannel(ch
);
153 EXPORT_SYMBOL(mISDN_freebchannel
);
156 mISDN_ctrl_bchannel(struct bchannel
*bch
, struct mISDN_ctrl_req
*cq
)
161 case MISDN_CTRL_GETOP
:
162 cq
->op
= MISDN_CTRL_RX_BUFFER
| MISDN_CTRL_FILL_EMPTY
|
165 case MISDN_CTRL_FILL_EMPTY
:
167 memset(bch
->fill
, cq
->p2
& 0xff, MISDN_BCH_FILL_SIZE
);
168 test_and_set_bit(FLG_FILLEMPTY
, &bch
->Flags
);
170 test_and_clear_bit(FLG_FILLEMPTY
, &bch
->Flags
);
173 case MISDN_CTRL_RX_OFF
:
174 /* read back dropped byte count */
175 cq
->p2
= bch
->dropcnt
;
177 test_and_set_bit(FLG_RX_OFF
, &bch
->Flags
);
179 test_and_clear_bit(FLG_RX_OFF
, &bch
->Flags
);
182 case MISDN_CTRL_RX_BUFFER
:
183 if (cq
->p2
> MISDN_CTRL_RX_SIZE_IGNORE
)
184 bch
->next_maxlen
= cq
->p2
;
185 if (cq
->p1
> MISDN_CTRL_RX_SIZE_IGNORE
)
186 bch
->next_minlen
= cq
->p1
;
187 /* we return the old values */
188 cq
->p1
= bch
->minlen
;
189 cq
->p2
= bch
->maxlen
;
192 pr_info("mISDN unhandled control %x operation\n", cq
->op
);
198 EXPORT_SYMBOL(mISDN_ctrl_bchannel
);
201 get_sapi_tei(u_char
*p
)
207 return sapi
| (tei
<< 8);
211 recv_Dchannel(struct dchannel
*dch
)
213 struct mISDNhead
*hh
;
215 if (dch
->rx_skb
->len
< 2) { /* at least 2 for sapi / tei */
216 dev_kfree_skb(dch
->rx_skb
);
220 hh
= mISDN_HEAD_P(dch
->rx_skb
);
221 hh
->prim
= PH_DATA_IND
;
222 hh
->id
= get_sapi_tei(dch
->rx_skb
->data
);
223 skb_queue_tail(&dch
->rqueue
, dch
->rx_skb
);
225 schedule_event(dch
, FLG_RECVQUEUE
);
227 EXPORT_SYMBOL(recv_Dchannel
);
230 recv_Echannel(struct dchannel
*ech
, struct dchannel
*dch
)
232 struct mISDNhead
*hh
;
234 if (ech
->rx_skb
->len
< 2) { /* at least 2 for sapi / tei */
235 dev_kfree_skb(ech
->rx_skb
);
239 hh
= mISDN_HEAD_P(ech
->rx_skb
);
240 hh
->prim
= PH_DATA_E_IND
;
241 hh
->id
= get_sapi_tei(ech
->rx_skb
->data
);
242 skb_queue_tail(&dch
->rqueue
, ech
->rx_skb
);
244 schedule_event(dch
, FLG_RECVQUEUE
);
246 EXPORT_SYMBOL(recv_Echannel
);
249 recv_Bchannel(struct bchannel
*bch
, unsigned int id
, bool force
)
251 struct mISDNhead
*hh
;
253 /* if allocation did fail upper functions still may call us */
254 if (unlikely(!bch
->rx_skb
))
256 if (unlikely(!bch
->rx_skb
->len
)) {
257 /* we have no data to send - this may happen after recovery
258 * from overflow or too small allocation.
259 * We need to free the buffer here */
260 dev_kfree_skb(bch
->rx_skb
);
263 if (test_bit(FLG_TRANSPARENT
, &bch
->Flags
) &&
264 (bch
->rx_skb
->len
< bch
->minlen
) && !force
)
266 hh
= mISDN_HEAD_P(bch
->rx_skb
);
267 hh
->prim
= PH_DATA_IND
;
269 if (bch
->rcount
>= 64) {
271 "B%d receive queue overflow - flushing!\n",
273 skb_queue_purge(&bch
->rqueue
);
276 skb_queue_tail(&bch
->rqueue
, bch
->rx_skb
);
278 schedule_event(bch
, FLG_RECVQUEUE
);
281 EXPORT_SYMBOL(recv_Bchannel
);
284 recv_Dchannel_skb(struct dchannel
*dch
, struct sk_buff
*skb
)
286 skb_queue_tail(&dch
->rqueue
, skb
);
287 schedule_event(dch
, FLG_RECVQUEUE
);
289 EXPORT_SYMBOL(recv_Dchannel_skb
);
292 recv_Bchannel_skb(struct bchannel
*bch
, struct sk_buff
*skb
)
294 if (bch
->rcount
>= 64) {
295 printk(KERN_WARNING
"B-channel %p receive queue overflow, "
297 skb_queue_purge(&bch
->rqueue
);
301 skb_queue_tail(&bch
->rqueue
, skb
);
302 schedule_event(bch
, FLG_RECVQUEUE
);
304 EXPORT_SYMBOL(recv_Bchannel_skb
);
307 confirm_Dsend(struct dchannel
*dch
)
311 skb
= _alloc_mISDN_skb(PH_DATA_CNF
, mISDN_HEAD_ID(dch
->tx_skb
),
312 0, NULL
, GFP_ATOMIC
);
314 printk(KERN_ERR
"%s: no skb id %x\n", __func__
,
315 mISDN_HEAD_ID(dch
->tx_skb
));
318 skb_queue_tail(&dch
->rqueue
, skb
);
319 schedule_event(dch
, FLG_RECVQUEUE
);
323 get_next_dframe(struct dchannel
*dch
)
326 dch
->tx_skb
= skb_dequeue(&dch
->squeue
);
332 test_and_clear_bit(FLG_TX_BUSY
, &dch
->Flags
);
335 EXPORT_SYMBOL(get_next_dframe
);
338 confirm_Bsend(struct bchannel
*bch
)
342 if (bch
->rcount
>= 64) {
343 printk(KERN_WARNING
"B-channel %p receive queue overflow, "
345 skb_queue_purge(&bch
->rqueue
);
348 skb
= _alloc_mISDN_skb(PH_DATA_CNF
, mISDN_HEAD_ID(bch
->tx_skb
),
349 0, NULL
, GFP_ATOMIC
);
351 printk(KERN_ERR
"%s: no skb id %x\n", __func__
,
352 mISDN_HEAD_ID(bch
->tx_skb
));
356 skb_queue_tail(&bch
->rqueue
, skb
);
357 schedule_event(bch
, FLG_RECVQUEUE
);
361 get_next_bframe(struct bchannel
*bch
)
364 if (test_bit(FLG_TX_NEXT
, &bch
->Flags
)) {
365 bch
->tx_skb
= bch
->next_skb
;
367 bch
->next_skb
= NULL
;
368 test_and_clear_bit(FLG_TX_NEXT
, &bch
->Flags
);
369 /* confirm imediately to allow next data */
373 test_and_clear_bit(FLG_TX_NEXT
, &bch
->Flags
);
374 printk(KERN_WARNING
"B TX_NEXT without skb\n");
378 test_and_clear_bit(FLG_TX_BUSY
, &bch
->Flags
);
381 EXPORT_SYMBOL(get_next_bframe
);
384 queue_ch_frame(struct mISDNchannel
*ch
, u_int pr
, int id
, struct sk_buff
*skb
)
386 struct mISDNhead
*hh
;
389 _queue_data(ch
, pr
, id
, 0, NULL
, GFP_ATOMIC
);
392 hh
= mISDN_HEAD_P(skb
);
395 if (!ch
->recv(ch
->peer
, skb
))
401 EXPORT_SYMBOL(queue_ch_frame
);
404 dchannel_senddata(struct dchannel
*ch
, struct sk_buff
*skb
)
408 printk(KERN_WARNING
"%s: skb too small\n", __func__
);
411 if (skb
->len
> ch
->maxlen
) {
412 printk(KERN_WARNING
"%s: skb too large(%d/%d)\n",
413 __func__
, skb
->len
, ch
->maxlen
);
416 /* HW lock must be obtained */
417 if (test_and_set_bit(FLG_TX_BUSY
, &ch
->Flags
)) {
418 skb_queue_tail(&ch
->squeue
, skb
);
427 EXPORT_SYMBOL(dchannel_senddata
);
430 bchannel_senddata(struct bchannel
*ch
, struct sk_buff
*skb
)
435 printk(KERN_WARNING
"%s: skb too small\n", __func__
);
438 if (skb
->len
> ch
->maxlen
) {
439 printk(KERN_WARNING
"%s: skb too large(%d/%d)\n",
440 __func__
, skb
->len
, ch
->maxlen
);
443 /* HW lock must be obtained */
444 /* check for pending next_skb */
447 "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
448 __func__
, skb
->len
, ch
->next_skb
->len
);
451 if (test_and_set_bit(FLG_TX_BUSY
, &ch
->Flags
)) {
452 test_and_set_bit(FLG_TX_NEXT
, &ch
->Flags
);
463 EXPORT_SYMBOL(bchannel_senddata
);
465 /* The function allocates a new receive skb on demand with a size for the
466 * requirements of the current protocol. It returns the tailroom of the
467 * receive skb or an error.
470 bchannel_get_rxbuf(struct bchannel
*bch
, int reqlen
)
475 len
= skb_tailroom(bch
->rx_skb
);
477 pr_warn("B%d no space for %d (only %d) bytes\n",
478 bch
->nr
, reqlen
, len
);
479 if (test_bit(FLG_TRANSPARENT
, &bch
->Flags
)) {
480 /* send what we have now and try a new buffer */
481 recv_Bchannel(bch
, 0, true);
483 /* on HDLC we have to drop too big frames */
490 /* update current min/max length first */
491 if (unlikely(bch
->maxlen
!= bch
->next_maxlen
))
492 bch
->maxlen
= bch
->next_maxlen
;
493 if (unlikely(bch
->minlen
!= bch
->next_minlen
))
494 bch
->minlen
= bch
->next_minlen
;
495 if (unlikely(reqlen
> bch
->maxlen
))
497 if (test_bit(FLG_TRANSPARENT
, &bch
->Flags
)) {
498 if (reqlen
>= bch
->minlen
) {
501 len
= 2 * bch
->minlen
;
502 if (len
> bch
->maxlen
)
506 /* with HDLC we do not know the length yet */
509 bch
->rx_skb
= mI_alloc_skb(len
, GFP_ATOMIC
);
511 pr_warn("B%d receive no memory for %d bytes\n", bch
->nr
, len
);
516 EXPORT_SYMBOL(bchannel_get_rxbuf
);