conn rcv_lock converted to spinlock, struct cor_sock created, kernel_packet skb_clone...
[cor_2_6_31.git] / drivers / isdn / mISDN / hwchannel.c
blob0481a0cdf6db9a1915ea8423f4d389655e86c66f
/*
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008  by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
18 #include <linux/module.h>
19 #include <linux/mISDNhw.h>
21 static void
22 dchannel_bh(struct work_struct *ws)
24 struct dchannel *dch = container_of(ws, struct dchannel, workq);
25 struct sk_buff *skb;
26 int err;
28 if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
29 while ((skb = skb_dequeue(&dch->rqueue))) {
30 if (likely(dch->dev.D.peer)) {
31 err = dch->dev.D.recv(dch->dev.D.peer, skb);
32 if (err)
33 dev_kfree_skb(skb);
34 } else
35 dev_kfree_skb(skb);
38 if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
39 if (dch->phfunc)
40 dch->phfunc(dch);
44 static void
45 bchannel_bh(struct work_struct *ws)
47 struct bchannel *bch = container_of(ws, struct bchannel, workq);
48 struct sk_buff *skb;
49 int err;
51 if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
52 while ((skb = skb_dequeue(&bch->rqueue))) {
53 bch->rcount--;
54 if (likely(bch->ch.peer)) {
55 err = bch->ch.recv(bch->ch.peer, skb);
56 if (err)
57 dev_kfree_skb(skb);
58 } else
59 dev_kfree_skb(skb);
64 int
65 mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
67 test_and_set_bit(FLG_HDLC, &ch->Flags);
68 ch->maxlen = maxlen;
69 ch->hw = NULL;
70 ch->rx_skb = NULL;
71 ch->tx_skb = NULL;
72 ch->tx_idx = 0;
73 ch->phfunc = phf;
74 skb_queue_head_init(&ch->squeue);
75 skb_queue_head_init(&ch->rqueue);
76 INIT_LIST_HEAD(&ch->dev.bchannels);
77 INIT_WORK(&ch->workq, dchannel_bh);
78 return 0;
80 EXPORT_SYMBOL(mISDN_initdchannel);
82 int
83 mISDN_initbchannel(struct bchannel *ch, int maxlen)
85 ch->Flags = 0;
86 ch->maxlen = maxlen;
87 ch->hw = NULL;
88 ch->rx_skb = NULL;
89 ch->tx_skb = NULL;
90 ch->tx_idx = 0;
91 skb_queue_head_init(&ch->rqueue);
92 ch->rcount = 0;
93 ch->next_skb = NULL;
94 INIT_WORK(&ch->workq, bchannel_bh);
95 return 0;
97 EXPORT_SYMBOL(mISDN_initbchannel);
99 int
100 mISDN_freedchannel(struct dchannel *ch)
102 if (ch->tx_skb) {
103 dev_kfree_skb(ch->tx_skb);
104 ch->tx_skb = NULL;
106 if (ch->rx_skb) {
107 dev_kfree_skb(ch->rx_skb);
108 ch->rx_skb = NULL;
110 skb_queue_purge(&ch->squeue);
111 skb_queue_purge(&ch->rqueue);
112 flush_scheduled_work();
113 return 0;
115 EXPORT_SYMBOL(mISDN_freedchannel);
118 mISDN_freebchannel(struct bchannel *ch)
120 if (ch->tx_skb) {
121 dev_kfree_skb(ch->tx_skb);
122 ch->tx_skb = NULL;
124 if (ch->rx_skb) {
125 dev_kfree_skb(ch->rx_skb);
126 ch->rx_skb = NULL;
128 if (ch->next_skb) {
129 dev_kfree_skb(ch->next_skb);
130 ch->next_skb = NULL;
132 skb_queue_purge(&ch->rqueue);
133 ch->rcount = 0;
134 flush_scheduled_work();
135 return 0;
137 EXPORT_SYMBOL(mISDN_freebchannel);
139 static inline u_int
140 get_sapi_tei(u_char *p)
142 u_int sapi, tei;
144 sapi = *p >> 2;
145 tei = p[1] >> 1;
146 return sapi | (tei << 8);
149 void
150 recv_Dchannel(struct dchannel *dch)
152 struct mISDNhead *hh;
154 if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
155 dev_kfree_skb(dch->rx_skb);
156 dch->rx_skb = NULL;
157 return;
159 hh = mISDN_HEAD_P(dch->rx_skb);
160 hh->prim = PH_DATA_IND;
161 hh->id = get_sapi_tei(dch->rx_skb->data);
162 skb_queue_tail(&dch->rqueue, dch->rx_skb);
163 dch->rx_skb = NULL;
164 schedule_event(dch, FLG_RECVQUEUE);
166 EXPORT_SYMBOL(recv_Dchannel);
168 void
169 recv_Echannel(struct dchannel *ech, struct dchannel *dch)
171 struct mISDNhead *hh;
173 if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
174 dev_kfree_skb(ech->rx_skb);
175 ech->rx_skb = NULL;
176 return;
178 hh = mISDN_HEAD_P(ech->rx_skb);
179 hh->prim = PH_DATA_E_IND;
180 hh->id = get_sapi_tei(ech->rx_skb->data);
181 skb_queue_tail(&dch->rqueue, ech->rx_skb);
182 ech->rx_skb = NULL;
183 schedule_event(dch, FLG_RECVQUEUE);
185 EXPORT_SYMBOL(recv_Echannel);
187 void
188 recv_Bchannel(struct bchannel *bch, unsigned int id)
190 struct mISDNhead *hh;
192 hh = mISDN_HEAD_P(bch->rx_skb);
193 hh->prim = PH_DATA_IND;
194 hh->id = id;
195 if (bch->rcount >= 64) {
196 printk(KERN_WARNING "B-channel %p receive queue overflow, "
197 "fushing!\n", bch);
198 skb_queue_purge(&bch->rqueue);
199 bch->rcount = 0;
200 return;
202 bch->rcount++;
203 skb_queue_tail(&bch->rqueue, bch->rx_skb);
204 bch->rx_skb = NULL;
205 schedule_event(bch, FLG_RECVQUEUE);
207 EXPORT_SYMBOL(recv_Bchannel);
209 void
210 recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
212 skb_queue_tail(&dch->rqueue, skb);
213 schedule_event(dch, FLG_RECVQUEUE);
215 EXPORT_SYMBOL(recv_Dchannel_skb);
217 void
218 recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
220 if (bch->rcount >= 64) {
221 printk(KERN_WARNING "B-channel %p receive queue overflow, "
222 "fushing!\n", bch);
223 skb_queue_purge(&bch->rqueue);
224 bch->rcount = 0;
226 bch->rcount++;
227 skb_queue_tail(&bch->rqueue, skb);
228 schedule_event(bch, FLG_RECVQUEUE);
230 EXPORT_SYMBOL(recv_Bchannel_skb);
232 static void
233 confirm_Dsend(struct dchannel *dch)
235 struct sk_buff *skb;
237 skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
238 0, NULL, GFP_ATOMIC);
239 if (!skb) {
240 printk(KERN_ERR "%s: no skb id %x\n", __func__,
241 mISDN_HEAD_ID(dch->tx_skb));
242 return;
244 skb_queue_tail(&dch->rqueue, skb);
245 schedule_event(dch, FLG_RECVQUEUE);
249 get_next_dframe(struct dchannel *dch)
251 dch->tx_idx = 0;
252 dch->tx_skb = skb_dequeue(&dch->squeue);
253 if (dch->tx_skb) {
254 confirm_Dsend(dch);
255 return 1;
257 dch->tx_skb = NULL;
258 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
259 return 0;
261 EXPORT_SYMBOL(get_next_dframe);
263 void
264 confirm_Bsend(struct bchannel *bch)
266 struct sk_buff *skb;
268 if (bch->rcount >= 64) {
269 printk(KERN_WARNING "B-channel %p receive queue overflow, "
270 "fushing!\n", bch);
271 skb_queue_purge(&bch->rqueue);
272 bch->rcount = 0;
274 skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
275 0, NULL, GFP_ATOMIC);
276 if (!skb) {
277 printk(KERN_ERR "%s: no skb id %x\n", __func__,
278 mISDN_HEAD_ID(bch->tx_skb));
279 return;
281 bch->rcount++;
282 skb_queue_tail(&bch->rqueue, skb);
283 schedule_event(bch, FLG_RECVQUEUE);
285 EXPORT_SYMBOL(confirm_Bsend);
288 get_next_bframe(struct bchannel *bch)
290 bch->tx_idx = 0;
291 if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
292 bch->tx_skb = bch->next_skb;
293 if (bch->tx_skb) {
294 bch->next_skb = NULL;
295 test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
296 if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
297 confirm_Bsend(bch); /* not for transparent */
298 return 1;
299 } else {
300 test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
301 printk(KERN_WARNING "B TX_NEXT without skb\n");
304 bch->tx_skb = NULL;
305 test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
306 return 0;
308 EXPORT_SYMBOL(get_next_bframe);
310 void
311 queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
313 struct mISDNhead *hh;
315 if (!skb) {
316 _queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
317 } else {
318 if (ch->peer) {
319 hh = mISDN_HEAD_P(skb);
320 hh->prim = pr;
321 hh->id = id;
322 if (!ch->recv(ch->peer, skb))
323 return;
325 dev_kfree_skb(skb);
328 EXPORT_SYMBOL(queue_ch_frame);
331 dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
333 /* check oversize */
334 if (skb->len <= 0) {
335 printk(KERN_WARNING "%s: skb too small\n", __func__);
336 return -EINVAL;
338 if (skb->len > ch->maxlen) {
339 printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
340 __func__, skb->len, ch->maxlen);
341 return -EINVAL;
343 /* HW lock must be obtained */
344 if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
345 skb_queue_tail(&ch->squeue, skb);
346 return 0;
347 } else {
348 /* write to fifo */
349 ch->tx_skb = skb;
350 ch->tx_idx = 0;
351 return 1;
354 EXPORT_SYMBOL(dchannel_senddata);
357 bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
360 /* check oversize */
361 if (skb->len <= 0) {
362 printk(KERN_WARNING "%s: skb too small\n", __func__);
363 return -EINVAL;
365 if (skb->len > ch->maxlen) {
366 printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
367 __func__, skb->len, ch->maxlen);
368 return -EINVAL;
370 /* HW lock must be obtained */
371 /* check for pending next_skb */
372 if (ch->next_skb) {
373 printk(KERN_WARNING
374 "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
375 __func__, skb->len, ch->next_skb->len);
376 return -EBUSY;
378 if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
379 test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
380 ch->next_skb = skb;
381 return 0;
382 } else {
383 /* write to fifo */
384 ch->tx_skb = skb;
385 ch->tx_idx = 0;
386 return 1;
389 EXPORT_SYMBOL(bchannel_senddata);