/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008  by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mISDNif.h>
#include <linux/slab.h>
#include "core.h"
#include "fsm.h"
#include "layer2.h"

static u_int *debug;

static
struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};

static char *strL2State[] =
{
	"ST_L2_1",
	"ST_L2_2",
	"ST_L2_3",
	"ST_L2_4",
	"ST_L2_5",
	"ST_L2_6",
	"ST_L2_7",
	"ST_L2_8",
};
enum {
	EV_L2_UI,
	EV_L2_SABME,
	EV_L2_DISC,
	EV_L2_DM,
	EV_L2_UA,
	EV_L2_FRMR,
	EV_L2_SUPER,
	EV_L2_I,
	EV_L2_DL_DATA,
	EV_L2_ACK_PULL,
	EV_L2_DL_UNITDATA,
	EV_L2_DL_ESTABLISH_REQ,
	EV_L2_DL_RELEASE_REQ,
	EV_L2_MDL_ASSIGN,
	EV_L2_MDL_REMOVE,
	EV_L2_MDL_ERROR,
	EV_L1_DEACTIVATE,
	EV_L2_T200,
	EV_L2_T203,
	EV_L2_SET_OWN_BUSY,
	EV_L2_CLEAR_OWN_BUSY,
	EV_L2_FRAME_ERROR,
};

#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)
static char *strL2Event[] =
{
	"EV_L2_UI",
	"EV_L2_SABME",
	"EV_L2_DISC",
	"EV_L2_DM",
	"EV_L2_UA",
	"EV_L2_FRMR",
	"EV_L2_SUPER",
	"EV_L2_I",
	"EV_L2_DL_DATA",
	"EV_L2_ACK_PULL",
	"EV_L2_DL_UNITDATA",
	"EV_L2_DL_ESTABLISH_REQ",
	"EV_L2_DL_RELEASE_REQ",
	"EV_L2_MDL_ASSIGN",
	"EV_L2_MDL_REMOVE",
	"EV_L2_MDL_ERROR",
	"EV_L1_DEACTIVATE",
	"EV_L2_T200",
	"EV_L2_T203",
	"EV_L2_SET_OWN_BUSY",
	"EV_L2_CLEAR_OWN_BUSY",
	"EV_L2_FRAME_ERROR",
};
static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
{
	struct layer2 *l2 = fi->userdata;
	struct va_format vaf;
	va_list va;

	if (!(*debug & DEBUG_L2_FSM))
		return;

	va_start(va, fmt);

	vaf.fmt = fmt;
	vaf.va = &va;

	printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n",
		l2->sapi, l2->tei, &vaf);

	va_end(va);
}
inline u_int
l2headersize(struct layer2 *l2, int ui)
{
	return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
		(test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
}

inline u_int
l2addrsize(struct layer2 *l2)
{
	return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
}
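
/*
 * Note on the two helpers above: they give the per-frame overhead in front
 * of the payload.  l2addrsize() is the address field (2 octets SAPI/TEI for
 * LAPD, 1 octet for LAPB), and l2headersize() adds the control field, which
 * is 2 octets for numbered frames in modulo-128 operation and 1 octet
 * otherwise; UI frames always carry a 1-octet control field.
 */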

static u_int
l2_newid(struct layer2 *l2)
{
	u_int	id;

	id = l2->next_id++;
	if (id == 0x7fff)
		l2->next_id = 1;
	id <<= 16;
	id |= l2->tei << 8;
	id |= l2->sapi;
	return id;
}
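
/*
 * l2_newid() builds the id carried in mISDN_HEAD_ID for frames sent down:
 * a running sequence number (wrapping from 0x7fff back to 1) in the upper
 * 16 bits, the TEI in bits 8..15 and the SAPI in bits 0..7.
 * ph_data_confirm() later matches PH_DATA_CNF against this id via
 * l2->down_id.
 */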

static void
l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
{
	int err;

	if (!l2->up)
		return;
	mISDN_HEAD_PRIM(skb) = prim;
	mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
	err = l2->up->send(l2->up, skb);
	if (err) {
		printk(KERN_WARNING "%s: err=%d\n", __func__, err);
		dev_kfree_skb(skb);
	}
}

static void
l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
{
	struct sk_buff *skb;
	struct mISDNhead *hh;
	int err;

	if (!l2->up)
		return;
	skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;
	hh = mISDN_HEAD_P(skb);
	hh->prim = prim;
	hh->id = (l2->ch.nr << 16) | l2->ch.addr;
	if (len)
		memcpy(skb_put(skb, len), arg, len);
	err = l2->up->send(l2->up, skb);
	if (err) {
		printk(KERN_WARNING "%s: err=%d\n", __func__, err);
		dev_kfree_skb(skb);
	}
}

static int
l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
	int ret;

	ret = l2->ch.recv(l2->ch.peer, skb);
	if (ret && (*debug & DEBUG_L2_RECV))
		printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret);
	return ret;
}

static int
l2down_raw(struct layer2 *l2, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);

	if (hh->prim == PH_DATA_REQ) {
		if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
			skb_queue_tail(&l2->down_queue, skb);
			return 0;
		}
		l2->down_id = mISDN_HEAD_ID(skb);
	}
	return l2down_skb(l2, skb);
}

static int
l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);

	hh->prim = prim;
	hh->id = id;
	return l2down_raw(l2, skb);
}

static int
l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
{
	struct sk_buff *skb;
	int err;
	struct mISDNhead *hh;

	skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	hh = mISDN_HEAD_P(skb);
	hh->prim = prim;
	hh->id = id;
	if (len)
		memcpy(skb_put(skb, len), arg, len);
	err = l2down_raw(l2, skb);
	if (err)
		dev_kfree_skb(skb);
	return err;
}
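
/*
 * Downstream flow control: l2down_raw() lets only one PH_DATA_REQ be
 * outstanding towards layer 1 at a time (FLG_L1_NOTREADY).  Further data
 * requests are parked on down_queue and are released one by one from
 * ph_data_confirm() when the PH_DATA_CNF for the previous frame arrives.
 */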

static int
ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
	struct sk_buff *nskb = skb;
	int ret = -EAGAIN;

	if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
		if (hh->id == l2->down_id) {
			nskb = skb_dequeue(&l2->down_queue);
			if (nskb) {
				l2->down_id = mISDN_HEAD_ID(nskb);
				if (l2down_skb(l2, nskb)) {
					dev_kfree_skb(nskb);
					l2->down_id = MISDN_ID_NONE;
				}
			} else
				l2->down_id = MISDN_ID_NONE;
			if (ret) {
				dev_kfree_skb(skb);
				ret = 0;
			}
			if (l2->down_id == MISDN_ID_NONE) {
				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
				mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
			}
		}
	}
	if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
		nskb = skb_dequeue(&l2->down_queue);
		if (nskb) {
			l2->down_id = mISDN_HEAD_ID(nskb);
			if (l2down_skb(l2, nskb)) {
				dev_kfree_skb(nskb);
				l2->down_id = MISDN_ID_NONE;
				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
			}
		} else
			test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
	}
	return ret;
}
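
/*
 * Note for ph_data_confirm() above: once the confirmed frame has been
 * matched against down_id and down_queue has drained, FLG_L1_NOTREADY is
 * cleared and EV_L2_ACK_PULL is fed to the FSM so that l2_pull_iqueue()
 * can transmit the next queued I-frame.
 */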
279 static int
280 l2mgr(struct layer2 *l2, u_int prim, void *arg) {
281 long c = (long)arg;
283 printk(KERN_WARNING
284 "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c);
285 if (test_bit(FLG_LAPD, &l2->flag) &&
286 !test_bit(FLG_FIXED_TEI, &l2->flag)) {
287 switch (c) {
288 case 'C':
289 case 'D':
290 case 'G':
291 case 'H':
292 l2_tei(l2, prim, (u_long)arg);
293 break;
296 return 0;
299 static void
300 set_peer_busy(struct layer2 *l2) {
301 test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
302 if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
303 test_and_set_bit(FLG_L2BLOCK, &l2->flag);
306 static void
307 clear_peer_busy(struct layer2 *l2) {
308 if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
309 test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
312 static void
313 InitWin(struct layer2 *l2)
315 int i;
317 for (i = 0; i < MAX_WINDOW; i++)
318 l2->windowar[i] = NULL;
321 static int
322 freewin(struct layer2 *l2)
324 int i, cnt = 0;
326 for (i = 0; i < MAX_WINDOW; i++) {
327 if (l2->windowar[i]) {
328 cnt++;
329 dev_kfree_skb(l2->windowar[i]);
330 l2->windowar[i] = NULL;
333 return cnt;
336 static void
337 ReleaseWin(struct layer2 *l2)
339 int cnt = freewin(l2);
341 if (cnt)
342 printk(KERN_WARNING
343 "isdnl2 freed %d skbuffs in release\n", cnt);
inline unsigned int
cansend(struct layer2 *l2)
{
	unsigned int p1;

	if (test_bit(FLG_MOD128, &l2->flag))
		p1 = (l2->vs - l2->va) % 128;
	else
		p1 = (l2->vs - l2->va) % 8;
	return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
}
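
/*
 * cansend() is the transmit window check: the number of unacknowledged
 * I-frames is V(S) - V(A) modulo 8 (or 128 in extended mode); sending is
 * allowed only while this is below the configured window and the peer has
 * not signalled receiver-busy (RNR).
 * Example: V(S)=5, V(A)=3, window=7 -> (5-3) % 128 = 2 < 7, so we may send.
 */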
358 inline void
359 clear_exception(struct layer2 *l2)
361 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
362 test_and_clear_bit(FLG_REJEXC, &l2->flag);
363 test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
364 clear_peer_busy(l2);

static int
sethdraddr(struct layer2 *l2, u_char *header, int rsp)
{
	u_char *ptr = header;
	int crbit = rsp;

	if (test_bit(FLG_LAPD, &l2->flag)) {
		if (test_bit(FLG_LAPD_NET, &l2->flag))
			crbit = !crbit;
		*ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
		*ptr++ = (l2->tei << 1) | 1;
		return 2;
	} else {
		if (test_bit(FLG_ORIG, &l2->flag))
			crbit = !crbit;
		if (crbit)
			*ptr++ = l2->addr.B;
		else
			*ptr++ = l2->addr.A;
		return 1;
	}
}
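
/*
 * sethdraddr() writes the address field.  For LAPD this is two octets:
 * SAPI << 2 with the C/R bit (sense inverted on the network side) and
 * EA0 = 0, then TEI << 1 with EA1 = 1.  For LAPB a single address octet
 * (addr.A or addr.B) is chosen depending on command/response and on which
 * side originated the link.
 */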
390 static inline void
391 enqueue_super(struct layer2 *l2, struct sk_buff *skb)
393 if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
394 dev_kfree_skb(skb);
397 static inline void
398 enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
400 if (l2->tm)
401 l2_tei(l2, MDL_STATUS_UI_IND, 0);
402 if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
403 dev_kfree_skb(skb);
406 inline int
407 IsUI(u_char *data)
409 return (data[0] & 0xef) == UI;
412 inline int
413 IsUA(u_char *data)
415 return (data[0] & 0xef) == UA;
418 inline int
419 IsDM(u_char *data)
421 return (data[0] & 0xef) == DM;
424 inline int
425 IsDISC(u_char *data)
427 return (data[0] & 0xef) == DISC;
430 inline int
431 IsRR(u_char *data, struct layer2 *l2)
433 if (test_bit(FLG_MOD128, &l2->flag))
434 return data[0] == RR;
435 else
436 return (data[0] & 0xf) == 1;
439 inline int
440 IsSFrame(u_char *data, struct layer2 *l2)
442 register u_char d = *data;
444 if (!test_bit(FLG_MOD128, &l2->flag))
445 d &= 0xf;
446 return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
449 inline int
450 IsSABME(u_char *data, struct layer2 *l2)
452 u_char d = data[0] & ~0x10;
454 return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
457 inline int
458 IsREJ(u_char *data, struct layer2 *l2)
460 return test_bit(FLG_MOD128, &l2->flag) ?
461 data[0] == REJ : (data[0] & 0xf) == REJ;
464 inline int
465 IsFRMR(u_char *data)
467 return (data[0] & 0xef) == FRMR;
470 inline int
471 IsRNR(u_char *data, struct layer2 *l2)
473 return test_bit(FLG_MOD128, &l2->flag) ?
474 data[0] == RNR : (data[0] & 0xf) == RNR;
477 static int
478 iframe_error(struct layer2 *l2, struct sk_buff *skb)
480 u_int i;
481 int rsp = *skb->data & 0x2;
483 i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
484 if (test_bit(FLG_ORIG, &l2->flag))
485 rsp = !rsp;
486 if (rsp)
487 return 'L';
488 if (skb->len < i)
489 return 'N';
490 if ((skb->len - i) > l2->maxlen)
491 return 'O';
492 return 0;
495 static int
496 super_error(struct layer2 *l2, struct sk_buff *skb)
498 if (skb->len != l2addrsize(l2) +
499 (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
500 return 'N';
501 return 0;
504 static int
505 unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
507 int rsp = (*skb->data & 0x2) >> 1;
508 if (test_bit(FLG_ORIG, &l2->flag))
509 rsp = !rsp;
510 if (rsp != wantrsp)
511 return 'L';
512 if (skb->len != l2addrsize(l2) + 1)
513 return 'N';
514 return 0;
517 static int
518 UI_error(struct layer2 *l2, struct sk_buff *skb)
520 int rsp = *skb->data & 0x2;
521 if (test_bit(FLG_ORIG, &l2->flag))
522 rsp = !rsp;
523 if (rsp)
524 return 'L';
525 if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
526 return 'O';
527 return 0;
530 static int
531 FRMR_error(struct layer2 *l2, struct sk_buff *skb)
533 u_int headers = l2addrsize(l2) + 1;
534 u_char *datap = skb->data + headers;
535 int rsp = *skb->data & 0x2;
537 if (test_bit(FLG_ORIG, &l2->flag))
538 rsp = !rsp;
539 if (!rsp)
540 return 'L';
541 if (test_bit(FLG_MOD128, &l2->flag)) {
542 if (skb->len < headers + 5)
543 return 'N';
544 else if (*debug & DEBUG_L2)
545 l2m_debug(&l2->l2m,
546 "FRMR information %2x %2x %2x %2x %2x",
547 datap[0], datap[1], datap[2], datap[3], datap[4]);
548 } else {
549 if (skb->len < headers + 3)
550 return 'N';
551 else if (*debug & DEBUG_L2)
552 l2m_debug(&l2->l2m,
553 "FRMR information %2x %2x %2x",
554 datap[0], datap[1], datap[2]);
556 return 0;
static unsigned int
legalnr(struct layer2 *l2, unsigned int nr)
{
	if (test_bit(FLG_MOD128, &l2->flag))
		return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
	else
		return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
}
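
/*
 * legalnr() checks a received N(R) against the send state: it is valid
 * only if it lies in the range V(A) .. V(S) inclusive, with the distance
 * computed modulo 8 or modulo 128.  An out-of-range N(R) makes the callers
 * run nrerrorrecovery() and re-establish the link.
 */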
568 static void
569 setva(struct layer2 *l2, unsigned int nr)
571 struct sk_buff *skb;
573 while (l2->va != nr) {
574 l2->va++;
575 if (test_bit(FLG_MOD128, &l2->flag))
576 l2->va %= 128;
577 else
578 l2->va %= 8;
579 if (l2->windowar[l2->sow]) {
580 skb_trim(l2->windowar[l2->sow], 0);
581 skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
582 l2->windowar[l2->sow] = NULL;
584 l2->sow = (l2->sow + 1) % l2->window;
586 skb = skb_dequeue(&l2->tmp_queue);
587 while (skb) {
588 dev_kfree_skb(skb);
589 skb = skb_dequeue(&l2->tmp_queue);
593 static void
594 send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
596 u_char tmp[MAX_L2HEADER_LEN];
597 int i;
599 i = sethdraddr(l2, tmp, cr);
600 tmp[i++] = cmd;
601 if (skb)
602 skb_trim(skb, 0);
603 else {
604 skb = mI_alloc_skb(i, GFP_ATOMIC);
605 if (!skb) {
606 printk(KERN_WARNING "%s: can't alloc skbuff\n",
607 __func__);
608 return;
611 memcpy(skb_put(skb, i), tmp, i);
612 enqueue_super(l2, skb);
616 inline u_char
617 get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
619 return skb->data[l2addrsize(l2)] & 0x10;
622 inline u_char
623 get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
625 u_char PF;
627 PF = get_PollFlag(l2, skb);
628 dev_kfree_skb(skb);
629 return PF;
inline void
start_t200(struct layer2 *l2, int i)
{
	mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &l2->flag);
}

inline void
restart_t200(struct layer2 *l2, int i)
{
	mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &l2->flag);
}

inline void
stop_t200(struct layer2 *l2, int i)
{
	if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
		mISDN_FsmDelTimer(&l2->t200, i);
}
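
/*
 * T200 is the retransmission timer (1000 ms by default, see create_l2()).
 * FLG_T200_RUN mirrors whether it is armed so that stop_t200() only deletes
 * a timer that is actually running.  Its counterpart T203 supervises link
 * idle time and is added/deleted directly at the call sites.
 */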
653 inline void
654 st5_dl_release_l2l3(struct layer2 *l2)
656 int pr;
658 if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
659 pr = DL_RELEASE_CNF;
660 else
661 pr = DL_RELEASE_IND;
662 l2up_create(l2, pr, 0, NULL);
665 inline void
666 lapb_dl_release_l2l3(struct layer2 *l2, int f)
668 if (test_bit(FLG_LAPB, &l2->flag))
669 l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
670 l2up_create(l2, f, 0, NULL);
673 static void
674 establishlink(struct FsmInst *fi)
676 struct layer2 *l2 = fi->userdata;
677 u_char cmd;
679 clear_exception(l2);
680 l2->rc = 0;
681 cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
682 send_uframe(l2, NULL, cmd, CMD);
683 mISDN_FsmDelTimer(&l2->t203, 1);
684 restart_t200(l2, 1);
685 test_and_clear_bit(FLG_PEND_REL, &l2->flag);
686 freewin(l2);
687 mISDN_FsmChangeState(fi, ST_L2_5);
690 static void
691 l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
693 struct sk_buff *skb = arg;
694 struct layer2 *l2 = fi->userdata;
696 if (get_PollFlagFree(l2, skb))
697 l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
698 else
699 l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
703 static void
704 l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
706 struct sk_buff *skb = arg;
707 struct layer2 *l2 = fi->userdata;
709 if (get_PollFlagFree(l2, skb))
710 l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
711 else {
712 l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
713 establishlink(fi);
714 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
718 static void
719 l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
721 struct sk_buff *skb = arg;
722 struct layer2 *l2 = fi->userdata;
724 if (get_PollFlagFree(l2, skb))
725 l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
726 else
727 l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
728 establishlink(fi);
729 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
732 static void
733 l2_go_st3(struct FsmInst *fi, int event, void *arg)
735 dev_kfree_skb((struct sk_buff *)arg);
736 mISDN_FsmChangeState(fi, ST_L2_3);
739 static void
740 l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
742 struct layer2 *l2 = fi->userdata;
744 mISDN_FsmChangeState(fi, ST_L2_3);
745 dev_kfree_skb((struct sk_buff *)arg);
746 l2_tei(l2, MDL_ASSIGN_IND, 0);
749 static void
750 l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
752 struct layer2 *l2 = fi->userdata;
753 struct sk_buff *skb = arg;
755 skb_queue_tail(&l2->ui_queue, skb);
756 mISDN_FsmChangeState(fi, ST_L2_2);
757 l2_tei(l2, MDL_ASSIGN_IND, 0);
760 static void
761 l2_queue_ui(struct FsmInst *fi, int event, void *arg)
763 struct layer2 *l2 = fi->userdata;
764 struct sk_buff *skb = arg;
766 skb_queue_tail(&l2->ui_queue, skb);
769 static void
770 tx_ui(struct layer2 *l2)
772 struct sk_buff *skb;
773 u_char header[MAX_L2HEADER_LEN];
774 int i;
776 i = sethdraddr(l2, header, CMD);
777 if (test_bit(FLG_LAPD_NET, &l2->flag))
778 header[1] = 0xff; /* tei 127 */
779 header[i++] = UI;
780 while ((skb = skb_dequeue(&l2->ui_queue))) {
781 memcpy(skb_push(skb, i), header, i);
782 enqueue_ui(l2, skb);
786 static void
787 l2_send_ui(struct FsmInst *fi, int event, void *arg)
789 struct layer2 *l2 = fi->userdata;
790 struct sk_buff *skb = arg;
792 skb_queue_tail(&l2->ui_queue, skb);
793 tx_ui(l2);
static void
l2_got_ui(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_pull(skb, l2headersize(l2, 1));
/*
 *		in states 1-3 for broadcast
 */
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UI_IND, 0);
	l2up(l2, DL_UNITDATA_IND, skb);
}
812 static void
813 l2_establish(struct FsmInst *fi, int event, void *arg)
815 struct sk_buff *skb = arg;
816 struct layer2 *l2 = fi->userdata;
818 establishlink(fi);
819 test_and_set_bit(FLG_L3_INIT, &l2->flag);
820 dev_kfree_skb(skb);
823 static void
824 l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
826 struct sk_buff *skb = arg;
827 struct layer2 *l2 = fi->userdata;
829 skb_queue_purge(&l2->i_queue);
830 test_and_set_bit(FLG_L3_INIT, &l2->flag);
831 test_and_clear_bit(FLG_PEND_REL, &l2->flag);
832 dev_kfree_skb(skb);
835 static void
836 l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
838 struct sk_buff *skb = arg;
839 struct layer2 *l2 = fi->userdata;
841 skb_queue_purge(&l2->i_queue);
842 establishlink(fi);
843 test_and_set_bit(FLG_L3_INIT, &l2->flag);
844 dev_kfree_skb(skb);
847 static void
848 l2_release(struct FsmInst *fi, int event, void *arg)
850 struct layer2 *l2 = fi->userdata;
851 struct sk_buff *skb = arg;
853 skb_trim(skb, 0);
854 l2up(l2, DL_RELEASE_CNF, skb);
857 static void
858 l2_pend_rel(struct FsmInst *fi, int event, void *arg)
860 struct sk_buff *skb = arg;
861 struct layer2 *l2 = fi->userdata;
863 test_and_set_bit(FLG_PEND_REL, &l2->flag);
864 dev_kfree_skb(skb);
867 static void
868 l2_disconnect(struct FsmInst *fi, int event, void *arg)
870 struct layer2 *l2 = fi->userdata;
871 struct sk_buff *skb = arg;
873 skb_queue_purge(&l2->i_queue);
874 freewin(l2);
875 mISDN_FsmChangeState(fi, ST_L2_6);
876 l2->rc = 0;
877 send_uframe(l2, NULL, DISC | 0x10, CMD);
878 mISDN_FsmDelTimer(&l2->t203, 1);
879 restart_t200(l2, 2);
880 if (skb)
881 dev_kfree_skb(skb);
884 static void
885 l2_start_multi(struct FsmInst *fi, int event, void *arg)
887 struct layer2 *l2 = fi->userdata;
888 struct sk_buff *skb = arg;
890 l2->vs = 0;
891 l2->va = 0;
892 l2->vr = 0;
893 l2->sow = 0;
894 clear_exception(l2);
895 send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
896 mISDN_FsmChangeState(fi, ST_L2_7);
897 mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
898 skb_trim(skb, 0);
899 l2up(l2, DL_ESTABLISH_IND, skb);
900 if (l2->tm)
901 l2_tei(l2, MDL_STATUS_UP_IND, 0);
904 static void
905 l2_send_UA(struct FsmInst *fi, int event, void *arg)
907 struct layer2 *l2 = fi->userdata;
908 struct sk_buff *skb = arg;
910 send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
913 static void
914 l2_send_DM(struct FsmInst *fi, int event, void *arg)
916 struct layer2 *l2 = fi->userdata;
917 struct sk_buff *skb = arg;
919 send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
922 static void
923 l2_restart_multi(struct FsmInst *fi, int event, void *arg)
925 struct layer2 *l2 = fi->userdata;
926 struct sk_buff *skb = arg;
927 int est = 0;
929 send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
931 l2mgr(l2, MDL_ERROR_IND, (void *) 'F');
933 if (l2->vs != l2->va) {
934 skb_queue_purge(&l2->i_queue);
935 est = 1;
938 clear_exception(l2);
939 l2->vs = 0;
940 l2->va = 0;
941 l2->vr = 0;
942 l2->sow = 0;
943 mISDN_FsmChangeState(fi, ST_L2_7);
944 stop_t200(l2, 3);
945 mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
947 if (est)
948 l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
/*	mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 *	    MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
 *	    0, NULL, 0);
 */
	if (skb_queue_len(&l2->i_queue) && cansend(l2))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
}
957 static void
958 l2_stop_multi(struct FsmInst *fi, int event, void *arg)
960 struct layer2 *l2 = fi->userdata;
961 struct sk_buff *skb = arg;
963 mISDN_FsmChangeState(fi, ST_L2_4);
964 mISDN_FsmDelTimer(&l2->t203, 3);
965 stop_t200(l2, 4);
967 send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
968 skb_queue_purge(&l2->i_queue);
969 freewin(l2);
970 lapb_dl_release_l2l3(l2, DL_RELEASE_IND);
971 if (l2->tm)
972 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
975 static void
976 l2_connected(struct FsmInst *fi, int event, void *arg)
978 struct layer2 *l2 = fi->userdata;
979 struct sk_buff *skb = arg;
980 int pr = -1;
982 if (!get_PollFlag(l2, skb)) {
983 l2_mdl_error_ua(fi, event, arg);
984 return;
986 dev_kfree_skb(skb);
987 if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
988 l2_disconnect(fi, event, NULL);
989 if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
990 pr = DL_ESTABLISH_CNF;
991 } else if (l2->vs != l2->va) {
992 skb_queue_purge(&l2->i_queue);
993 pr = DL_ESTABLISH_IND;
995 stop_t200(l2, 5);
996 l2->vr = 0;
997 l2->vs = 0;
998 l2->va = 0;
999 l2->sow = 0;
1000 mISDN_FsmChangeState(fi, ST_L2_7);
1001 mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
1002 if (pr != -1)
1003 l2up_create(l2, pr, 0, NULL);
1005 if (skb_queue_len(&l2->i_queue) && cansend(l2))
1006 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1008 if (l2->tm)
1009 l2_tei(l2, MDL_STATUS_UP_IND, 0);
1012 static void
1013 l2_released(struct FsmInst *fi, int event, void *arg)
1015 struct layer2 *l2 = fi->userdata;
1016 struct sk_buff *skb = arg;
1018 if (!get_PollFlag(l2, skb)) {
1019 l2_mdl_error_ua(fi, event, arg);
1020 return;
1022 dev_kfree_skb(skb);
1023 stop_t200(l2, 6);
1024 lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1025 mISDN_FsmChangeState(fi, ST_L2_4);
1026 if (l2->tm)
1027 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1030 static void
1031 l2_reestablish(struct FsmInst *fi, int event, void *arg)
1033 struct layer2 *l2 = fi->userdata;
1034 struct sk_buff *skb = arg;
1036 if (!get_PollFlagFree(l2, skb)) {
1037 establishlink(fi);
1038 test_and_set_bit(FLG_L3_INIT, &l2->flag);
1042 static void
1043 l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
1045 struct layer2 *l2 = fi->userdata;
1046 struct sk_buff *skb = arg;
1048 if (get_PollFlagFree(l2, skb)) {
1049 stop_t200(l2, 7);
1050 if (!test_bit(FLG_L3_INIT, &l2->flag))
1051 skb_queue_purge(&l2->i_queue);
1052 if (test_bit(FLG_LAPB, &l2->flag))
1053 l2down_create(l2, PH_DEACTIVATE_REQ,
1054 l2_newid(l2), 0, NULL);
1055 st5_dl_release_l2l3(l2);
1056 mISDN_FsmChangeState(fi, ST_L2_4);
1057 if (l2->tm)
1058 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1062 static void
1063 l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
1065 struct layer2 *l2 = fi->userdata;
1066 struct sk_buff *skb = arg;
1068 if (get_PollFlagFree(l2, skb)) {
1069 stop_t200(l2, 8);
1070 lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1071 mISDN_FsmChangeState(fi, ST_L2_4);
1072 if (l2->tm)
1073 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1077 static void
1078 enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
1080 struct sk_buff *skb;
1081 u_char tmp[MAX_L2HEADER_LEN];
1082 int i;
1084 i = sethdraddr(l2, tmp, cr);
1085 if (test_bit(FLG_MOD128, &l2->flag)) {
1086 tmp[i++] = typ;
1087 tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
1088 } else
1089 tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
1090 skb = mI_alloc_skb(i, GFP_ATOMIC);
1091 if (!skb) {
1092 printk(KERN_WARNING
1093 "isdnl2 can't alloc sbbuff for enquiry_cr\n");
1094 return;
1096 memcpy(skb_put(skb, i), tmp, i);
1097 enqueue_super(l2, skb);
1100 inline void
1101 enquiry_response(struct layer2 *l2)
1103 if (test_bit(FLG_OWN_BUSY, &l2->flag))
1104 enquiry_cr(l2, RNR, RSP, 1);
1105 else
1106 enquiry_cr(l2, RR, RSP, 1);
1107 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1110 inline void
1111 transmit_enquiry(struct layer2 *l2)
1113 if (test_bit(FLG_OWN_BUSY, &l2->flag))
1114 enquiry_cr(l2, RNR, CMD, 1);
1115 else
1116 enquiry_cr(l2, RR, CMD, 1);
1117 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1118 start_t200(l2, 9);
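
/*
 * enquiry_response() answers a poll from the peer with RR or RNR
 * (response, F=1) depending on our own busy state; transmit_enquiry()
 * sends the same as a command with P=1 and starts T200, which is how the
 * timer-recovery procedure is entered.
 */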
1122 static void
1123 nrerrorrecovery(struct FsmInst *fi)
1125 struct layer2 *l2 = fi->userdata;
1127 l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
1128 establishlink(fi);
1129 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1132 static void
1133 invoke_retransmission(struct layer2 *l2, unsigned int nr)
1135 u_int p1;
1137 if (l2->vs != nr) {
1138 while (l2->vs != nr) {
1139 (l2->vs)--;
1140 if (test_bit(FLG_MOD128, &l2->flag)) {
1141 l2->vs %= 128;
1142 p1 = (l2->vs - l2->va) % 128;
1143 } else {
1144 l2->vs %= 8;
1145 p1 = (l2->vs - l2->va) % 8;
1147 p1 = (p1 + l2->sow) % l2->window;
1148 if (l2->windowar[p1])
1149 skb_queue_head(&l2->i_queue, l2->windowar[p1]);
1150 else
1151 printk(KERN_WARNING
1152 "%s: windowar[%d] is NULL\n",
1153 __func__, p1);
1154 l2->windowar[p1] = NULL;
1156 mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
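
/*
 * invoke_retransmission() winds V(S) back to the acknowledged N(R) and
 * moves the still-unacknowledged I-frames from the windowar[] slots back
 * to the head of i_queue; the final EV_L2_ACK_PULL event makes
 * l2_pull_iqueue() send them again.
 */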
1160 static void
1161 l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
1163 struct layer2 *l2 = fi->userdata;
1164 struct sk_buff *skb = arg;
1165 int PollFlag, rsp, typ = RR;
1166 unsigned int nr;
1168 rsp = *skb->data & 0x2;
1169 if (test_bit(FLG_ORIG, &l2->flag))
1170 rsp = !rsp;
1172 skb_pull(skb, l2addrsize(l2));
1173 if (IsRNR(skb->data, l2)) {
1174 set_peer_busy(l2);
1175 typ = RNR;
1176 } else
1177 clear_peer_busy(l2);
1178 if (IsREJ(skb->data, l2))
1179 typ = REJ;
1181 if (test_bit(FLG_MOD128, &l2->flag)) {
1182 PollFlag = (skb->data[1] & 0x1) == 0x1;
1183 nr = skb->data[1] >> 1;
1184 } else {
1185 PollFlag = (skb->data[0] & 0x10);
1186 nr = (skb->data[0] >> 5) & 0x7;
1188 dev_kfree_skb(skb);
1190 if (PollFlag) {
1191 if (rsp)
1192 l2mgr(l2, MDL_ERROR_IND, (void *) 'A');
1193 else
1194 enquiry_response(l2);
1196 if (legalnr(l2, nr)) {
1197 if (typ == REJ) {
1198 setva(l2, nr);
1199 invoke_retransmission(l2, nr);
1200 stop_t200(l2, 10);
1201 if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
1202 EV_L2_T203, NULL, 6))
1203 l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
1204 } else if ((nr == l2->vs) && (typ == RR)) {
1205 setva(l2, nr);
1206 stop_t200(l2, 11);
1207 mISDN_FsmRestartTimer(&l2->t203, l2->T203,
1208 EV_L2_T203, NULL, 7);
1209 } else if ((l2->va != nr) || (typ == RNR)) {
1210 setva(l2, nr);
1211 if (typ != RR)
1212 mISDN_FsmDelTimer(&l2->t203, 9);
1213 restart_t200(l2, 12);
1215 if (skb_queue_len(&l2->i_queue) && (typ == RR))
1216 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1217 } else
1218 nrerrorrecovery(fi);
1221 static void
1222 l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
1224 struct layer2 *l2 = fi->userdata;
1225 struct sk_buff *skb = arg;
1227 if (!test_bit(FLG_L3_INIT, &l2->flag))
1228 skb_queue_tail(&l2->i_queue, skb);
1229 else
1230 dev_kfree_skb(skb);
1233 static void
1234 l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
1236 struct layer2 *l2 = fi->userdata;
1237 struct sk_buff *skb = arg;
1239 skb_queue_tail(&l2->i_queue, skb);
1240 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1243 static void
1244 l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
1246 struct layer2 *l2 = fi->userdata;
1247 struct sk_buff *skb = arg;
1249 skb_queue_tail(&l2->i_queue, skb);
1252 static void
1253 l2_got_iframe(struct FsmInst *fi, int event, void *arg)
1255 struct layer2 *l2 = fi->userdata;
1256 struct sk_buff *skb = arg;
1257 int PollFlag, i;
1258 u_int ns, nr;
1260 i = l2addrsize(l2);
1261 if (test_bit(FLG_MOD128, &l2->flag)) {
1262 PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
1263 ns = skb->data[i] >> 1;
1264 nr = (skb->data[i + 1] >> 1) & 0x7f;
1265 } else {
1266 PollFlag = (skb->data[i] & 0x10);
1267 ns = (skb->data[i] >> 1) & 0x7;
1268 nr = (skb->data[i] >> 5) & 0x7;
1270 if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
1271 dev_kfree_skb(skb);
1272 if (PollFlag)
1273 enquiry_response(l2);
1274 } else {
1275 if (l2->vr == ns) {
1276 l2->vr++;
1277 if (test_bit(FLG_MOD128, &l2->flag))
1278 l2->vr %= 128;
1279 else
1280 l2->vr %= 8;
1281 test_and_clear_bit(FLG_REJEXC, &l2->flag);
1282 if (PollFlag)
1283 enquiry_response(l2);
1284 else
1285 test_and_set_bit(FLG_ACK_PEND, &l2->flag);
1286 skb_pull(skb, l2headersize(l2, 0));
1287 l2up(l2, DL_DATA_IND, skb);
1288 } else {
1289 /* n(s)!=v(r) */
1290 dev_kfree_skb(skb);
1291 if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
1292 if (PollFlag)
1293 enquiry_response(l2);
1294 } else {
1295 enquiry_cr(l2, REJ, RSP, PollFlag);
1296 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1300 if (legalnr(l2, nr)) {
1301 if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
1302 (fi->state == ST_L2_7)) {
1303 if (nr == l2->vs) {
1304 stop_t200(l2, 13);
1305 mISDN_FsmRestartTimer(&l2->t203, l2->T203,
1306 EV_L2_T203, NULL, 7);
1307 } else if (nr != l2->va)
1308 restart_t200(l2, 14);
1310 setva(l2, nr);
1311 } else {
1312 nrerrorrecovery(fi);
1313 return;
1315 if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
1316 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1317 if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
1318 enquiry_cr(l2, RR, RSP, 0);
1321 static void
1322 l2_got_tei(struct FsmInst *fi, int event, void *arg)
1324 struct layer2 *l2 = fi->userdata;
1325 u_int info;
1327 l2->tei = (signed char)(long)arg;
1328 set_channel_address(&l2->ch, l2->sapi, l2->tei);
1329 info = DL_INFO_L2_CONNECT;
1330 l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
1331 if (fi->state == ST_L2_3) {
1332 establishlink(fi);
1333 test_and_set_bit(FLG_L3_INIT, &l2->flag);
1334 } else
1335 mISDN_FsmChangeState(fi, ST_L2_4);
1336 if (skb_queue_len(&l2->ui_queue))
1337 tx_ui(l2);
1340 static void
1341 l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
1343 struct layer2 *l2 = fi->userdata;
1345 if (test_bit(FLG_LAPD, &l2->flag) &&
1346 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1347 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1348 } else if (l2->rc == l2->N200) {
1349 mISDN_FsmChangeState(fi, ST_L2_4);
1350 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1351 skb_queue_purge(&l2->i_queue);
1352 l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
1353 if (test_bit(FLG_LAPB, &l2->flag))
1354 l2down_create(l2, PH_DEACTIVATE_REQ,
1355 l2_newid(l2), 0, NULL);
1356 st5_dl_release_l2l3(l2);
1357 if (l2->tm)
1358 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1359 } else {
1360 l2->rc++;
1361 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1362 send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
1363 SABME : SABM) | 0x10, CMD);
1367 static void
1368 l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
1370 struct layer2 *l2 = fi->userdata;
1372 if (test_bit(FLG_LAPD, &l2->flag) &&
1373 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1374 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1375 } else if (l2->rc == l2->N200) {
1376 mISDN_FsmChangeState(fi, ST_L2_4);
1377 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1378 l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
1379 lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1380 if (l2->tm)
1381 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1382 } else {
1383 l2->rc++;
1384 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
1385 NULL, 9);
1386 send_uframe(l2, NULL, DISC | 0x10, CMD);
1390 static void
1391 l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
1393 struct layer2 *l2 = fi->userdata;
1395 if (test_bit(FLG_LAPD, &l2->flag) &&
1396 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1397 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1398 return;
1400 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1401 l2->rc = 0;
1402 mISDN_FsmChangeState(fi, ST_L2_8);
1403 transmit_enquiry(l2);
1404 l2->rc++;
1407 static void
1408 l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
1410 struct layer2 *l2 = fi->userdata;
1412 if (test_bit(FLG_LAPD, &l2->flag) &&
1413 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1414 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1415 return;
1417 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1418 if (l2->rc == l2->N200) {
1419 l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
1420 establishlink(fi);
1421 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1422 } else {
1423 transmit_enquiry(l2);
1424 l2->rc++;
1428 static void
1429 l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
1431 struct layer2 *l2 = fi->userdata;
1433 if (test_bit(FLG_LAPD, &l2->flag) &&
1434 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1435 mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
1436 return;
1438 mISDN_FsmChangeState(fi, ST_L2_8);
1439 transmit_enquiry(l2);
1440 l2->rc = 0;
1443 static void
1444 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1446 struct layer2 *l2 = fi->userdata;
1447 struct sk_buff *skb, *nskb, *oskb;
1448 u_char header[MAX_L2HEADER_LEN];
1449 u_int i, p1;
1451 if (!cansend(l2))
1452 return;
1454 skb = skb_dequeue(&l2->i_queue);
1455 if (!skb)
1456 return;
1458 if (test_bit(FLG_MOD128, &l2->flag))
1459 p1 = (l2->vs - l2->va) % 128;
1460 else
1461 p1 = (l2->vs - l2->va) % 8;
1462 p1 = (p1 + l2->sow) % l2->window;
1463 if (l2->windowar[p1]) {
1464 printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
1465 p1);
1466 dev_kfree_skb(l2->windowar[p1]);
1468 l2->windowar[p1] = skb;
1469 i = sethdraddr(l2, header, CMD);
1470 if (test_bit(FLG_MOD128, &l2->flag)) {
1471 header[i++] = l2->vs << 1;
1472 header[i++] = l2->vr << 1;
1473 l2->vs = (l2->vs + 1) % 128;
1474 } else {
1475 header[i++] = (l2->vr << 5) | (l2->vs << 1);
1476 l2->vs = (l2->vs + 1) % 8;
1479 nskb = skb_clone(skb, GFP_ATOMIC);
1480 p1 = skb_headroom(nskb);
1481 if (p1 >= i)
1482 memcpy(skb_push(nskb, i), header, i);
1483 else {
1484 printk(KERN_WARNING
1485 "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
1486 oskb = nskb;
1487 nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
1488 if (!nskb) {
1489 dev_kfree_skb(oskb);
1490 printk(KERN_WARNING "%s: no skb mem\n", __func__);
1491 return;
1493 memcpy(skb_put(nskb, i), header, i);
1494 memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
1495 dev_kfree_skb(oskb);
1497 l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
1498 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1499 if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
1500 mISDN_FsmDelTimer(&l2->t203, 13);
1501 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
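
/*
 * l2_pull_iqueue() transmits one I-frame from i_queue: the original skb is
 * parked in windowar[] for possible retransmission, a clone gets the
 * address/control header pushed in front (or is copied into a larger skb
 * if there is not enough headroom) and is sent down, and T203 is stopped
 * and T200 started if T200 is not already running.
 */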
1505 static void
1506 l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
1508 struct layer2 *l2 = fi->userdata;
1509 struct sk_buff *skb = arg;
1510 int PollFlag, rsp, rnr = 0;
1511 unsigned int nr;
1513 rsp = *skb->data & 0x2;
1514 if (test_bit(FLG_ORIG, &l2->flag))
1515 rsp = !rsp;
1517 skb_pull(skb, l2addrsize(l2));
1519 if (IsRNR(skb->data, l2)) {
1520 set_peer_busy(l2);
1521 rnr = 1;
1522 } else
1523 clear_peer_busy(l2);
1525 if (test_bit(FLG_MOD128, &l2->flag)) {
1526 PollFlag = (skb->data[1] & 0x1) == 0x1;
1527 nr = skb->data[1] >> 1;
1528 } else {
1529 PollFlag = (skb->data[0] & 0x10);
1530 nr = (skb->data[0] >> 5) & 0x7;
1532 dev_kfree_skb(skb);
1533 if (rsp && PollFlag) {
1534 if (legalnr(l2, nr)) {
1535 if (rnr) {
1536 restart_t200(l2, 15);
1537 } else {
1538 stop_t200(l2, 16);
1539 mISDN_FsmAddTimer(&l2->t203, l2->T203,
1540 EV_L2_T203, NULL, 5);
1541 setva(l2, nr);
1543 invoke_retransmission(l2, nr);
1544 mISDN_FsmChangeState(fi, ST_L2_7);
1545 if (skb_queue_len(&l2->i_queue) && cansend(l2))
1546 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1547 } else
1548 nrerrorrecovery(fi);
1549 } else {
1550 if (!rsp && PollFlag)
1551 enquiry_response(l2);
1552 if (legalnr(l2, nr))
1553 setva(l2, nr);
1554 else
1555 nrerrorrecovery(fi);
1559 static void
1560 l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
1562 struct layer2 *l2 = fi->userdata;
1563 struct sk_buff *skb = arg;
1565 skb_pull(skb, l2addrsize(l2) + 1);
1567 if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
1568 (IsUA(skb->data) && (fi->state == ST_L2_7))) {
1569 l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
1570 establishlink(fi);
1571 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1573 dev_kfree_skb(skb);
1576 static void
1577 l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
1579 struct layer2 *l2 = fi->userdata;
1581 skb_queue_purge(&l2->ui_queue);
1582 l2->tei = GROUP_TEI;
1583 mISDN_FsmChangeState(fi, ST_L2_1);
1586 static void
1587 l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
1589 struct layer2 *l2 = fi->userdata;
1591 skb_queue_purge(&l2->ui_queue);
1592 l2->tei = GROUP_TEI;
1593 l2up_create(l2, DL_RELEASE_IND, 0, NULL);
1594 mISDN_FsmChangeState(fi, ST_L2_1);
1597 static void
1598 l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
1600 struct layer2 *l2 = fi->userdata;
1602 skb_queue_purge(&l2->i_queue);
1603 skb_queue_purge(&l2->ui_queue);
1604 freewin(l2);
1605 l2->tei = GROUP_TEI;
1606 stop_t200(l2, 17);
1607 st5_dl_release_l2l3(l2);
1608 mISDN_FsmChangeState(fi, ST_L2_1);
1611 static void
1612 l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
1614 struct layer2 *l2 = fi->userdata;
1616 skb_queue_purge(&l2->ui_queue);
1617 l2->tei = GROUP_TEI;
1618 stop_t200(l2, 18);
1619 l2up_create(l2, DL_RELEASE_IND, 0, NULL);
1620 mISDN_FsmChangeState(fi, ST_L2_1);
1623 static void
1624 l2_tei_remove(struct FsmInst *fi, int event, void *arg)
1626 struct layer2 *l2 = fi->userdata;
1628 skb_queue_purge(&l2->i_queue);
1629 skb_queue_purge(&l2->ui_queue);
1630 freewin(l2);
1631 l2->tei = GROUP_TEI;
1632 stop_t200(l2, 17);
1633 mISDN_FsmDelTimer(&l2->t203, 19);
1634 l2up_create(l2, DL_RELEASE_IND, 0, NULL);
/*	mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 *	    MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
 *	    0, NULL, 0);
 */
	mISDN_FsmChangeState(fi, ST_L2_1);
}
1642 static void
1643 l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
1645 struct layer2 *l2 = fi->userdata;
1646 struct sk_buff *skb = arg;
1648 skb_queue_purge(&l2->i_queue);
1649 skb_queue_purge(&l2->ui_queue);
1650 if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
1651 l2up(l2, DL_RELEASE_IND, skb);
1652 else
1653 dev_kfree_skb(skb);
1656 static void
1657 l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
1659 struct layer2 *l2 = fi->userdata;
1660 struct sk_buff *skb = arg;
1662 skb_queue_purge(&l2->i_queue);
1663 skb_queue_purge(&l2->ui_queue);
1664 freewin(l2);
1665 stop_t200(l2, 19);
1666 st5_dl_release_l2l3(l2);
1667 mISDN_FsmChangeState(fi, ST_L2_4);
1668 if (l2->tm)
1669 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1670 dev_kfree_skb(skb);
1673 static void
1674 l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
1676 struct layer2 *l2 = fi->userdata;
1677 struct sk_buff *skb = arg;
1679 skb_queue_purge(&l2->ui_queue);
1680 stop_t200(l2, 20);
1681 l2up(l2, DL_RELEASE_CNF, skb);
1682 mISDN_FsmChangeState(fi, ST_L2_4);
1683 if (l2->tm)
1684 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1687 static void
1688 l2_persistent_da(struct FsmInst *fi, int event, void *arg)
1690 struct layer2 *l2 = fi->userdata;
1691 struct sk_buff *skb = arg;
1693 skb_queue_purge(&l2->i_queue);
1694 skb_queue_purge(&l2->ui_queue);
1695 freewin(l2);
1696 stop_t200(l2, 19);
1697 mISDN_FsmDelTimer(&l2->t203, 19);
1698 l2up(l2, DL_RELEASE_IND, skb);
1699 mISDN_FsmChangeState(fi, ST_L2_4);
1700 if (l2->tm)
1701 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1704 static void
1705 l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
1707 struct layer2 *l2 = fi->userdata;
1708 struct sk_buff *skb = arg;
1710 if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
1711 enquiry_cr(l2, RNR, RSP, 0);
1712 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1714 if (skb)
1715 dev_kfree_skb(skb);
1718 static void
1719 l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
1721 struct layer2 *l2 = fi->userdata;
1722 struct sk_buff *skb = arg;
1724 if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
1725 enquiry_cr(l2, RR, RSP, 0);
1726 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1728 if (skb)
1729 dev_kfree_skb(skb);
1732 static void
1733 l2_frame_error(struct FsmInst *fi, int event, void *arg)
1735 struct layer2 *l2 = fi->userdata;
1737 l2mgr(l2, MDL_ERROR_IND, arg);
1740 static void
1741 l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
1743 struct layer2 *l2 = fi->userdata;
1745 l2mgr(l2, MDL_ERROR_IND, arg);
1746 establishlink(fi);
1747 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1750 static struct FsmNode L2FnList[] =
1752 {ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
1753 {ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
1754 {ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
1755 {ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
1756 {ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
1757 {ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
1758 {ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
1759 {ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
1760 {ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
1761 {ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
1762 {ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
1763 {ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
1764 {ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
1765 {ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
1766 {ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
1767 {ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
1768 {ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
1769 {ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
1770 {ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
1771 {ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
1772 {ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
1773 {ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
1774 {ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
1775 {ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
1776 {ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
1777 {ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
1778 {ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
1779 {ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
1780 {ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
1781 {ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
1782 {ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
1783 {ST_L2_4, EV_L2_SABME, l2_start_multi},
1784 {ST_L2_5, EV_L2_SABME, l2_send_UA},
1785 {ST_L2_6, EV_L2_SABME, l2_send_DM},
1786 {ST_L2_7, EV_L2_SABME, l2_restart_multi},
1787 {ST_L2_8, EV_L2_SABME, l2_restart_multi},
1788 {ST_L2_4, EV_L2_DISC, l2_send_DM},
1789 {ST_L2_5, EV_L2_DISC, l2_send_DM},
1790 {ST_L2_6, EV_L2_DISC, l2_send_UA},
1791 {ST_L2_7, EV_L2_DISC, l2_stop_multi},
1792 {ST_L2_8, EV_L2_DISC, l2_stop_multi},
1793 {ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
1794 {ST_L2_5, EV_L2_UA, l2_connected},
1795 {ST_L2_6, EV_L2_UA, l2_released},
1796 {ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
1797 {ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
1798 {ST_L2_4, EV_L2_DM, l2_reestablish},
1799 {ST_L2_5, EV_L2_DM, l2_st5_dm_release},
1800 {ST_L2_6, EV_L2_DM, l2_st6_dm_release},
1801 {ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
1802 {ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
1803 {ST_L2_1, EV_L2_UI, l2_got_ui},
1804 {ST_L2_2, EV_L2_UI, l2_got_ui},
1805 {ST_L2_3, EV_L2_UI, l2_got_ui},
1806 {ST_L2_4, EV_L2_UI, l2_got_ui},
1807 {ST_L2_5, EV_L2_UI, l2_got_ui},
1808 {ST_L2_6, EV_L2_UI, l2_got_ui},
1809 {ST_L2_7, EV_L2_UI, l2_got_ui},
1810 {ST_L2_8, EV_L2_UI, l2_got_ui},
1811 {ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
1812 {ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
1813 {ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
1814 {ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
1815 {ST_L2_7, EV_L2_I, l2_got_iframe},
1816 {ST_L2_8, EV_L2_I, l2_got_iframe},
1817 {ST_L2_5, EV_L2_T200, l2_st5_tout_200},
1818 {ST_L2_6, EV_L2_T200, l2_st6_tout_200},
1819 {ST_L2_7, EV_L2_T200, l2_st7_tout_200},
1820 {ST_L2_8, EV_L2_T200, l2_st8_tout_200},
1821 {ST_L2_7, EV_L2_T203, l2_st7_tout_203},
1822 {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
1823 {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
1824 {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
1825 {ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
1826 {ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
1827 {ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
1828 {ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
1829 {ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
1830 {ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
1831 {ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
1832 {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
1833 {ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
1834 {ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
1835 {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
1836 {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
1837 {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
1838 {ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
1839 {ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
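
/*
 * L2FnList is the state/event table handed to mISDN_FsmNew() in
 * Isdnl2_Init(): each entry maps one (state, event) pair to a handler.
 * The ST_L2_1 .. ST_L2_8 states follow the Q.921 numbering, from
 * "TEI unassigned" up to "multiple frame established" (ST_L2_7) and
 * "timer recovery" (ST_L2_8).
 */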
1842 static int
1843 ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
1845 u_char *datap = skb->data;
1846 int ret = -EINVAL;
1847 int psapi, ptei;
1848 u_int l;
1849 int c = 0;
1851 l = l2addrsize(l2);
1852 if (skb->len <= l) {
1853 mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
1854 return ret;
1856 if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
1857 psapi = *datap++;
1858 ptei = *datap++;
1859 if ((psapi & 1) || !(ptei & 1)) {
1860 printk(KERN_WARNING
1861 "l2 D-channel frame wrong EA0/EA1\n");
1862 return ret;
1864 psapi >>= 2;
1865 ptei >>= 1;
1866 if (psapi != l2->sapi) {
1867 /* not our business */
1868 if (*debug & DEBUG_L2)
1869 printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
1870 __func__, psapi, l2->sapi);
1871 dev_kfree_skb(skb);
1872 return 0;
1874 if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
1875 /* not our business */
1876 if (*debug & DEBUG_L2)
1877 printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
1878 __func__, ptei, l2->tei);
1879 dev_kfree_skb(skb);
1880 return 0;
1882 } else
1883 datap += l;
1884 if (!(*datap & 1)) { /* I-Frame */
1885 c = iframe_error(l2, skb);
1886 if (!c)
1887 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
1888 } else if (IsSFrame(datap, l2)) { /* S-Frame */
1889 c = super_error(l2, skb);
1890 if (!c)
1891 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
1892 } else if (IsUI(datap)) {
1893 c = UI_error(l2, skb);
1894 if (!c)
1895 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
1896 } else if (IsSABME(datap, l2)) {
1897 c = unnum_error(l2, skb, CMD);
1898 if (!c)
1899 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
1900 } else if (IsUA(datap)) {
1901 c = unnum_error(l2, skb, RSP);
1902 if (!c)
1903 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
1904 } else if (IsDISC(datap)) {
1905 c = unnum_error(l2, skb, CMD);
1906 if (!c)
1907 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
1908 } else if (IsDM(datap)) {
1909 c = unnum_error(l2, skb, RSP);
1910 if (!c)
1911 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
1912 } else if (IsFRMR(datap)) {
1913 c = FRMR_error(l2, skb);
1914 if (!c)
1915 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
1916 } else
1917 c = 'L';
1918 if (c) {
1919 printk(KERN_WARNING "l2 D-channel frame error %c\n", c);
1920 mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
1922 return ret;
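
/*
 * ph_data_indication() is the receive-side dispatcher: it validates the
 * LAPD address field (EA bits, SAPI, TEI), classifies the frame as
 * I / S / UI / SABME / UA / DISC / DM / FRMR and feeds the matching event
 * into the FSM.  The single-character codes ('L', 'N', 'O', ...) are error
 * labels reported through EV_L2_FRAME_ERROR and, for some of them, handed
 * to the TEI manager via l2mgr().
 */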
1925 static int
1926 l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
1928 struct layer2 *l2 = container_of(ch, struct layer2, ch);
1929 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1930 int ret = -EINVAL;
1932 if (*debug & DEBUG_L2_RECV)
1933 printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n",
1934 __func__, hh->prim, hh->id, l2->sapi, l2->tei);
1935 switch (hh->prim) {
1936 case PH_DATA_IND:
1937 ret = ph_data_indication(l2, hh, skb);
1938 break;
1939 case PH_DATA_CNF:
1940 ret = ph_data_confirm(l2, hh, skb);
1941 break;
1942 case PH_ACTIVATE_IND:
1943 test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
1944 l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
1945 if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
1946 ret = mISDN_FsmEvent(&l2->l2m,
1947 EV_L2_DL_ESTABLISH_REQ, skb);
1948 break;
1949 case PH_DEACTIVATE_IND:
1950 test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
1951 l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
1952 ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
1953 break;
1954 case MPH_INFORMATION_IND:
1955 if (!l2->up)
1956 break;
1957 ret = l2->up->send(l2->up, skb);
1958 break;
1959 case DL_DATA_REQ:
1960 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
1961 break;
1962 case DL_UNITDATA_REQ:
1963 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
1964 break;
1965 case DL_ESTABLISH_REQ:
1966 if (test_bit(FLG_LAPB, &l2->flag))
1967 test_and_set_bit(FLG_ORIG, &l2->flag);
1968 if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
1969 if (test_bit(FLG_LAPD, &l2->flag) ||
1970 test_bit(FLG_ORIG, &l2->flag))
1971 ret = mISDN_FsmEvent(&l2->l2m,
1972 EV_L2_DL_ESTABLISH_REQ, skb);
1973 } else {
1974 if (test_bit(FLG_LAPD, &l2->flag) ||
1975 test_bit(FLG_ORIG, &l2->flag)) {
1976 test_and_set_bit(FLG_ESTAB_PEND,
1977 &l2->flag);
1979 ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
1980 skb);
1982 break;
1983 case DL_RELEASE_REQ:
1984 if (test_bit(FLG_LAPB, &l2->flag))
1985 l2down_create(l2, PH_DEACTIVATE_REQ,
1986 l2_newid(l2), 0, NULL);
1987 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
1988 skb);
1989 break;
1990 default:
1991 if (*debug & DEBUG_L2)
1992 l2m_debug(&l2->l2m, "l2 unknown pr %04x",
1993 hh->prim);
1995 if (ret) {
1996 dev_kfree_skb(skb);
1997 ret = 0;
1999 return ret;
2003 tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
2005 int ret = -EINVAL;
2007 if (*debug & DEBUG_L2_TEI)
2008 printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
2009 switch (cmd) {
2010 case (MDL_ASSIGN_REQ):
2011 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
2012 break;
2013 case (MDL_REMOVE_REQ):
2014 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
2015 break;
2016 case (MDL_ERROR_IND):
2017 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
2018 break;
2019 case (MDL_ERROR_RSP):
2020 /* ETS 300-125 5.3.2.1 Test: TC13010 */
2021 printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n");
2022 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
2023 break;
2025 return ret;
2028 static void
2029 release_l2(struct layer2 *l2)
2031 mISDN_FsmDelTimer(&l2->t200, 21);
2032 mISDN_FsmDelTimer(&l2->t203, 16);
2033 skb_queue_purge(&l2->i_queue);
2034 skb_queue_purge(&l2->ui_queue);
2035 skb_queue_purge(&l2->down_queue);
2036 ReleaseWin(l2);
2037 if (test_bit(FLG_LAPD, &l2->flag)) {
2038 TEIrelease(l2);
2039 if (l2->ch.st)
2040 l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
2041 CLOSE_CHANNEL, NULL);
2043 kfree(l2);
2046 static int
2047 l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
2049 struct layer2 *l2 = container_of(ch, struct layer2, ch);
2050 u_int info;
2052 if (*debug & DEBUG_L2_CTRL)
2053 printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
2055 switch (cmd) {
2056 case OPEN_CHANNEL:
2057 if (test_bit(FLG_LAPD, &l2->flag)) {
2058 set_channel_address(&l2->ch, l2->sapi, l2->tei);
2059 info = DL_INFO_L2_CONNECT;
2060 l2up_create(l2, DL_INFORMATION_IND,
2061 sizeof(info), &info);
2063 break;
2064 case CLOSE_CHANNEL:
2065 if (l2->ch.peer)
2066 l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
2067 release_l2(l2);
2068 break;
2070 return 0;
2073 struct layer2 *
2074 create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
2075 int sapi)
2077 struct layer2 *l2;
2078 struct channel_req rq;
2080 l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
2081 if (!l2) {
2082 printk(KERN_ERR "kzalloc layer2 failed\n");
2083 return NULL;
2085 l2->next_id = 1;
2086 l2->down_id = MISDN_ID_NONE;
2087 l2->up = ch;
2088 l2->ch.st = ch->st;
2089 l2->ch.send = l2_send;
2090 l2->ch.ctrl = l2_ctrl;
2091 switch (protocol) {
2092 case ISDN_P_LAPD_NT:
2093 test_and_set_bit(FLG_LAPD, &l2->flag);
2094 test_and_set_bit(FLG_LAPD_NET, &l2->flag);
2095 test_and_set_bit(FLG_MOD128, &l2->flag);
2096 l2->sapi = sapi;
2097 l2->maxlen = MAX_DFRAME_LEN;
2098 if (test_bit(OPTION_L2_PMX, &options))
2099 l2->window = 7;
2100 else
2101 l2->window = 1;
2102 if (test_bit(OPTION_L2_PTP, &options))
2103 test_and_set_bit(FLG_PTP, &l2->flag);
2104 if (test_bit(OPTION_L2_FIXEDTEI, &options))
2105 test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
2106 l2->tei = tei;
2107 l2->T200 = 1000;
2108 l2->N200 = 3;
2109 l2->T203 = 10000;
2110 if (test_bit(OPTION_L2_PMX, &options))
2111 rq.protocol = ISDN_P_NT_E1;
2112 else
2113 rq.protocol = ISDN_P_NT_S0;
2114 rq.adr.channel = 0;
2115 l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
2116 break;
2117 case ISDN_P_LAPD_TE:
2118 test_and_set_bit(FLG_LAPD, &l2->flag);
2119 test_and_set_bit(FLG_MOD128, &l2->flag);
2120 test_and_set_bit(FLG_ORIG, &l2->flag);
2121 l2->sapi = sapi;
2122 l2->maxlen = MAX_DFRAME_LEN;
2123 if (test_bit(OPTION_L2_PMX, &options))
2124 l2->window = 7;
2125 else
2126 l2->window = 1;
2127 if (test_bit(OPTION_L2_PTP, &options))
2128 test_and_set_bit(FLG_PTP, &l2->flag);
2129 if (test_bit(OPTION_L2_FIXEDTEI, &options))
2130 test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
2131 l2->tei = tei;
2132 l2->T200 = 1000;
2133 l2->N200 = 3;
2134 l2->T203 = 10000;
2135 if (test_bit(OPTION_L2_PMX, &options))
2136 rq.protocol = ISDN_P_TE_E1;
2137 else
2138 rq.protocol = ISDN_P_TE_S0;
2139 rq.adr.channel = 0;
2140 l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
2141 break;
2142 case ISDN_P_B_X75SLP:
2143 test_and_set_bit(FLG_LAPB, &l2->flag);
2144 l2->window = 7;
2145 l2->maxlen = MAX_DATA_SIZE;
2146 l2->T200 = 1000;
2147 l2->N200 = 4;
2148 l2->T203 = 5000;
2149 l2->addr.A = 3;
2150 l2->addr.B = 1;
2151 break;
2152 default:
2153 printk(KERN_ERR "layer2 create failed prt %x\n",
2154 protocol);
2155 kfree(l2);
2156 return NULL;
2158 skb_queue_head_init(&l2->i_queue);
2159 skb_queue_head_init(&l2->ui_queue);
2160 skb_queue_head_init(&l2->down_queue);
2161 skb_queue_head_init(&l2->tmp_queue);
2162 InitWin(l2);
2163 l2->l2m.fsm = &l2fsm;
2164 if (test_bit(FLG_LAPB, &l2->flag) ||
2165 test_bit(FLG_PTP, &l2->flag) ||
2166 test_bit(FLG_LAPD_NET, &l2->flag))
2167 l2->l2m.state = ST_L2_4;
2168 else
2169 l2->l2m.state = ST_L2_1;
2170 l2->l2m.debug = *debug;
2171 l2->l2m.userdata = l2;
2172 l2->l2m.userint = 0;
2173 l2->l2m.printdebug = l2m_debug;
2175 mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
2176 mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
2177 return l2;
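
/*
 * create_l2() instantiates a layer2 for one of three protocols:
 * ISDN_P_LAPD_NT / ISDN_P_LAPD_TE use modulo-128 operation with window 1
 * (7 with OPTION_L2_PMX on a PRI), T200 = 1 s, N200 = 3, T203 = 10 s and
 * open the matching D-channel; ISDN_P_B_X75SLP sets up LAPB on a B-channel
 * with window 7, N200 = 4 and T203 = 5 s.
 */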
2180 static int
2181 x75create(struct channel_req *crq)
2183 struct layer2 *l2;
2185 if (crq->protocol != ISDN_P_B_X75SLP)
2186 return -EPROTONOSUPPORT;
2187 l2 = create_l2(crq->ch, crq->protocol, 0, 0, 0);
2188 if (!l2)
2189 return -ENOMEM;
2190 crq->ch = &l2->ch;
2191 crq->protocol = ISDN_P_B_HDLC;
2192 return 0;
static struct Bprotocol X75SLP = {
	.Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
	.name = "X75SLP",
	.create = x75create
};

int
Isdnl2_Init(u_int *deb)
{
	debug = deb;
	mISDN_register_Bprotocol(&X75SLP);
	l2fsm.state_count = L2_STATE_COUNT;
	l2fsm.event_count = L2_EVENT_COUNT;
	l2fsm.strEvent = strL2Event;
	l2fsm.strState = strL2State;
	mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
	TEIInit(deb);
	return 0;
}

void
Isdnl2_cleanup(void)
{
	mISDN_unregister_Bprotocol(&X75SLP);
	TEIFree();
	mISDN_FsmFree(&l2fsm);
}