/*
 * PPP async serial channel driver for Linux.
 *
 * Copyright 1999 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames over async serial lines.  It relies on
 * the generic PPP layer to give it frames to send and to process
 * received frames.  It implements the PPP line discipline.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/crc-ccitt.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/string.h>

#define PPP_VERSION	"2.4.2"
#define OBUFSIZE	256

/* Structure for storing local state. */
struct asyncppp {
	struct tty_struct *tty;
	unsigned int	flags;
	unsigned int	state;
	unsigned int	rbits;
	int		mru;
	spinlock_t	xmit_lock;
	spinlock_t	recv_lock;
	unsigned long	xmit_flags;
	u32		xaccm[8];
	u32		raccm;
	unsigned int	bytes_sent;
	unsigned int	bytes_rcvd;

	struct sk_buff	*tpkt;		/* frame currently being transmitted */
	int		tpkt_pos;
	u16		tfcs;
	unsigned char	*optr;
	unsigned char	*olim;
	unsigned long	last_xmit;

	struct sk_buff	*rpkt;		/* frame currently being received */
	int		lcp_fcs;
	struct sk_buff_head rqueue;

	struct tasklet_struct tsk;

	atomic_t	refcnt;
	struct semaphore dead_sem;
	struct ppp_channel chan;	/* interface to generic ppp layer */
	unsigned char	obuf[OBUFSIZE];	/* output buffer */
};
/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP	0
#define XMIT_FULL	1
#define XMIT_BUSY	2

/* State bits */
#define SC_TOSS		1
#define SC_ESCAPE	2
#define SC_PREV_ERROR	4

#define SC_RCV_BITS	(SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
static int flag_time = HZ;
module_param(flag_time, int, 0);
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_PPP);
static int ppp_async_encode(struct asyncppp *ap);
static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_async_push(struct asyncppp *ap);
static void ppp_async_flush_output(struct asyncppp *ap);
static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
			    char *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
			   unsigned long arg);
static void ppp_async_process(unsigned long arg);

static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
			   int len, int inbound);

static const struct ppp_channel_ops async_ops = {
	.start_xmit = ppp_async_send,
	.ioctl      = ppp_async_ioctl,
};
/*
 * Routines implementing the PPP line discipline.
 */

/*
 * We have a potential race on dereferencing tty->disc_data,
 * because the tty layer provides no locking at all - thus one
 * cpu could be running ppp_asynctty_receive while another
 * calls ppp_asynctty_close, which zeroes tty->disc_data and
 * frees the memory that ppp_asynctty_receive is using.  The best
 * way to fix this is to use a rwlock in the tty struct, but for now
 * we use a single global rwlock for all ttys in ppp line discipline.
 *
 * FIXME: this is no longer true.  The _close path for the ldisc is
 * now guaranteed to be sane.
 */
static DEFINE_RWLOCK(disc_data_lock);

static struct asyncppp *ap_get(struct tty_struct *tty)
{
	struct asyncppp *ap;

	read_lock(&disc_data_lock);
	ap = tty->disc_data;
	if (ap != NULL)
		atomic_inc(&ap->refcnt);
	read_unlock(&disc_data_lock);
	return ap;
}
static void ap_put(struct asyncppp *ap)
{
	if (atomic_dec_and_test(&ap->refcnt))
		up(&ap->dead_sem);
}
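/*
 * Illustrative only (a hypothetical helper, not used by the driver):
 * the pattern the ldisc entry points below follow - take a counted
 * reference before touching ap, and drop it when done, so that a
 * concurrent ppp_asynctty_close can wait for all users to finish.
 */
static inline void example_ap_usage(struct tty_struct *tty)
{
	struct asyncppp *ap = ap_get(tty);

	if (ap == NULL)
		return;		/* ldisc is being torn down */
	/* ... safe to use ap here ... */
	ap_put(ap);
}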
/*
 * Called when a tty is put into PPP line discipline. Called in process
 * context.
 */
static int
ppp_asynctty_open(struct tty_struct *tty)
{
	struct asyncppp *ap;
	int err;
	int speed;

	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;

	err = -ENOMEM;
	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		goto out;

	/* initialize the asyncppp structure */
	ap->tty = tty;
	ap->mru = PPP_MRU;
	spin_lock_init(&ap->xmit_lock);
	spin_lock_init(&ap->recv_lock);
	ap->xaccm[0] = ~0U;
	ap->xaccm[3] = 0x60000000U;
	ap->raccm = ~0U;
	ap->optr = ap->obuf;
	ap->olim = ap->obuf;
	ap->lcp_fcs = -1;

	skb_queue_head_init(&ap->rqueue);
	tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);

	atomic_set(&ap->refcnt, 1);
	init_MUTEX_LOCKED(&ap->dead_sem);

	ap->chan.private = ap;
	ap->chan.ops = &async_ops;
	ap->chan.mtu = PPP_MRU;
	speed = tty_get_baud_rate(tty);
	ap->chan.speed = speed;
	err = ppp_register_channel(&ap->chan);
	if (err)
		goto out_free;

	tty->disc_data = ap;
	tty->receive_room = 65536;
	return 0;

 out_free:
	kfree(ap);
 out:
	return err;
}
/*
 * Called when the tty is put into another line discipline
 * or it hangs up.  We have to wait for any cpu currently
 * executing in any of the other ppp_asynctty_* routines to
 * finish before we can call ppp_unregister_channel and free
 * the asyncppp struct.  This routine must be called from
 * process context, not interrupt or softirq context.
 */
static void
ppp_asynctty_close(struct tty_struct *tty)
{
	struct asyncppp *ap;

	write_lock_irq(&disc_data_lock);
	ap = tty->disc_data;
	tty->disc_data = NULL;
	write_unlock_irq(&disc_data_lock);
	if (!ap)
		return;

	/*
	 * We have now ensured that nobody can start using ap from now
	 * on, but we have to wait for all existing users to finish.
	 * Note that ppp_unregister_channel ensures that no calls to
	 * our channel ops (i.e. ppp_async_send/ioctl) are in progress
	 * by the time it returns.
	 */
	if (!atomic_dec_and_test(&ap->refcnt))
		down(&ap->dead_sem);
	tasklet_kill(&ap->tsk);

	ppp_unregister_channel(&ap->chan);
	if (ap->rpkt)
		kfree_skb(ap->rpkt);
	skb_queue_purge(&ap->rqueue);
	if (ap->tpkt)
		kfree_skb(ap->tpkt);
	kfree(ap);
}
/*
 * Called on tty hangup in process context.
 *
 * Wait for I/O to the driver to complete and unregister the PPP channel.
 * This is already done by the close routine, so just call that.
 */
static int ppp_asynctty_hangup(struct tty_struct *tty)
{
	ppp_asynctty_close(tty);
	return 0;
}
/*
 * Read does nothing - no data is ever available this way.
 * Pppd reads and writes packets via /dev/ppp instead.
 */
static ssize_t
ppp_asynctty_read(struct tty_struct *tty, struct file *file,
		  unsigned char __user *buf, size_t count)
{
	return -EAGAIN;
}
/*
 * Write on the tty does nothing, the packets all come in
 * from the ppp generic stuff.
 */
static ssize_t
ppp_asynctty_write(struct tty_struct *tty, struct file *file,
		   const unsigned char *buf, size_t count)
{
	return -EAGAIN;
}
/*
 * Called in process context only. May be re-entered by multiple
 * ioctl calling threads.
 */
static int
ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	struct asyncppp *ap = ap_get(tty);
	int err, val;
	int __user *p = (int __user *)arg;

	if (!ap)
		return -ENXIO;
	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGCHAN:
		err = -EFAULT;
		if (put_user(ppp_channel_index(&ap->chan), p))
			break;
		err = 0;
		break;

	case PPPIOCGUNIT:
		err = -EFAULT;
		if (put_user(ppp_unit_number(&ap->chan), p))
			break;
		err = 0;
		break;

	case TCFLSH:
		/* flush our buffers and the serial port's buffer */
		if (arg == TCIOFLUSH || arg == TCOFLUSH)
			ppp_async_flush_output(ap);
		err = tty_perform_flush(tty, arg);
		break;

	case FIONREAD:
		val = 0;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	default:
		/* Try the various mode ioctls */
		err = tty_mode_ioctl(tty, file, cmd, arg);
	}

	ap_put(ap);
	return err;
}
/* No kernel lock - fine */
static unsigned int
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
	return 0;
}
/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
		     char *cflags, int count)
{
	struct asyncppp *ap = ap_get(tty);
	unsigned long flags;

	if (!ap)
		return;
	spin_lock_irqsave(&ap->recv_lock, flags);
	ppp_async_input(ap, buf, cflags, count);
	spin_unlock_irqrestore(&ap->recv_lock, flags);
	if (!skb_queue_empty(&ap->rqueue))
		tasklet_schedule(&ap->tsk);
	ap_put(ap);
	tty_unthrottle(tty);
}
static void
ppp_asynctty_wakeup(struct tty_struct *tty)
{
	struct asyncppp *ap = ap_get(tty);

	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	if (!ap)
		return;
	set_bit(XMIT_WAKEUP, &ap->xmit_flags);
	tasklet_schedule(&ap->tsk);
	ap_put(ap);
}
static struct tty_ldisc_ops ppp_ldisc = {
	.owner  = THIS_MODULE,
	.magic	= TTY_LDISC_MAGIC,
	.name	= "ppp",
	.open	= ppp_asynctty_open,
	.close	= ppp_asynctty_close,
	.hangup	= ppp_asynctty_hangup,
	.read	= ppp_asynctty_read,
	.write	= ppp_asynctty_write,
	.ioctl	= ppp_asynctty_ioctl,
	.poll	= ppp_asynctty_poll,
	.receive_buf = ppp_asynctty_receive,
	.write_wakeup = ppp_asynctty_wakeup,
};
static int __init
ppp_async_init(void)
{
	int err;

	err = tty_register_ldisc(N_PPP, &ppp_ldisc);
	if (err != 0)
		printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
		       err);
	return err;
}
/*
 * The following routines provide the PPP channel interface.
 */
static int
ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
	struct asyncppp *ap = chan->private;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int err, val;
	u32 accm[8];

	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGFLAGS:
		val = ap->flags | ap->rbits;
		if (put_user(val, p))
			break;
		err = 0;
		break;
	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		ap->flags = val & ~SC_RCV_BITS;
		spin_lock_irq(&ap->recv_lock);
		ap->rbits = val & SC_RCV_BITS;
		spin_unlock_irq(&ap->recv_lock);
		err = 0;
		break;

	case PPPIOCGASYNCMAP:
		if (put_user(ap->xaccm[0], (u32 __user *)argp))
			break;
		err = 0;
		break;
	case PPPIOCSASYNCMAP:
		if (get_user(ap->xaccm[0], (u32 __user *)argp))
			break;
		err = 0;
		break;

	case PPPIOCGRASYNCMAP:
		if (put_user(ap->raccm, (u32 __user *)argp))
			break;
		err = 0;
		break;
	case PPPIOCSRASYNCMAP:
		if (get_user(ap->raccm, (u32 __user *)argp))
			break;
		err = 0;
		break;

	case PPPIOCGXASYNCMAP:
		if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
			break;
		err = 0;
		break;
	case PPPIOCSXASYNCMAP:
		if (copy_from_user(accm, argp, sizeof(accm)))
			break;
		accm[2] &= ~0x40000000U;	/* can't escape 0x5e */
		accm[3] |= 0x60000000U;		/* must escape 0x7d, 0x7e */
		memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
		err = 0;
		break;

	case PPPIOCGMRU:
		if (put_user(ap->mru, p))
			break;
		err = 0;
		break;
	case PPPIOCSMRU:
		if (get_user(val, p))
			break;
		if (val < PPP_MRU)
			val = PPP_MRU;
		ap->mru = val;
		err = 0;
		break;

	default:
		err = -ENOTTY;
	}

	return err;
}
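/*
 * Illustrative helper (hypothetical, not used by the driver): how a
 * character indexes the 256-bit async control character map stored as
 * eight 32-bit words.  For example, 0x5e selects word 0x5e >> 5 == 2,
 * bit 0x5e & 0x1f == 30 (0x40000000), while 0x7d and 0x7e select bits
 * 29 and 30 of word 3 - hence the masks in PPPIOCSXASYNCMAP above.
 */
static inline int example_char_in_accm(const u32 *accm, unsigned char c)
{
	return (accm[c >> 5] >> (c & 0x1f)) & 1;
}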
/*
 * This is called at softirq level to deliver received packets
 * to the ppp_generic code, and to tell the ppp_generic code
 * if we can accept more output now.
 */
static void ppp_async_process(unsigned long arg)
{
	struct asyncppp *ap = (struct asyncppp *) arg;
	struct sk_buff *skb;

	/* process received packets */
	while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
		if (skb->cb[0])
			/* this packet had an error */
			ppp_input_error(&ap->chan, 0);
		ppp_input(&ap->chan, skb);
	}

	/* try to push more stuff out */
	if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
		ppp_output_wakeup(&ap->chan);
}
/*
 * Procedures for encapsulation and framing.
 */

/*
 * Procedure to encode the data for async serial transmission.
 * Does octet stuffing (escaping), puts the address/control bytes
 * on if A/C compression is disabled, and does protocol compression.
 * Assumes ap->tpkt != 0 on entry.
 * Returns 1 if we finished the current frame, 0 otherwise.
 */

#define PUT_BYTE(ap, buf, c, islcp)	do {		\
	if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
		*buf++ = PPP_ESCAPE;			\
		*buf++ = c ^ PPP_TRANS;			\
	} else						\
		*buf++ = c;				\
} while (0)
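/*
 * Minimal illustration (hypothetical helper, not used by the driver):
 * stuffing the single byte 0x7e with the default xaccm emits the two
 * bytes PPP_ESCAPE (0x7d) and 0x7e ^ PPP_TRANS == 0x5e.
 */
static inline int example_stuffed_len(struct asyncppp *ap, unsigned char c,
				      int islcp)
{
	unsigned char tmp[2], *buf = tmp;

	PUT_BYTE(ap, buf, c, islcp);
	return buf - tmp;	/* 2 if c was escaped, 1 otherwise */
}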
static int
ppp_async_encode(struct asyncppp *ap)
{
	int fcs, i, count, c, proto;
	unsigned char *buf, *buflim;
	unsigned char *data;
	int islcp;

	buf = ap->obuf;
	ap->olim = buf;
	ap->optr = buf;
	i = ap->tpkt_pos;
	data = ap->tpkt->data;
	count = ap->tpkt->len;
	fcs = ap->tfcs;
	proto = (data[0] << 8) + data[1];

	/*
	 * LCP packets with code values between 1 (configure-request)
	 * and 7 (code-reject) must be sent as though no options
	 * had been negotiated.
	 */
	islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;

	if (i == 0) {
		if (islcp)
			async_lcp_peek(ap, data, count, 0);

		/*
		 * Start of a new packet - insert the leading FLAG
		 * character if necessary.
		 */
		if (islcp || flag_time == 0 ||
		    time_after_eq(jiffies, ap->last_xmit + flag_time))
			*buf++ = PPP_FLAG;
		ap->last_xmit = jiffies;
		fcs = PPP_INITFCS;

		/*
		 * Put in the address/control bytes if necessary
		 */
		if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
			PUT_BYTE(ap, buf, 0xff, islcp);
			fcs = PPP_FCS(fcs, 0xff);
			PUT_BYTE(ap, buf, 0x03, islcp);
			fcs = PPP_FCS(fcs, 0x03);
		}
	}

	/*
	 * Once we put in the last byte, we need to put in the FCS
	 * and closing flag, so make sure there are at least 7 bytes
	 * of free space in the output buffer.
	 */
	buflim = ap->obuf + OBUFSIZE - 6;
	while (i < count && buf < buflim) {
		c = data[i++];
		if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
			continue;	/* compress protocol field */
		fcs = PPP_FCS(fcs, c);
		PUT_BYTE(ap, buf, c, islcp);
	}

	if (i < count) {
		/*
		 * Remember where we are up to in this packet.
		 */
		ap->olim = buf;
		ap->tpkt_pos = i;
		ap->tfcs = fcs;
		return 0;
	}

	/*
	 * We have finished the packet.  Add the FCS and flag.
	 */
	fcs = ~fcs;
	c = fcs & 0xff;
	PUT_BYTE(ap, buf, c, islcp);
	c = (fcs >> 8) & 0xff;
	PUT_BYTE(ap, buf, c, islcp);
	*buf++ = PPP_FLAG;
	ap->olim = buf;

	kfree_skb(ap->tpkt);
	ap->tpkt = NULL;
	return 1;
}
/*
 * Transmit-side routines.
 */

/*
 * Send a packet to the peer over an async tty line.
 * Returns 1 iff the packet was accepted.
 * If the packet was not accepted, we will call ppp_output_wakeup
 * at some later time.
 */
static int
ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct asyncppp *ap = chan->private;

	ppp_async_push(ap);

	if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
		return 0;	/* already full */
	ap->tpkt = skb;
	ap->tpkt_pos = 0;

	ppp_async_push(ap);
	return 1;
}
/*
 * Push as much data as possible out to the tty.
 */
static int
ppp_async_push(struct asyncppp *ap)
{
	int avail, sent, done = 0;
	struct tty_struct *tty = ap->tty;
	int tty_stuffed = 0;

	/*
	 * We can get called recursively here if the tty write
	 * function calls our wakeup function.  This can happen
	 * for example on a pty with both the master and slave
	 * set to PPP line discipline.
	 * We use the XMIT_BUSY bit to detect this and get out,
	 * leaving the XMIT_WAKEUP bit set to tell the other
	 * instance that it may now be able to write more.
	 */
	if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
		return 0;
	spin_lock_bh(&ap->xmit_lock);
	for (;;) {
		if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
			tty_stuffed = 0;
		if (!tty_stuffed && ap->optr < ap->olim) {
			avail = ap->olim - ap->optr;
			set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
			sent = tty->ops->write(tty, ap->optr, avail);
			if (sent < 0)
				goto flush;	/* error, e.g. loss of CD */
			ap->optr += sent;
			if (sent < avail)
				tty_stuffed = 1;
			continue;
		}
		if (ap->optr >= ap->olim && ap->tpkt) {
			if (ppp_async_encode(ap)) {
				/* finished processing ap->tpkt */
				clear_bit(XMIT_FULL, &ap->xmit_flags);
				done = 1;
			}
			continue;
		}
		/*
		 * We haven't made any progress this time around.
		 * Clear XMIT_BUSY to let other callers in, but
		 * after doing so we have to check if anyone set
		 * XMIT_WAKEUP since we last checked it.  If they
		 * did, we should try again to set XMIT_BUSY and go
		 * around again in case XMIT_BUSY was still set when
		 * the other caller tried.
		 */
		clear_bit(XMIT_BUSY, &ap->xmit_flags);
		/* any more work to do? if not, exit the loop */
		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
		      (!tty_stuffed && ap->tpkt)))
			break;
		/* more work to do, see if we can do it now */
		if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
			break;
	}
	spin_unlock_bh(&ap->xmit_lock);
	return done;

 flush:
	clear_bit(XMIT_BUSY, &ap->xmit_flags);
	if (ap->tpkt) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	ap->optr = ap->olim = ap->obuf;
	spin_unlock_bh(&ap->xmit_lock);
	return done;
}
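/*
 * A simplified sketch of the exit protocol used above (hypothetical
 * helper, not part of the driver; the real loop also re-checks for
 * pending output).  The BUSY bit is cleared before re-checking
 * WAKEUP, so a wakeup that races with the release is never lost:
 * either we see it and re-acquire BUSY, or the racing caller already
 * holds BUSY and will process it.
 */
static inline int example_try_exit(unsigned long *flags)
{
	clear_bit(XMIT_BUSY, flags);
	if (!test_bit(XMIT_WAKEUP, flags))
		return 1;	/* no new work - safe to stop */
	/* 0: we re-acquired BUSY, go around again;
	   1: someone else owns BUSY and will see the wakeup */
	return !!test_and_set_bit(XMIT_BUSY, flags);
}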
/*
 * Flush output from our internal buffers.
 * Called for the TCFLSH ioctl. Can be entered in parallel
 * but this is covered by the xmit_lock.
 */
static void
ppp_async_flush_output(struct asyncppp *ap)
{
	int done = 0;

	spin_lock_bh(&ap->xmit_lock);
	ap->optr = ap->olim;
	if (ap->tpkt != NULL) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	spin_unlock_bh(&ap->xmit_lock);
	if (done)
		ppp_output_wakeup(&ap->chan);
}
/*
 * Receive-side routines.
 */

/* see how many ordinary chars there are at the start of buf */
static inline int
scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
{
	int i, c;

	for (i = 0; i < count; ++i) {
		c = buf[i];
		if (c == PPP_ESCAPE || c == PPP_FLAG ||
		    (c < 0x20 && (ap->raccm & (1 << c)) != 0))
			break;
	}
	return i;
}
/* called when a flag is seen - do end-of-packet processing */
static void
process_input_packet(struct asyncppp *ap)
{
	struct sk_buff *skb;
	unsigned char *p;
	unsigned int len, fcs, proto;

	skb = ap->rpkt;
	if (ap->state & (SC_TOSS | SC_ESCAPE))
		goto err;

	if (skb == NULL)
		return;		/* 0-length packet */

	/* check the FCS */
	p = skb->data;
	len = skb->len;
	if (len < 3)
		goto err;	/* too short */
	fcs = PPP_INITFCS;
	for (; len > 0; --len)
		fcs = PPP_FCS(fcs, *p++);
	if (fcs != PPP_GOODFCS)
		goto err;	/* bad FCS */
	skb_trim(skb, skb->len - 2);

	/* check for address/control and protocol compression */
	p = skb->data;
	if (p[0] == PPP_ALLSTATIONS) {
		/* chop off address/control */
		if (p[1] != PPP_UI || skb->len < 3)
			goto err;
		p = skb_pull(skb, 2);
	}
	proto = p[0];
	if (proto & 1) {
		/* protocol is compressed */
		skb_push(skb, 1)[0] = 0;
	} else {
		if (skb->len < 2)
			goto err;
		proto = (proto << 8) + p[1];
		if (proto == PPP_LCP)
			async_lcp_peek(ap, p, skb->len, 1);
	}

	/* queue the frame to be processed */
	skb->cb[0] = ap->state;
	skb_queue_tail(&ap->rqueue, skb);
	ap->rpkt = NULL;
	ap->state = 0;
	return;

 err:
	/* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */
	ap->state = SC_PREV_ERROR;
	if (skb) {
		/* make skb appear as freshly allocated */
		skb_trim(skb, 0);
		skb_reserve(skb, - skb_headroom(skb));
	}
}
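/*
 * A minimal sketch of the FCS property relied on above, assuming buf
 * holds a complete frame body including its two trailing FCS octets:
 * running the CRC over the FCS itself must leave the well-known
 * residue PPP_GOODFCS (0xf0b8).  check_fcs() is a hypothetical name,
 * not part of this driver.
 */
static inline int check_fcs(const unsigned char *buf, int len)
{
	unsigned int fcs = PPP_INITFCS;

	while (len-- > 0)
		fcs = PPP_FCS(fcs, *buf++);
	return fcs == PPP_GOODFCS;
}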
/* Called when the tty driver has data for us. Runs parallel with the
   other ldisc functions but will not be re-entered */

static void
ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
		char *flags, int count)
{
	struct sk_buff *skb;
	int c, i, j, n, s, f;
	unsigned char *sp;

	/* update bits used for 8-bit cleanness detection */
	if (~ap->rbits & SC_RCV_BITS) {
		s = 0;
		for (i = 0; i < count; ++i) {
			c = buf[i];
			if (flags && flags[i] != 0)
				continue;
			s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
			c = ((c >> 4) ^ c) & 0xf;
			s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
		}
		ap->rbits |= s;
	}

	while (count > 0) {
		/* scan through and see how many chars we can do in bulk */
		if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
			n = 1;
		else
			n = scan_ordinary(ap, buf, count);

		f = 0;
		if (flags && (ap->state & SC_TOSS) == 0) {
			/* check the flags to see if any char had an error */
			for (j = 0; j < n; ++j)
				if ((f = flags[j]) != 0)
					break;
		}
		if (f != 0) {
			/* start tossing */
			ap->state |= SC_TOSS;

		} else if (n > 0 && (ap->state & SC_TOSS) == 0) {
			/* stuff the chars in the skb */
			skb = ap->rpkt;
			if (!skb) {
				skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
				if (!skb)
					goto nomem;
				ap->rpkt = skb;
			}
			if (skb->len == 0) {
				/* Try to get the payload 4-byte aligned.
				 * This should match the
				 * PPP_ALLSTATIONS/PPP_UI/compressed tests in
				 * process_input_packet, but we do not have
				 * enough chars here to test buf[1] and buf[2].
				 */
				if (buf[0] != PPP_ALLSTATIONS)
					skb_reserve(skb, 2 + (buf[0] & 1));
			}
			if (n > skb_tailroom(skb)) {
				/* packet overflowed MRU */
				ap->state |= SC_TOSS;
			} else {
				sp = skb_put(skb, n);
				memcpy(sp, buf, n);
				if (ap->state & SC_ESCAPE) {
					sp[0] ^= PPP_TRANS;
					ap->state &= ~SC_ESCAPE;
				}
			}
		}

		if (n >= count)
			break;

		c = buf[n];
		if (flags != NULL && flags[n] != 0) {
			ap->state |= SC_TOSS;
		} else if (c == PPP_FLAG) {
			process_input_packet(ap);
		} else if (c == PPP_ESCAPE) {
			ap->state |= SC_ESCAPE;
		} else if (I_IXON(ap->tty)) {
			if (c == START_CHAR(ap->tty))
				start_tty(ap->tty);
			else if (c == STOP_CHAR(ap->tty))
				stop_tty(ap->tty);
		}
		/* otherwise it's a char in the recv ACCM */
		++n;

		buf += n;
		if (flags)
			flags += n;
		count -= n;
	}
	return;

 nomem:
	printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
	ap->state |= SC_TOSS;
}
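/*
 * Why the 0x6996 constant above computes parity (illustrative helper
 * with a hypothetical name, not used by the driver): folding the byte
 * to a nibble with ((c >> 4) ^ c) & 0xf preserves parity, and 0x6996
 * is the 16-entry parity table of the nibble values packed into one
 * 16-bit word.
 */
static inline int example_odd_parity(unsigned char c)
{
	c = ((c >> 4) ^ c) & 0xf;
	return (0x6996 >> c) & 1;	/* 1 if c has odd parity */
}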
/*
 * We look at LCP frames going past so that we can notice
 * and react to the LCP configure-ack from the peer.
 * In the situation where the peer has been sent a configure-ack
 * already, LCP is up once it has sent its configure-ack
 * so the immediately following packet can be sent with the
 * configured LCP options.  This allows us to process the following
 * packet correctly without pppd needing to respond quickly.
 *
 * We only respond to the received configure-ack if we have just
 * sent a configure-request, and the configure-ack contains the
 * same data (this is checked using a 16-bit crc of the data).
 */
#define CONFREQ		1	/* LCP code field values */
#define CONFACK		2
#define LCP_MRU		1	/* LCP option numbers */
#define LCP_ASYNCMAP	2
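/*
 * Layout assumed by the parsing below, shown as a struct for
 * illustration only; the driver reads the bytes directly.  This
 * follows the two protocol bytes at the start of the frame.
 */
struct lcp_hdr_example {	/* illustrative, not used */
	u8 code;		/* CONFREQ, CONFACK, ... */
	u8 id;
	__be16 length;		/* covers code, id, length and options */
	/* options follow: each is u8 type, u8 len (>= 2), u8 data[len-2] */
};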
static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
			   int len, int inbound)
{
	int dlen, fcs, i, code;
	u32 val;

	data += 2;		/* skip protocol bytes */
	len -= 2;
	if (len < 4)		/* 4 = code, ID, length */
		return;
	code = data[0];
	if (code != CONFACK && code != CONFREQ)
		return;
	dlen = (data[2] << 8) + data[3];
	if (len < dlen)
		return;		/* packet got truncated or length is bogus */

	if (code == (inbound? CONFACK: CONFREQ)) {
		/*
		 * sent confreq or received confack:
		 * calculate the crc of the data from the ID field on.
		 */
		fcs = PPP_INITFCS;
		for (i = 1; i < dlen; ++i)
			fcs = PPP_FCS(fcs, data[i]);

		if (!inbound) {
			/* outbound confreq - remember the crc for later */
			ap->lcp_fcs = fcs;
			return;
		}

		/* received confack, check the crc */
		i = ap->lcp_fcs;
		ap->lcp_fcs = -1;
		if (i == -1 || i != fcs)
			return;
	} else if (inbound)
		return;	/* not interested in received confreq */

	/* process the options in the confack */
	data += 4;
	dlen -= 4;
	/* data[0] is code, data[1] is length */
	while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
		switch (data[0]) {
		case LCP_MRU:
			val = (data[2] << 8) + data[3];
			if (inbound)
				ap->mru = val;
			else
				ap->chan.mtu = val;
			break;
		case LCP_ASYNCMAP:
			val = (data[2] << 24) + (data[3] << 16)
				+ (data[4] << 8) + data[5];
			if (inbound)
				ap->raccm = val;
			else
				ap->xaccm[0] = val;
			break;
		}
		dlen -= data[1];
		data += data[1];
	}
}
static void __exit
ppp_async_cleanup(void)
{
	if (tty_unregister_ldisc(N_PPP) != 0)
		printk(KERN_ERR "failed to unregister PPP line discipline\n");
}

module_init(ppp_async_init);
module_exit(ppp_async_cleanup);