 * PPP async serial channel driver for Linux.
 *
 * Copyright 1999 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames over async serial lines.  It relies on
 * the generic PPP layer to give it frames to send and to process
 * received frames.  It implements the PPP line discipline.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/crc-ccitt.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/string.h>

#define PPP_VERSION	"2.4.2"
/* Structure for storing local state. */
	struct tty_struct	*tty;
	unsigned long		xmit_flags;
	unsigned int		bytes_sent;
	unsigned int		bytes_rcvd;
	unsigned long		last_xmit;
	struct sk_buff_head	rqueue;
	struct tasklet_struct	tsk;
	struct semaphore	dead_sem;
	struct ppp_channel	chan;	/* interface to generic ppp layer */
	unsigned char		obuf[OBUFSIZE];

/* Bit numbers in xmit_flags */

#define SC_PREV_ERROR	4

#define SC_RCV_BITS	(SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
static int flag_time = HZ;
module_param(flag_time, int, 0);
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_PPP);
static int ppp_async_encode(struct asyncppp *ap);
static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_async_push(struct asyncppp *ap);
static void ppp_async_flush_output(struct asyncppp *ap);
static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
			    char *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
			   unsigned long arg);
static void ppp_async_process(unsigned long arg);
static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
			   int len, int inbound);

static struct ppp_channel_ops async_ops = {
 * Routines implementing the PPP line discipline.
 *
 * We have a potential race on dereferencing tty->disc_data,
 * because the tty layer provides no locking at all - thus one
 * cpu could be running ppp_asynctty_receive while another
 * calls ppp_asynctty_close, which zeroes tty->disc_data and
 * frees the memory that ppp_asynctty_receive is using.  The best
 * way to fix this is to use a rwlock in the tty struct, but for now
 * we use a single global rwlock for all ttys in ppp line discipline.
 *
 * FIXME: this is no longer true.  The _close path for the ldisc is
 * now guaranteed to be sane.

static DEFINE_RWLOCK(disc_data_lock);
static struct asyncppp *ap_get(struct tty_struct *tty)
	read_lock(&disc_data_lock);
		atomic_inc(&ap->refcnt);
	read_unlock(&disc_data_lock);

static void ap_put(struct asyncppp *ap)
	if (atomic_dec_and_test(&ap->refcnt))
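/*
 * Illustrative sketch only, not part of the driver: the intended calling
 * pattern for the two helpers above, written as a hypothetical ldisc
 * callback.  ap_put() is assumed to drop the reference taken by ap_get()
 * and, on the final put, to wake whoever is waiting in the close path.
 */
static void example_ldisc_callback(struct tty_struct *tty)
{
	struct asyncppp *ap = ap_get(tty);

	if (ap == NULL)
		return;		/* line discipline already closed on this tty */

	/* safe to use ap here; ppp_asynctty_close waits until the
	 * reference count drops before freeing the structure */

	ap_put(ap);
}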
 * Called when a tty is put into PPP line discipline.  Called in process
 * context.
ppp_asynctty_open(struct tty_struct *tty)
	ap = kmalloc(sizeof(*ap), GFP_KERNEL);

	/* initialize the asyncppp structure */
	memset(ap, 0, sizeof(*ap));
	spin_lock_init(&ap->xmit_lock);
	spin_lock_init(&ap->recv_lock);
	ap->xaccm[3] = 0x60000000U;
	skb_queue_head_init(&ap->rqueue);
	tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
	atomic_set(&ap->refcnt, 1);
	init_MUTEX_LOCKED(&ap->dead_sem);
	ap->chan.private = ap;
	ap->chan.ops = &async_ops;
	ap->chan.mtu = PPP_MRU;
	err = ppp_register_channel(&ap->chan);

 * Called when the tty is put into another line discipline
 * or it hangs up.  We have to wait for any cpu currently
 * executing in any of the other ppp_asynctty_* routines to
 * finish before we can call ppp_unregister_channel and free
 * the asyncppp struct.  This routine must be called from
 * process context, not interrupt or softirq context.
ppp_asynctty_close(struct tty_struct *tty)
	write_lock_irq(&disc_data_lock);
	tty->disc_data = NULL;
	write_unlock_irq(&disc_data_lock);

	 * We have now ensured that nobody can start using ap from now
	 * on, but we have to wait for all existing users to finish.
	 * Note that ppp_unregister_channel ensures that no calls to
	 * our channel ops (i.e. ppp_async_send/ioctl) are in progress
	 * by the time it returns.
	if (!atomic_dec_and_test(&ap->refcnt))
	tasklet_kill(&ap->tsk);
	ppp_unregister_channel(&ap->chan);
	skb_queue_purge(&ap->rqueue);

 * Called on tty hangup in process context.
 *
 * Wait for I/O to driver to complete and unregister PPP channel.
 * This is already done by the close routine, so just call that.

static int ppp_asynctty_hangup(struct tty_struct *tty)
	ppp_asynctty_close(tty);
 * Read does nothing - no data is ever available this way.
 * Pppd reads and writes packets via /dev/ppp instead.

ppp_asynctty_read(struct tty_struct *tty, struct file *file,
		  unsigned char __user *buf, size_t count)

 * Write on the tty does nothing, the packets all come in
 * from the ppp generic stuff.

ppp_asynctty_write(struct tty_struct *tty, struct file *file,
		   const unsigned char *buf, size_t count)

 * Called in process context only.  May be re-entered by multiple
 * ioctl calling threads.
ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
		   unsigned int cmd, unsigned long arg)
	struct asyncppp *ap = ap_get(tty);
	int __user *p = (int __user *)arg;

		if (put_user(ppp_channel_index(&ap->chan), p))
		if (put_user(ppp_unit_number(&ap->chan), p))
		err = n_tty_ioctl(tty, file, cmd, arg);
		/* flush our buffers and the serial port's buffer */
		if (arg == TCIOFLUSH || arg == TCOFLUSH)
			ppp_async_flush_output(ap);
		err = n_tty_ioctl(tty, file, cmd, arg);
		if (put_user(val, p))
/* No kernel lock - fine */
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)

ppp_asynctty_room(struct tty_struct *tty)

 * This can now be called from hard interrupt level as well
 * as soft interrupt level or mainline.
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
		     char *cflags, int count)
	struct asyncppp *ap = ap_get(tty);

	spin_lock_irqsave(&ap->recv_lock, flags);
	ppp_async_input(ap, buf, cflags, count);
	spin_unlock_irqrestore(&ap->recv_lock, flags);
	if (!skb_queue_empty(&ap->rqueue))
		tasklet_schedule(&ap->tsk);
	if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
	    && tty->driver->unthrottle)
		tty->driver->unthrottle(tty);

ppp_asynctty_wakeup(struct tty_struct *tty)
	struct asyncppp *ap = ap_get(tty);

	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	set_bit(XMIT_WAKEUP, &ap->xmit_flags);
	tasklet_schedule(&ap->tsk);
static struct tty_ldisc ppp_ldisc = {
	.owner		= THIS_MODULE,
	.magic		= TTY_LDISC_MAGIC,
	.open		= ppp_asynctty_open,
	.close		= ppp_asynctty_close,
	.hangup		= ppp_asynctty_hangup,
	.read		= ppp_asynctty_read,
	.write		= ppp_asynctty_write,
	.ioctl		= ppp_asynctty_ioctl,
	.poll		= ppp_asynctty_poll,
	.receive_room	= ppp_asynctty_room,
	.receive_buf	= ppp_asynctty_receive,
	.write_wakeup	= ppp_asynctty_wakeup,
	err = tty_register_ldisc(N_PPP, &ppp_ldisc);
		printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
		       err);
 * The following routines provide the PPP channel interface.

ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
	struct asyncppp *ap = chan->private;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

		val = ap->flags | ap->rbits;
		if (put_user(val, p))
		if (get_user(val, p))
		ap->flags = val & ~SC_RCV_BITS;
		spin_lock_irq(&ap->recv_lock);
		ap->rbits = val & SC_RCV_BITS;
		spin_unlock_irq(&ap->recv_lock);

	case PPPIOCGASYNCMAP:
		if (put_user(ap->xaccm[0], (u32 __user *)argp))
	case PPPIOCSASYNCMAP:
		if (get_user(ap->xaccm[0], (u32 __user *)argp))
	case PPPIOCGRASYNCMAP:
		if (put_user(ap->raccm, (u32 __user *)argp))
	case PPPIOCSRASYNCMAP:
		if (get_user(ap->raccm, (u32 __user *)argp))
	case PPPIOCGXASYNCMAP:
		if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
	case PPPIOCSXASYNCMAP:
		if (copy_from_user(accm, argp, sizeof(accm)))
		accm[2] &= ~0x40000000U;	/* can't escape 0x5e */
		accm[3] |= 0x60000000U;		/* must escape 0x7d, 0x7e */
		memcpy(ap->xaccm, accm, sizeof(ap->xaccm));

		if (put_user(ap->mru, p))
		if (get_user(val, p))
 * This is called at softirq level to deliver received packets
 * to the ppp_generic code, and to tell the ppp_generic code
 * if we can accept more output now.

static void ppp_async_process(unsigned long arg)
	struct asyncppp *ap = (struct asyncppp *) arg;

	/* process received packets */
	while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
			ppp_input_error(&ap->chan, 0);
		ppp_input(&ap->chan, skb);

	/* try to push more stuff out */
	if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
		ppp_output_wakeup(&ap->chan);
 * Procedures for encapsulation and framing.
 *
 * Procedure to encode the data for async serial transmission.
 * Does octet stuffing (escaping), puts the address/control bytes
 * on if A/C compression is disabled, and does protocol compression.
 * Assumes ap->tpkt != 0 on entry.
 * Returns 1 if we finished the current frame, 0 otherwise.
#define PUT_BYTE(ap, buf, c, islcp)	do {		\
	if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) { \
		*buf++ = PPP_ESCAPE;			\
		*buf++ = c ^ 0x20;			\
	} else						\
		*buf++ = c;				\
} while (0)
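/*
 * Illustrative sketch only, not part of the driver: the same octet
 * stuffing and FCS arithmetic in a self-contained, user-space form.
 * fcs16() and stuff_frag() are made-up names.  The FCS is the RFC 1662
 * CRC-16 (polynomial 0x8408, initial value 0xffff); a received frame is
 * intact when the FCS computed over its bytes, including the two
 * trailing FCS bytes, equals 0xf0b8 (PPP_GOODFCS).
 */
static unsigned short fcs16(unsigned short fcs, const unsigned char *p, int len)
{
	int i;

	while (len-- > 0) {
		fcs ^= *p++;
		for (i = 0; i < 8; ++i)
			fcs = (fcs & 1) ? (fcs >> 1) ^ 0x8408 : fcs >> 1;
	}
	return fcs;
}

/* Escape one fragment into obuf using a 256-bit transmit ACCM
 * (8 x 32-bit words, bit c set means character c must be escaped);
 * returns the number of output bytes produced. */
static int stuff_frag(const unsigned char *data, int len,
		      const unsigned int accm[8], unsigned char *obuf)
{
	unsigned char *buf = obuf;
	int i;

	for (i = 0; i < len; ++i) {
		unsigned char c = data[i];

		if (accm[c >> 5] & (1U << (c & 0x1f))) {
			*buf++ = 0x7d;		/* PPP_ESCAPE */
			*buf++ = c ^ 0x20;	/* complement bit 5 */
		} else {
			*buf++ = c;
		}
	}
	return buf - obuf;
}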
ppp_async_encode(struct asyncppp *ap)
	int fcs, i, count, c, proto;
	unsigned char *buf, *buflim;

	data = ap->tpkt->data;
	count = ap->tpkt->len;
	proto = (data[0] << 8) + data[1];
	 * LCP packets with code values between 1 (configure-request)
	 * and 7 (code-reject) must be sent as though no options
	 * had been negotiated.
	islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
		async_lcp_peek(ap, data, count, 0);
	 * Start of a new packet - insert the leading FLAG
	 * character if necessary.
	if (islcp || flag_time == 0
	    || jiffies - ap->last_xmit >= flag_time)
	ap->last_xmit = jiffies;

	 * Put in the address/control bytes if necessary
	if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
		PUT_BYTE(ap, buf, 0xff, islcp);
		fcs = PPP_FCS(fcs, 0xff);
		PUT_BYTE(ap, buf, 0x03, islcp);
		fcs = PPP_FCS(fcs, 0x03);
	 * Once we put in the last byte, we need to put in the FCS
	 * and closing flag, so make sure there are at least 7 bytes
	 * of free space in the output buffer.
	buflim = ap->obuf + OBUFSIZE - 6;
	while (i < count && buf < buflim) {
		if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
			continue;	/* compress protocol field */
		fcs = PPP_FCS(fcs, c);
		PUT_BYTE(ap, buf, c, islcp);
	 * Remember where we are up to in this packet.

	 * We have finished the packet.  Add the FCS and flag.
	PUT_BYTE(ap, buf, c, islcp);
	c = (fcs >> 8) & 0xff;
	PUT_BYTE(ap, buf, c, islcp);

 * Transmit-side routines.
 *
 * Send a packet to the peer over an async tty line.
 * Returns 1 iff the packet was accepted.
 * If the packet was not accepted, we will call ppp_output_wakeup
 * at some later time.

ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
	struct asyncppp *ap = chan->private;

	if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
		return 0;	/* already full */

 * Push as much data as possible out to the tty.

ppp_async_push(struct asyncppp *ap)
	int avail, sent, done = 0;
	struct tty_struct *tty = ap->tty;
	 * We can get called recursively here if the tty write
	 * function calls our wakeup function.  This can happen
	 * for example on a pty with both the master and slave
	 * set to PPP line discipline.
	 * We use the XMIT_BUSY bit to detect this and get out,
	 * leaving the XMIT_WAKEUP bit set to tell the other
	 * instance that it may now be able to write more.
	if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
	spin_lock_bh(&ap->xmit_lock);
		if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
		if (!tty_stuffed && ap->optr < ap->olim) {
			avail = ap->olim - ap->optr;
			set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
			sent = tty->driver->write(tty, ap->optr, avail);
				goto flush;	/* error, e.g. loss of CD */
		if (ap->optr >= ap->olim && ap->tpkt != 0) {
			if (ppp_async_encode(ap)) {
				/* finished processing ap->tpkt */
				clear_bit(XMIT_FULL, &ap->xmit_flags);

		 * We haven't made any progress this time around.
		 * Clear XMIT_BUSY to let other callers in, but
		 * after doing so we have to check if anyone set
		 * XMIT_WAKEUP since we last checked it.  If they
		 * did, we should try again to set XMIT_BUSY and go
		 * around again in case XMIT_BUSY was still set when
		 * the other caller tried.
		clear_bit(XMIT_BUSY, &ap->xmit_flags);
		/* any more work to do? if not, exit the loop */
		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
		      || (!tty_stuffed && ap->tpkt != 0)))
		/* more work to do, see if we can do it now */
		if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
	spin_unlock_bh(&ap->xmit_lock);

	clear_bit(XMIT_BUSY, &ap->xmit_flags);
		clear_bit(XMIT_FULL, &ap->xmit_flags);
	spin_unlock_bh(&ap->xmit_lock);
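/*
 * Illustrative sketch only, not part of the driver: the busy-bit /
 * wakeup-bit pattern used above, reduced to a self-contained user-space
 * form with C11 atomics standing in for test_and_set_bit() and friends.
 * push_work(), request_push() and do_some_output() are made-up names.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag busy = ATOMIC_FLAG_INIT;
static atomic_bool wakeup;

static void do_some_output(void)
{
	/* stand-in for writing buffered data to the tty */
}

static void push_work(void)
{
	/* Someone else is already pushing; our wakeup bit stays set
	 * and they will notice it before they finish. */
	if (atomic_flag_test_and_set(&busy))
		return;

	for (;;) {
		while (atomic_exchange(&wakeup, false))
			do_some_output();

		/* No progress possible: let other callers in, then
		 * re-check in case a wakeup raced with us. */
		atomic_flag_clear(&busy);
		if (!atomic_load(&wakeup))
			break;
		if (atomic_flag_test_and_set(&busy))
			break;	/* another caller took over */
	}
}

static void request_push(void)
{
	atomic_store(&wakeup, true);
	push_work();
}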
 * Flush output from our internal buffers.
 * Called for the TCFLSH ioctl.  Can be entered in parallel
 * but this is covered by the xmit_lock.

ppp_async_flush_output(struct asyncppp *ap)
	spin_lock_bh(&ap->xmit_lock);
	if (ap->tpkt != NULL) {
		clear_bit(XMIT_FULL, &ap->xmit_flags);
	spin_unlock_bh(&ap->xmit_lock);
		ppp_output_wakeup(&ap->chan);

 * Receive-side routines.

/* see how many ordinary chars there are at the start of buf */
scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
	for (i = 0; i < count; ++i) {
		if (c == PPP_ESCAPE || c == PPP_FLAG
		    || (c < 0x20 && (ap->raccm & (1 << c)) != 0))

/* called when a flag is seen - do end-of-packet processing */
process_input_packet(struct asyncppp *ap)
	unsigned int len, fcs, proto;

	if (ap->state & (SC_TOSS | SC_ESCAPE))
		return;		/* 0-length packet */
		goto err;	/* too short */
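	/* Running the FCS over the received bytes, including the two
	 * trailing FCS bytes themselves, yields the constant PPP_GOODFCS
	 * (0xf0b8) when the frame arrived intact (RFC 1662). */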
	for (; len > 0; --len)
		fcs = PPP_FCS(fcs, *p++);
	if (fcs != PPP_GOODFCS)
		goto err;	/* bad FCS */
	skb_trim(skb, skb->len - 2);

	/* check for address/control and protocol compression */
	if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
		/* chop off address/control */
		p = skb_pull(skb, 2);
		/* protocol is compressed */
		skb_push(skb, 1)[0] = 0;
	proto = (proto << 8) + p[1];
	if (proto == PPP_LCP)
		async_lcp_peek(ap, p, skb->len, 1);

	/* queue the frame to be processed */
	skb->cb[0] = ap->state;
	skb_queue_tail(&ap->rqueue, skb);

	/* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */
	ap->state = SC_PREV_ERROR;

	/* make skb appear as freshly allocated */
	skb_reserve(skb, - skb_headroom(skb));
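	/* skb_reserve() with a negative length winds skb->data and skb->tail
	 * back by the current headroom, so the same skb can be refilled from
	 * the start of its buffer for the next frame. */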
/* Called when the tty driver has data for us.  Runs parallel with the
   other ldisc functions but will not be re-entered */

ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
		char *flags, int count)
	int c, i, j, n, s, f;

	/* update bits used for 8-bit cleanness detection */
	if (~ap->rbits & SC_RCV_BITS) {
		for (i = 0; i < count; ++i) {
			if (flags != 0 && flags[i] != 0)
			s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
			c = ((c >> 4) ^ c) & 0xf;
			s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
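			/* The two lines above fold the byte down to 4 bits
			 * (preserving its parity) and use 0x6996 as a
			 * 16-entry parity lookup table: bit n of 0x6996 is
			 * set exactly when n has an odd number of 1 bits. */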
	/* scan through and see how many chars we can do in bulk */
	if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
		n = scan_ordinary(ap, buf, count);

	if (flags != 0 && (ap->state & SC_TOSS) == 0) {
		/* check the flags to see if any char had an error */
		for (j = 0; j < n; ++j)
			if ((f = flags[j]) != 0)
			ap->state |= SC_TOSS;
	} else if (n > 0 && (ap->state & SC_TOSS) == 0) {
		/* stuff the chars in the skb */
			skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
			/* Try to get the payload 4-byte aligned.
			 * This should match the
			 * PPP_ALLSTATIONS/PPP_UI/compressed tests in
			 * process_input_packet, but we do not have
			 * enough chars here to test buf[1] and buf[2].
			if (buf[0] != PPP_ALLSTATIONS)
				skb_reserve(skb, 2 + (buf[0] & 1));
		if (n > skb_tailroom(skb)) {
			/* packet overflowed MRU */
			ap->state |= SC_TOSS;
			sp = skb_put(skb, n);
			if (ap->state & SC_ESCAPE) {
				ap->state &= ~SC_ESCAPE;

	if (flags != NULL && flags[n] != 0) {
		ap->state |= SC_TOSS;
	} else if (c == PPP_FLAG) {
		process_input_packet(ap);
	} else if (c == PPP_ESCAPE) {
		ap->state |= SC_ESCAPE;
	} else if (I_IXON(ap->tty)) {
		if (c == START_CHAR(ap->tty))
		else if (c == STOP_CHAR(ap->tty))
	/* otherwise it's a char in the recv ACCM */

	printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
	ap->state |= SC_TOSS;
 * We look at LCP frames going past so that we can notice
 * and react to the LCP configure-ack from the peer.
 * In the situation where the peer has been sent a configure-ack
 * already, LCP is up once it has sent its configure-ack
 * so the immediately following packet can be sent with the
 * configured LCP options.  This allows us to process the following
 * packet correctly without pppd needing to respond quickly.
 *
 * We only respond to the received configure-ack if we have just
 * sent a configure-request, and the configure-ack contains the
 * same data (this is checked using a 16-bit crc of the data).

#define CONFREQ		1	/* LCP code field values */
#define LCP_MRU		1	/* LCP option numbers */
#define LCP_ASYNCMAP	2
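/*
 * Illustrative sketch only, not part of the driver: the confreq/confack
 * matching idea in isolation.  lcp_fingerprint() is a made-up name;
 * PPP_INITFCS and PPP_FCS are the 16-bit FCS helpers from
 * <linux/ppp_defs.h> that the rest of this file already uses.
 */
static unsigned int lcp_fingerprint(const unsigned char *data, int dlen)
{
	/* data points just past the protocol field, so data[0] is the LCP
	 * code, data[1] the id and data[2..3] the length.  Hashing from the
	 * id field onwards lets a received configure-ack be compared with
	 * the configure-request it answers. */
	unsigned int fcs = PPP_INITFCS;
	int i;

	for (i = 1; i < dlen; ++i)
		fcs = PPP_FCS(fcs, data[i]);
	return fcs;
}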
static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
			   int len, int inbound)
	int dlen, fcs, i, code;

	data += 2;		/* skip protocol bytes */
	if (len < 4)		/* 4 = code, ID, length */
	if (code != CONFACK && code != CONFREQ)
	dlen = (data[2] << 8) + data[3];
		return;		/* packet got truncated or length is bogus */

	if (code == (inbound ? CONFACK : CONFREQ)) {
		 * sent confreq or received confack:
		 * calculate the crc of the data from the ID field on.
		for (i = 1; i < dlen; ++i)
			fcs = PPP_FCS(fcs, data[i]);

		/* outbound confreq - remember the crc for later */
		/* received confack, check the crc */
		return;		/* not interested in received confreq */

	/* process the options in the confack */
	/* data[0] is code, data[1] is length */
	while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
			val = (data[2] << 8) + data[3];
			val = (data[2] << 24) + (data[3] << 16)
				+ (data[4] << 8) + data[5];
static void __exit ppp_async_cleanup(void)
	if (tty_unregister_ldisc(N_PPP) != 0)
		printk(KERN_ERR "failed to unregister PPP line discipline\n");

module_init(ppp_async_init);
module_exit(ppp_async_cleanup);