 *  Michael Callahan <callahan@maths.ox.ac.uk>
 *  Al Longyear <longyear@netcom.com>
 *  Extensively rewritten by Paul Mackerras <paulus@cs.anu.edu.au>
 *
 *  ==FILEVERSION 981004==
 *
 *  If you modify this file at all, please set the number above to the
 *  date of the modification as YYMMDD (year month day).
 *  ppp.c is shipped with a PPP distribution as well as with the kernel;
 *  if everyone increases the FILEVERSION number above, then scripts
 *  can do the right thing when deciding whether to install a new ppp.c
 *  file.  Don't change the format of that line otherwise, so the
 *  installation script can recognize it.
   RFC1331: The Point-to-Point Protocol (PPP) for the Transmission of
   Multi-protocol Datagrams over Point-to-Point Links

   Flags for this module (any combination is acceptable for testing):

   OPTIMIZE_FLAG_TIME - Number of jiffies to force sending of the leading
			flag character. This is normally set to ((HZ * 3) / 2),
			i.e. 1.5 seconds. If zero then the leading flag is
			always sent.

   CHECK_CHARACTERS   - Enable the checking of all received characters for
			8 data bits, no parity. This adds a small amount of
			processing for each received character.

#define OPTIMIZE_FLAG_TIME	((HZ * 3)/2)
#define CHECK_CHARACTERS	1
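/*
 * Illustrative sketch (not part of the driver): flag_time is what the
 * OPTIMIZE_FLAG_TIME value feeds in ppp_async_encode() below, so loading
 * the module with "insmod ppp.o flag_time=0" forces a leading FLAG byte
 * on every frame:
 */
#if 0
	if (islcp || flag_time == 0
	    || jiffies - ppp->last_xmit >= flag_time)
		*buf++ = PPP_FLAG;	/* emit the opening 0x7e flag */
#endif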
#define PPP_MAX_RCV_QLEN	32	/* max # frames we queue up for pppd */

/* $Id: ppp.c,v 1.20 1999/01/19 23:57:44 paulus Exp $ */
#include <linux/version.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>

/* a macro to generate linux version number codes */
#define VERSION(major,minor,patch) (((((major)<<8)+(minor))<<8)+(patch))
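/*
 * Worked example (illustrative): VERSION(2,1,60) expands to
 * (((2 << 8) + 1) << 8) + 60 == 0x02013C, the same packed format as
 * LINUX_VERSION_CODE, so tests such as
 * "#if LINUX_VERSION_CODE < VERSION(2,1,60)" below simply compare the
 * running kernel's release against that constant.
 */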
#if LINUX_VERSION_CODE < VERSION(2,1,14)
#include <linux/ioport.h>
#endif

#if LINUX_VERSION_CODE >= VERSION(2,1,23)
#include <linux/poll.h>
#endif

#include <linux/malloc.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/sched.h>	/* to get the struct task_struct */
#include <linux/string.h>	/* used in new tty drivers */
#include <linux/signal.h>	/* used in new tty drivers */
#include <asm/system.h>
#include <asm/bitops.h>

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/ioctl.h>

#include <linux/tcp.h>
#include <linux/if_arp.h>
#include <net/slhc_vj.h>

#define fcstab	ppp_crc16_table		/* Name of the table in the kernel */
#include <linux/ppp_defs.h>

#include <linux/socket.h>
#include <linux/if_ppp.h>
#include <linux/if_pppvar.h>
#include <linux/ppp-comp.h>

#include <linux/kmod.h>

#ifdef CONFIG_KERNELD
#include <linux/kerneld.h>
#endif

#define PPP_VERSION	"2.3.5"
#if LINUX_VERSION_CODE >= VERSION(2,1,4)

#if LINUX_VERSION_CODE >= VERSION(2,1,5)
#include <asm/uaccess.h>
#else
#include <asm/segment.h>
#endif

#define GET_USER	get_user
#define PUT_USER	put_user
#define COPY_FROM_USER	copy_from_user
#define COPY_TO_USER	copy_to_user

#else  /* 2.0.x and 2.1.x before 2.1.4 */

#define GET_USER(val, src) \
	(verify_area(VERIFY_READ, src, sizeof(*src))? -EFAULT: \
	 ((val) = get_user(src), 0))
#define PUT_USER(val, dst) \
	(verify_area(VERIFY_WRITE, dst, sizeof(*dst))? -EFAULT: \
	 (put_user(val, dst), 0))
#define COPY_FROM_USER(dst, src, size) \
	(verify_area(VERIFY_READ, src, size)? -EFAULT: \
	 (memcpy_fromfs(dst, src, size), 0))
#define COPY_TO_USER(dst, src, size) \
	(verify_area(VERIFY_WRITE, dst, size)? -EFAULT: \
	 (memcpy_tofs(dst, src, size), 0))

#endif /* < 2.1.4 */
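/*
 * Usage sketch (illustrative; "arg" is a hypothetical user-space pointer):
 * with either set of definitions above, the ioctl handlers below can be
 * written the same way for 2.0 and 2.1 kernels, treating any non-zero
 * return as an access fault:
 */
#if 0
	int val;

	if (GET_USER(val, (int *) arg))		/* fault => non-zero */
		return -EFAULT;
	if (PUT_USER(val + 1, (int *) arg))
		return -EFAULT;
	if (COPY_TO_USER((void *) arg, &val, sizeof(val)))
		return -EFAULT;
#endif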
#if LINUX_VERSION_CODE < VERSION(2,1,37)
#define test_and_set_bit(nr, addr)	set_bit(nr, addr)
#endif

#if LINUX_VERSION_CODE < VERSION(2,1,25)
#define net_device_stats	enet_statistics
#endif

#if LINUX_VERSION_CODE < VERSION(2,1,57)
#define signal_pending(tsk)	((tsk)->pending & ~(tsk)->blocked)
#endif

#if LINUX_VERSION_CODE < VERSION(2,1,60)
typedef int		rw_ret_t;
typedef unsigned int	rw_count_t;
#else
typedef ssize_t		rw_ret_t;
typedef size_t		rw_count_t;
#endif
#ifdef CONFIG_MODULES
static int ppp_register_compressor (struct compressor *cp);
static void ppp_unregister_compressor (struct compressor *cp);
#endif

static void ppp_async_init(struct ppp *ppp);
static void ppp_async_release(struct ppp *ppp);
static int ppp_tty_push(struct ppp *ppp);
static int ppp_async_encode(struct ppp *ppp);
static int ppp_async_send(struct ppp *, struct sk_buff *);

static int ppp_ioctl(struct ppp *, unsigned int, unsigned long);
static int ppp_set_compression (struct ppp *ppp, struct ppp_option_data *odp);
static void ppp_proto_ccp(struct ppp *ppp, __u8 *dp, int len, int rcvd);
static void ppp_ccp_closed(struct ppp *ppp);
static int ppp_receive_frame(struct ppp *, struct sk_buff *);
static void ppp_receive_error(struct ppp *ppp);
static void ppp_output_wakeup(struct ppp *ppp);
static void ppp_send_ctrl(struct ppp *ppp, struct sk_buff *skb);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_send_frames(struct ppp *ppp);
static struct sk_buff *ppp_vj_compress(struct ppp *ppp, struct sk_buff *skb);

static struct ppp *ppp_find (int pid_value);
static struct ppp *ppp_alloc (void);
static void ppp_generic_init(struct ppp *ppp);
static void ppp_release(struct ppp *ppp);
static void ppp_print_buffer (const char *, const __u8 *, int);
static struct compressor *find_compressor (int type);
#ifndef OPTIMIZE_FLAG_TIME
#define OPTIMIZE_FLAG_TIME	0
#endif

/*
 * Parameters which may be changed via insmod.
 */
static int	flag_time = OPTIMIZE_FLAG_TIME;
#if LINUX_VERSION_CODE >= VERSION(2,1,19)
MODULE_PARM(flag_time, "i");
#endif
#define CHECK_PPP_MAGIC(ppp) do { \
	if (ppp->magic != PPP_MAGIC) { \
		printk(ppp_magic_warn, ppp, __FILE__, __LINE__); \
	} \
} while (0)

#define CHECK_PPP(a) do { \
	CHECK_PPP_MAGIC(ppp); \
	if (!ppp->inuse) { \
		printk(ppp_warning, __LINE__); \
		return a; \
	} \
} while (0)

#define CHECK_PPP_VOID() do { \
	CHECK_PPP_MAGIC(ppp); \
	if (!ppp->inuse) { \
		printk(ppp_warning, __LINE__); \
		return; \
	} \
} while (0)

#define tty2ppp(tty)	((struct ppp *) ((tty)->disc_data))
#define dev2ppp(dev)	((struct ppp *) ((dev)->priv))
#define ppp2tty(ppp)	((ppp)->tty)
#define ppp2dev(ppp)	(&(ppp)->dev)
static struct ppp *ppp_list = NULL;
static struct ppp *ppp_last = NULL;

/* Define these strings only once for all macro invocations */
static char ppp_warning[] = KERN_WARNING "PPP: ALERT! not INUSE! %d\n";
static char ppp_magic_warn[] = KERN_WARNING "bad magic for ppp %p at %s:%d\n";

static char szVersion[] = PPP_VERSION;
#if LINUX_VERSION_CODE < VERSION(2,1,18)
static struct symbol_table ppp_syms = {
#include <linux/symtab_begin.h>
	X(ppp_register_compressor),
	X(ppp_unregister_compressor),
#include <linux/symtab_end.h>
};
#else
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
#endif
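/*
 * Sketch of how an out-of-tree compressor module is expected to use the
 * two exported entry points above (illustrative only; "ppp_my_compress"
 * stands for a hypothetical struct compressor filled in by such a module):
 */
#if 0
int init_module(void)
{
	return ppp_register_compressor(&ppp_my_compress);
}

void cleanup_module(void)
{
	ppp_unregister_compressor(&ppp_my_compress);
}
#endif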
/*************************************************************
 * LINE DISCIPLINE SUPPORT
 *    The following code implements the PPP line discipline
 *    and supports using PPP on an async serial line.
 *************************************************************/
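/*
 * For reference, a user-space sketch (illustrative; "tty_fd" is a
 * hypothetical open serial-port descriptor): pppd switches a tty into
 * this discipline with the TIOCSETD ioctl before using it for PPP:
 */
#if 0
	int disc = N_PPP;

	if (ioctl(tty_fd, TIOCSETD, &disc) < 0)
		perror("TIOCSETD");
#endif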
#define in_xmap(ppp,c)	(ppp->xmit_async_map[(c) >> 5] & (1 << ((c) & 0x1f)))
#define in_rmap(ppp,c)	((((unsigned int) (__u8) (c)) < 0x20) && \
			 ppp->recv_async_map & (1 << (c)))
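/*
 * Example (illustrative): with the default xmit_async_map[0] of
 * 0xffffffff every control character 0x00-0x1f is escaped on transmit;
 * e.g. in_xmap(ppp, 0x11) tests bit 17 of word 0 (XON).  Escaping itself
 * looks roughly like this in the encoder below:
 */
#if 0
	if (in_xmap(ppp, c)) {
		*buf++ = PPP_ESCAPE;		/* 0x7d */
		*buf++ = c ^ PPP_TRANS;		/* flip bit 5 (0x20) */
	}
#endif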
static rw_ret_t ppp_tty_read(struct tty_struct *, struct file *, __u8 *,
			     rw_count_t);
static rw_ret_t ppp_tty_write(struct tty_struct *, struct file *, const __u8 *,
			      rw_count_t);
static int ppp_tty_ioctl(struct tty_struct *, struct file *, unsigned int,
			 unsigned long);
#if LINUX_VERSION_CODE < VERSION(2,1,23)
static int ppp_tty_select(struct tty_struct *tty, struct inode *inode,
			  struct file *filp, int sel_type, select_table *wait);
#else
static unsigned int ppp_tty_poll(struct tty_struct *tty, struct file *filp,
				 poll_table *wait);
#endif
static int ppp_tty_open(struct tty_struct *);
static void ppp_tty_close(struct tty_struct *);
static int ppp_tty_room(struct tty_struct *tty);
static void ppp_tty_receive(struct tty_struct *tty, const __u8 *cp,
			    char *fp, int count);
static void ppp_tty_wakeup(struct tty_struct *tty);
__u16 ppp_crc16_table[256] =
{
	0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
	0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
	0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
	0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
	0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
	0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
	0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
	0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
	0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
	0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
	0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
	0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
	0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
	0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
	0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
	0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
	0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
	0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
	0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
	0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
	0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
	0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
	0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
	0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
	0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
	0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
	0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
	0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
	0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
	0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
	0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
	0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
};
EXPORT_SYMBOL(ppp_crc16_table);
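/*
 * Sketch (illustrative) of how this table is used: ppp_defs.h defines
 * PPP_FCS(fcs, c) as (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff]), so a
 * received frame can be checked one byte at a time:
 */
#if 0
	__u16 fcs = PPP_INITFCS;		/* 0xffff */

	while (len-- > 0)
		fcs = PPP_FCS(fcs, *cp++);
	/* data plus its two FCS bytes leave fcs == PPP_GOODFCS (0xf0b8) */
#endif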
#ifdef CHECK_CHARACTERS
static __u32 paritytab[8] =
{
	0x96696996, 0x69969669, 0x69969669, 0x96696996,
	0x69969669, 0x96696996, 0x96696996, 0x69969669
};
#endif
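/*
 * Each bit of paritytab records the parity of one byte value: bit
 * (c & 0x1f) of word (c >> 5) is set iff c has an odd number of 1 bits.
 * The receive path consults it like this (extract, illustrative):
 */
#if 0
	if (paritytab[chr >> 5] & (1 << (chr & 0x1F)))
		ppp->flags |= SC_RCV_ODDP;	/* saw odd-parity character */
	else
		ppp->flags |= SC_RCV_EVNP;	/* saw even-parity character */
#endif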
/*
 * This procedure is called at initialization time to register
 * the PPP line discipline.
 */
static int
ppp_first_time(void)
{
	static struct tty_ldisc	ppp_ldisc;
	int    status;

	printk(KERN_INFO "PPP: version %s (demand dialling)\n", szVersion);

#ifndef MODULE /* slhc module logic has its own copyright announcement */
	printk(KERN_INFO
	       "TCP compression code copyright 1989 Regents of the "
	       "University of California\n");
#endif

	/*
	 * Register the tty discipline
	 */
	(void) memset (&ppp_ldisc, 0, sizeof (ppp_ldisc));
	ppp_ldisc.magic		= TTY_LDISC_MAGIC;
	ppp_ldisc.name		= "ppp";
	ppp_ldisc.open		= ppp_tty_open;
	ppp_ldisc.close		= ppp_tty_close;
	ppp_ldisc.read		= ppp_tty_read;
	ppp_ldisc.write		= ppp_tty_write;
	ppp_ldisc.ioctl		= ppp_tty_ioctl;
#if LINUX_VERSION_CODE < VERSION(2,1,23)
	ppp_ldisc.select	= ppp_tty_select;
#else
	ppp_ldisc.poll		= ppp_tty_poll;
#endif
	ppp_ldisc.receive_room	= ppp_tty_room;
	ppp_ldisc.receive_buf	= ppp_tty_receive;
	ppp_ldisc.write_wakeup	= ppp_tty_wakeup;

	status = tty_register_ldisc (N_PPP, &ppp_ldisc);
	if (status == 0)
		printk(KERN_INFO "PPP line discipline registered.\n");
	else
		printk(KERN_ERR "error registering line discipline: %d\n",
		       status);

	return status;
}
383 * Called at boot time if the PPP driver is compiled into the kernel.
386 ppp_init(struct device
*dev
)
388 static int first_time
= 1;
393 answer
= ppp_first_time();
394 #if LINUX_VERSION_CODE < VERSION(2,1,18)
396 (void) register_symtab(&ppp_syms
);
406 * Initialize the async-specific parts of the ppp structure.
409 ppp_async_init(struct ppp
*ppp
)
413 ppp
->tty_pushing
= 0;
415 memset (ppp
->xmit_async_map
, 0, sizeof (ppp
->xmit_async_map
));
416 ppp
->xmit_async_map
[0] = 0xffffffff;
417 ppp
->xmit_async_map
[3] = 0x60000000;
418 ppp
->recv_async_map
= 0xffffffff;
421 ppp
->tfcs
= PPP_INITFCS
;
422 ppp
->optr
= ppp
->obuf
;
423 ppp
->olim
= ppp
->obuf
;
426 ppp
->rfcs
= PPP_INITFCS
;
429 ppp
->backup_tty
= NULL
;
436 * Clean up the async-specific parts of the ppp structure.
439 ppp_async_release(struct ppp
*ppp
)
443 if ((skb
= ppp
->rpkt
) != NULL
)
446 if ((skb
= ppp
->tpkt
) != NULL
)
454 * Called when the tty discipline is switched to PPP.
458 ppp_tty_open (struct tty_struct
*tty
)
463 * Allocate a ppp structure to use.
465 tty
->disc_data
= NULL
;
466 ppp
= ppp_find(current
->pid
);
469 * If we are taking over a ppp unit which is currently
470 * connected to a loopback pty, there's not much to do.
477 printk(KERN_ERR
"ppp_alloc failed\n");
482 * Initialize the control block
484 ppp_generic_init(ppp
);
490 tty
->disc_data
= ppp
;
494 * Flush any pending characters in the driver
496 if (tty
->driver
.flush_buffer
)
497 tty
->driver
.flush_buffer (tty
);
505 * Called when the line discipline is changed to something
506 * else, the tty is closed, or the tty detects a hangup.
510 ppp_tty_close (struct tty_struct
*tty
)
512 struct ppp
*ppp
= tty2ppp(tty
);
516 tty
->disc_data
= NULL
;
517 if (ppp
->magic
!= PPP_MAGIC
) {
518 printk(KERN_WARNING
"ppp_tty_close: bogus\n");
522 printk(KERN_WARNING
"ppp_tty_close: not inuse\n");
523 ppp
->tty
= ppp
->backup_tty
= 0;
526 if (tty
== ppp
->backup_tty
)
530 if (ppp
->backup_tty
) {
531 ppp
->tty
= ppp
->backup_tty
;
532 if (ppp_tty_push(ppp
))
533 ppp_output_wakeup(ppp
);
537 if (ppp
->flags
& SC_DEBUG
)
538 printk(KERN_INFO
"ppp: channel %s closing.\n",
541 ppp_async_release(ppp
);
548 * Read a PPP frame from the rcv_q list,
549 * waiting if necessary
552 ppp_tty_read(struct tty_struct
*tty
, struct file
*file
, __u8
* buf
,
555 struct ppp
*ppp
= tty2ppp (tty
);
560 * Validate the pointers
567 * Before we attempt to write the frame to the user, ensure that the
568 * user has access to the pages for the total buffer length.
570 err
= verify_area(VERIFY_WRITE
, buf
, nr
);
575 * Wait for a frame to arrive if necessary.
576 * We increment the module use count so that the module
577 * can't go away while we're sleeping.
584 if (!ppp
|| ppp
->magic
!= PPP_MAGIC
|| !ppp
->inuse
588 skb
= skb_dequeue(&ppp
->rcv_q
);
593 * If no frame is available, return -EAGAIN or wait.
596 if (file
->f_flags
& O_NONBLOCK
)
599 current
->timeout
= 0;
600 interruptible_sleep_on(&ppp
->read_wait
);
602 if (signal_pending(current
))
610 * Ensure that the frame will fit within the caller's buffer.
611 * If not, just discard the frame.
615 if (ppp
->flags
& SC_DEBUG
)
617 "ppp: read of %lu bytes too small for %ld "
618 "frame\n", (unsigned long) nr
, (long) len
);
619 ppp
->stats
.ppp_ierrors
++;
625 * Copy the received data from the buffer to the caller's area.
628 if (COPY_TO_USER(buf
, skb
->data
, len
))
637 * Writing to a tty in ppp line discipline sends a PPP frame.
638 * Used by pppd to send control packets (LCP, etc.).
641 ppp_tty_write(struct tty_struct
*tty
, struct file
*file
, const __u8
* data
,
644 struct ppp
*ppp
= tty2ppp (tty
);
649 * Verify the pointers.
654 if (ppp
->magic
!= PPP_MAGIC
)
660 * Ensure that the caller does not wish to send too much.
662 if (count
> PPP_MTU
+ PPP_HDRLEN
) {
663 if (ppp
->flags
& SC_DEBUG
)
665 "ppp_tty_write: truncating user packet "
666 "from %lu to mtu %d\n", (unsigned long) count
,
667 PPP_MTU
+ PPP_HDRLEN
);
668 count
= PPP_MTU
+ PPP_HDRLEN
;
672 * Allocate a buffer for the data and fetch it from the user space.
674 skb
= alloc_skb(count
, GFP_KERNEL
);
676 printk(KERN_ERR
"ppp_tty_write: no memory\n");
679 new_data
= skb_put(skb
, count
);
682 * Retrieve the user's buffer
684 if (COPY_FROM_USER(new_data
, data
, count
)) {
692 ppp_send_ctrl(ppp
, skb
);
694 return (rw_ret_t
) count
;
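/*
 * Usage sketch (illustrative, user-space side; "ppp_fd", "frame" and
 * "frame_len" are hypothetical): pppd sends an LCP/IPCP control packet by
 * writing one complete PPP frame - PPP_ALLSTATIONS (0xff), PPP_UI (0x03),
 * the 16-bit protocol number, then the payload - to the tty it has put
 * into N_PPP discipline; ppp_send_ctrl() above then queues it.
 */
#if 0
	if (write(ppp_fd, frame, frame_len) != frame_len)
		perror("write to ppp tty");
#endif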
698 * Process the IOCTL call for the tty device.
699 * Only the ioctls that relate to using ppp on async serial lines
700 * are processed here; the rest are handled by ppp_ioctl.
703 ppp_tty_ioctl (struct tty_struct
*tty
, struct file
* file
,
704 unsigned int param2
, unsigned long param3
)
706 struct ppp
*ppp
= tty2ppp (tty
);
707 register int temp_i
= 0;
711 * Verify the status of the PPP device.
713 if (!ppp
|| ppp
->magic
!= PPP_MAGIC
|| !ppp
->inuse
)
717 * The user must have an euid of root to do these requests.
719 if (!capable(CAP_NET_ADMIN
))
723 case PPPIOCGASYNCMAP
:
725 * Retrieve the transmit async map
727 if (PUT_USER(ppp
->xmit_async_map
[0], (int *) param3
))
732 case PPPIOCSASYNCMAP
:
734 * Set the transmit async map
736 if (GET_USER(temp_i
, (int *) param3
))
738 ppp
->xmit_async_map
[0] = temp_i
;
739 if (ppp
->flags
& SC_DEBUG
)
741 "ppp_tty_ioctl: set xmit asyncmap %x\n",
742 ppp
->xmit_async_map
[0]);
746 case PPPIOCSRASYNCMAP
:
748 * Set the receive async map
750 if (GET_USER(temp_i
, (int *) param3
))
752 ppp
->recv_async_map
= temp_i
;
753 if (ppp
->flags
& SC_DEBUG
)
755 "ppp_tty_ioctl: set rcv asyncmap %x\n",
756 ppp
->recv_async_map
);
760 case PPPIOCGXASYNCMAP
:
762 * Get the map of characters to be escaped on transmission.
764 if (COPY_TO_USER((void *) param3
, ppp
->xmit_async_map
,
765 sizeof (ppp
->xmit_async_map
)))
770 case PPPIOCSXASYNCMAP
:
772 * Set the map of characters to be escaped on transmission.
777 if (COPY_FROM_USER(temp_tbl
, (void *) param3
,
781 temp_tbl
[1] = 0x00000000;
782 temp_tbl
[2] &= ~0x40000000;
783 temp_tbl
[3] |= 0x60000000;
785 memcpy(ppp
->xmit_async_map
, temp_tbl
,
786 sizeof (ppp
->xmit_async_map
));
788 if (ppp
->flags
& SC_DEBUG
)
790 "ppp_tty_ioctl: set xasyncmap\n");
797 * Set up this PPP unit to be used next time this
798 * process sets a tty to PPP line discipline.
800 ppp
->backup_tty
= tty
;
801 ppp
->sc_xfer
= current
->pid
;
808 * Allow users to read, but not set, the serial port parameters
810 error
= n_tty_ioctl (tty
, file
, param2
, param3
);
815 * Returns how many bytes are available for a read().
824 skb
= skb_peek(&ppp
->rcv_q
);
827 restore_flags(flags
);
828 if (PUT_USER(count
, (int *) param3
))
836 * All other ioctl() events will come here.
838 error
= ppp_ioctl(ppp
, param2
, param3
);
847 * Process the select() or poll() statement for the PPP device.
850 #if LINUX_VERSION_CODE < VERSION(2,1,23)
852 ppp_tty_select(struct tty_struct
*tty
, struct inode
*inode
,
853 struct file
*filp
, int sel_type
, select_table
* wait
)
855 struct ppp
*ppp
= tty2ppp(tty
);
859 * Verify the status of the PPP device.
861 if (!ppp
|| tty
!= ppp
->tty
)
868 /* The fd is readable if the receive queue isn't empty. */
869 if (skb_peek(&ppp
->rcv_q
) != NULL
)
873 /* Check for exceptions or read errors. */
874 /* Is this a pty link and the remote disconnected? */
875 if (tty
->flags
& (1 << TTY_OTHER_CLOSED
))
878 /* Is this a local link and the modem disconnected? */
879 if (tty_hung_up_p (filp
))
882 select_wait(&ppp
->read_wait
, wait
);
887 /* The fd is always writable. */
893 #else /* 2.1.23 or later */
896 ppp_tty_poll(struct tty_struct
*tty
, struct file
*filp
, poll_table
* wait
)
898 struct ppp
*ppp
= tty2ppp(tty
);
899 unsigned int mask
= 0;
901 if (ppp
&& ppp
->magic
== PPP_MAGIC
&& tty
== ppp
->tty
) {
904 poll_wait(filp
, &ppp
->read_wait
, wait
);
906 if (skb_peek(&ppp
->rcv_q
) != NULL
)
907 mask
|= POLLIN
| POLLRDNORM
;
908 if (tty
->flags
& (1 << TTY_OTHER_CLOSED
)
909 || tty_hung_up_p(filp
))
911 mask
|= POLLOUT
| POLLWRNORM
;
915 #endif /* >= 2.1.23 */
918 * This function is called by the tty driver when the transmit buffer has
919 * additional space. It is used by the ppp code to continue to transmit
920 * the current buffer should the buffer have been partially sent.
923 ppp_tty_wakeup (struct tty_struct
*tty
)
925 struct ppp
*ppp
= tty2ppp (tty
);
927 tty
->flags
&= ~(1 << TTY_DO_WRITE_WAKEUP
);
934 if (ppp_tty_push(ppp
))
935 ppp_output_wakeup(ppp
);
939 * Send a packet to the peer over an async tty line.
940 * Returns -1 iff the packet could not be accepted at present,
941 * 0 if the packet was accepted but we can't accept another yet, or
942 * 1 if we can accept another packet immediately.
943 * If this procedure returns 0, ppp_output_wakeup will be called
947 ppp_async_send(struct ppp
*ppp
, struct sk_buff
*skb
)
953 if (ppp
->tpkt
!= NULL
)
958 return ppp_tty_push(ppp
);
962 * Push as much data as possible out to the tty.
963 * Returns 1 if we finished encoding the current frame, 0 otherwise.
966 ppp_tty_push(struct ppp
*ppp
)
968 int avail
, sent
, done
= 0;
969 struct tty_struct
*tty
= ppp2tty(ppp
);
972 if (ppp
->tty_pushing
)
974 if (tty
== NULL
|| tty
->disc_data
!= (void *) ppp
)
976 while (ppp
->optr
< ppp
->olim
|| ppp
->tpkt
!= 0) {
977 ppp
->tty_pushing
= 1;
978 avail
= ppp
->olim
- ppp
->optr
;
980 tty
->flags
|= (1 << TTY_DO_WRITE_WAKEUP
);
981 sent
= tty
->driver
.write(tty
, 0, ppp
->optr
, avail
);
983 goto flush
; /* error, e.g. loss of CD */
984 ppp
->stats
.ppp_obytes
+= sent
;
987 ppp
->tty_pushing
= 0;
992 done
= ppp_async_encode(ppp
);
993 ppp
->tty_pushing
= 0;
998 ppp
->tty_pushing
= 1;
999 ppp
->stats
.ppp_oerrors
++;
1000 if (ppp
->tpkt
!= 0) {
1001 kfree_skb(ppp
->tpkt
);
1005 ppp
->optr
= ppp
->olim
;
1006 ppp
->tty_pushing
= 0;
1011 * Procedure to encode the data for async serial transmission.
1012 * Does octet stuffing (escaping) and address/control
1013 * and protocol compression.
1014 * Assumes ppp->opkt != 0 on entry.
1015 * Returns 1 if we finished the current frame, 0 otherwise.
1018 ppp_async_encode(struct ppp
*ppp
)
1020 int fcs
, i
, count
, c
;
1021 unsigned char *buf
, *buflim
;
1022 unsigned char *data
;
1031 data
= ppp
->tpkt
->data
;
1032 count
= ppp
->tpkt
->len
;
 * LCP packets with code values between 1 (configure-request)
1037 * and 7 (code-reject) must be sent as though no options
1038 * had been negotiated.
1040 islcp
= PPP_PROTOCOL(data
) == PPP_LCP
1041 && 1 <= data
[PPP_HDRLEN
] && data
[PPP_HDRLEN
] <= 7;
1045 * Start of a new packet - insert the leading FLAG
1046 * character if necessary.
1048 if (islcp
|| flag_time
== 0
1049 || jiffies
- ppp
->last_xmit
>= flag_time
)
1051 /* only reset idle time for data packets */
1052 if (PPP_PROTOCOL(data
) < 0x8000)
1053 ppp
->last_xmit
= jiffies
;
1055 ++ppp
->stats
.ppp_opackets
;
1056 ppp
->stats
.ppp_ooctects
+= count
;
1059 * Do address/control compression
1061 if ((ppp
->flags
& SC_COMP_AC
) != 0 && !islcp
1062 && PPP_ADDRESS(data
) == PPP_ALLSTATIONS
1063 && PPP_CONTROL(data
) == PPP_UI
)
1068 * Once we put in the last byte, we need to put in the FCS
1069 * and closing flag, so make sure there is at least 7 bytes
1070 * of free space in the output buffer.
1072 buflim
= buf
+ OBUFSIZE
- 6;
1073 while (i
< count
&& buf
< buflim
) {
1075 if (i
== 3 && c
== 0 && (ppp
->flags
& SC_COMP_PROT
))
1076 continue; /* compress protocol field */
1077 fcs
= PPP_FCS(fcs
, c
);
1078 if (in_xmap(ppp
, c
) || (islcp
&& c
< 0x20)) {
1079 *buf
++ = PPP_ESCAPE
;
1087 * We have finished the packet. Add the FCS and flag.
1091 if (in_xmap(ppp
, c
) || (islcp
&& c
< 0x20)) {
1092 *buf
++ = PPP_ESCAPE
;
1096 c
= (fcs
>> 8) & 0xff;
1097 if (in_xmap(ppp
, c
) || (islcp
&& c
< 0x20)) {
1098 *buf
++ = PPP_ESCAPE
;
1105 kfree_skb(ppp
->tpkt
);
1111 * Remember where we are up to in this packet.
1120 * Callback function from tty driver. Return the amount of space left
1121 * in the receiver's buffer to decide if remote transmitter is to be
1125 ppp_tty_room (struct tty_struct
*tty
)
1127 return 65536; /* We can handle an infinite amount of data. :-) */
1131 * Callback function when data is available at the tty driver.
1134 ppp_tty_receive (struct tty_struct
*tty
, const __u8
* data
,
1135 char *flags
, int count
)
1137 register struct ppp
*ppp
= tty2ppp (tty
);
1138 struct sk_buff
*skb
;
1145 * This can happen if stuff comes in on the backup tty.
1147 if (ppp
== 0 || tty
!= ppp
->tty
)
1150 * Verify the table pointer and ensure that the line is
1151 * still in PPP discipline.
1153 if (ppp
->magic
!= PPP_MAGIC
) {
1154 if (ppp
->flags
& SC_DEBUG
)
1156 "PPP: tty_receive called but couldn't find "
1161 * Print the buffer if desired
1163 if (ppp
->flags
& SC_LOG_RAWIN
)
1164 ppp_print_buffer ("receive buffer", data
, count
);
1166 ppp
->stats
.ppp_ibytes
+= count
;
1168 while (count
-- > 0) {
1170 * Collect the character and error condition for the character.
1171 * Set the toss flag for the first character error.
1181 ++ppp
->estats
.rx_fifo_errors
;
1185 ++ppp
->estats
.rx_frame_errors
;
1193 * Set the flags for d7 being 0/1 and parity being
1194 * even/odd so that the normal processing would have
1195 * all flags set at the end of the session. A
1196 * missing flag bit indicates an error condition.
1199 #ifdef CHECK_CHARACTERS
1201 ppp
->flags
|= SC_RCV_B7_1
;
1203 ppp
->flags
|= SC_RCV_B7_0
;
1205 if (paritytab
[chr
>> 5] & (1 << (chr
& 0x1F)))
1206 ppp
->flags
|= SC_RCV_ODDP
;
1208 ppp
->flags
|= SC_RCV_EVNP
;
1211 if (chr
== PPP_FLAG
) {
1213 * FLAG. This is the end of the block. If the block
1214 * ends with ESC FLAG, then the block is to be ignored.
1219 * Process the frame if it was received correctly.
1220 * If there was an error, let the VJ decompressor know.
1221 * There are 4 cases here:
1222 * skb != NULL, toss != 0: error in frame
1223 * skb != NULL, toss == 0: frame ok
1224 * skb == NULL, toss != 0: very first frame,
1225 * error on 1st char, or alloc_skb failed
1226 * skb == NULL, toss == 0: empty frame (~~)
1228 if (ppp
->toss
|| !ppp_receive_frame(ppp
, skb
)) {
1229 if (ppp
->toss
&& (ppp
->flags
& SC_DEBUG
))
1231 "ppp: tossing frame (%x)\n",
1235 if (!(ppp
->toss
== 0xE0 || ppp
->toss
== 0x80))
1236 ++ppp
->stats
.ppp_ierrors
;
1237 ppp_receive_error(ppp
);
1240 * Reset for the next frame.
1243 ppp
->rfcs
= PPP_INITFCS
;
1249 /* If we're tossing, look no further. */
1253 /* If this is a control char to be ignored, do so */
1254 if (in_rmap(ppp
, chr
))
1258 * Modify the next character if preceded by escape.
1259 * The escape character (0x7d) could be an escaped
1260 * 0x5d, if it follows an escape :-)
1265 } else if (chr
== PPP_ESCAPE
) {
1266 ppp
->escape
= PPP_TRANS
;
1271 * Allocate an skbuff on the first character received.
1272 * The 128 is room for VJ header expansion and FCS.
1275 skb
= dev_alloc_skb(ppp
->mru
+ 128 + PPP_HDRLEN
);
1277 if (ppp
->flags
& SC_DEBUG
)
1278 printk(KERN_DEBUG
"couldn't "
1279 "alloc skb for recv\n");
1286 * Decompress A/C and protocol compression here.
1288 if (skb
->len
== 0 && chr
!= PPP_ALLSTATIONS
) {
1289 p
= skb_put(skb
, 2);
1290 p
[0] = PPP_ALLSTATIONS
;
1293 if (skb
->len
== 2 && (chr
& 1) != 0) {
1294 p
= skb_put(skb
, 1);
1299 * Check if we've overflowed the MRU
1301 if (skb
->len
>= ppp
->mru
+ PPP_HDRLEN
+ 2
1302 || skb_tailroom(skb
) <= 0) {
1303 ++ppp
->estats
.rx_length_errors
;
1305 if (ppp
->flags
& SC_DEBUG
)
1306 printk(KERN_DEBUG
"rcv frame too long: "
1307 "len=%d mru=%d hroom=%d troom=%d\n",
1308 skb
->len
, ppp
->mru
, skb_headroom(skb
),
1314 * Store the character and update the FCS.
1316 p
= skb_put(skb
, 1);
1318 ppp
->rfcs
= PPP_FCS(ppp
->rfcs
, chr
);
/*************************************************************
 * PPP NETWORK INTERFACE SUPPORT
 *	The following code implements the PPP network
 *	interface device and handles those parts of
 *	the PPP processing which are independent of the
 *	type of hardware link being used, including
 *	VJ and packet compression.
 *************************************************************/

/*
 * Network device driver callback routines
 */

static int ppp_init_dev(struct device *dev);
static int ppp_dev_open(struct device *);
static int ppp_dev_ioctl(struct device *dev, struct ifreq *ifr, int cmd);
static int ppp_dev_close(struct device *);
static int ppp_dev_xmit(struct sk_buff *, struct device *);
static struct net_device_stats *ppp_dev_stats (struct device *);

#if LINUX_VERSION_CODE < VERSION(2,1,15)
static int ppp_dev_header(struct sk_buff *, struct device *, __u16,
			  void *, void *, unsigned int);
static int ppp_dev_rebuild(void *eth, struct device *dev,
			   unsigned long raddr, struct sk_buff *skb);
#endif
/*
 * Information for the protocol decoder
 */

typedef int (*pfn_proto)  (struct ppp *, struct sk_buff *);

typedef struct ppp_proto_struct {
	int		proto;
	pfn_proto	func;
} ppp_proto_type;

static int rcv_proto_ip		(struct ppp *, struct sk_buff *);
static int rcv_proto_ipx	(struct ppp *, struct sk_buff *);
static int rcv_proto_at	 	(struct ppp *, struct sk_buff *);
static int rcv_proto_vjc_comp	(struct ppp *, struct sk_buff *);
static int rcv_proto_vjc_uncomp (struct ppp *, struct sk_buff *);
static int rcv_proto_ccp	(struct ppp *, struct sk_buff *);
static int rcv_proto_unknown	(struct ppp *, struct sk_buff *);

static ppp_proto_type proto_list[] = {
	{ PPP_IP,	  rcv_proto_ip	       },
	{ PPP_IPX,	  rcv_proto_ipx	       },
	{ PPP_AT,	  rcv_proto_at	       },
	{ PPP_VJC_COMP,	  rcv_proto_vjc_comp   },
	{ PPP_VJC_UNCOMP, rcv_proto_vjc_uncomp },
	{ PPP_CCP,	  rcv_proto_ccp	       },
	{ 0,		  rcv_proto_unknown    }	/* !!! MUST BE LAST !!! */
};
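/*
 * ppp_receive_frame() below walks this table until the protocol number
 * matches, falling through to rcv_proto_unknown (and hence to pppd) for
 * everything else.  A handler for an additional network protocol would be
 * a new entry placed before the final catch-all line, e.g. (hypothetical
 * names, illustrative only):
 *
 *	{ PPP_MYPROTO,	  rcv_proto_myproto  },
 */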
1381 * Called when the PPP network interface device is actually created.
1384 ppp_init_dev (struct device
*dev
)
1386 dev
->hard_header_len
= PPP_HDRLEN
;
1387 #if LINUX_VERSION_CODE < VERSION(2,1,15)
1388 dev
->hard_header
= ppp_dev_header
;
1389 dev
->rebuild_header
= ppp_dev_rebuild
;
1394 dev
->hard_start_xmit
= ppp_dev_xmit
;
1395 dev
->open
= ppp_dev_open
;
1396 dev
->stop
= ppp_dev_close
;
1397 dev
->get_stats
= ppp_dev_stats
;
1398 dev
->do_ioctl
= ppp_dev_ioctl
;
1400 dev
->tx_queue_len
= 10;
1401 dev
->type
= ARPHRD_PPP
;
1403 #if LINUX_VERSION_CODE < VERSION(2,1,20)
1407 for (indx
= 0; indx
< DEV_NUMBUFFS
; indx
++)
1408 skb_queue_head_init (&dev
->buffs
[indx
]);
1411 dev_init_buffers(dev
);
1414 dev
->flags
= IFF_POINTOPOINT
| IFF_NOARP
| IFF_MULTICAST
;
1420 * Callback from the network layer when the device goes up.
1424 ppp_dev_open (struct device
*dev
)
1426 struct ppp
*ppp
= dev2ppp(dev
);
1428 if (!ppp
->inuse
|| ppp2tty(ppp
) == NULL
) {
1429 printk(KERN_ERR
"ppp: %s not active\n", dev
->name
);
1439 * Callback from the network layer when the ppp device goes down.
1443 ppp_dev_close (struct device
*dev
)
1445 struct ppp
*ppp
= dev2ppp (dev
);
1447 CHECK_PPP_MAGIC(ppp
);
static void
get_vj_stats(struct vjstat *vj, struct slcompress *slc)
{
	vj->vjs_packets	       = slc->sls_o_compressed + slc->sls_o_uncompressed;
	vj->vjs_compressed     = slc->sls_o_compressed;
	vj->vjs_searches       = slc->sls_o_searches;
	vj->vjs_misses	       = slc->sls_o_misses;
	vj->vjs_errorin	       = slc->sls_i_error;
	vj->vjs_tossed	       = slc->sls_i_tossed;
	vj->vjs_uncompressedin = slc->sls_i_uncompressed;
	vj->vjs_compressedin   = slc->sls_i_compressed;
}
1468 * Callback from the network layer to process the sockioctl functions.
1471 ppp_dev_ioctl (struct device
*dev
, struct ifreq
*ifr
, int cmd
)
1473 struct ppp
*ppp
= dev2ppp(dev
);
1476 struct ppp_stats stats
;
1477 struct ppp_comp_stats cstats
;
1481 CHECK_PPP_MAGIC(ppp
);
1483 memset(&u
, 0, sizeof(u
));
1486 u
.stats
.p
= ppp
->stats
;
1487 if (ppp
->slcomp
!= NULL
)
1488 get_vj_stats(&u
.stats
.vj
, ppp
->slcomp
);
1489 nb
= sizeof(u
.stats
);
1492 case SIOCGPPPCSTATS
:
1493 if (ppp
->sc_xc_state
!= NULL
)
1494 (*ppp
->sc_xcomp
->comp_stat
)
1495 (ppp
->sc_xc_state
, &u
.cstats
.c
);
1496 if (ppp
->sc_rc_state
!= NULL
)
1497 (*ppp
->sc_rcomp
->decomp_stat
)
1498 (ppp
->sc_rc_state
, &u
.cstats
.d
);
1499 nb
= sizeof(u
.cstats
);
1503 strcpy(u
.vers
, szVersion
);
1504 nb
= strlen(u
.vers
) + 1;
1511 if (COPY_TO_USER((void *) ifr
->ifr_ifru
.ifru_data
, &u
, nb
))
1517 * Process the generic PPP ioctls, i.e. those which are not specific
1518 * to any particular type of hardware link.
1521 ppp_ioctl(struct ppp
*ppp
, unsigned int param2
, unsigned long param3
)
1523 register int temp_i
= 0, oldflags
;
1524 int error
= -EFAULT
;
1525 unsigned long flags
;
1526 struct ppp_idle cur_ddinfo
;
1532 * The user must have an euid of root to do these requests.
1534 if (!capable(CAP_NET_ADMIN
))
1542 if (GET_USER(temp_i
, (int *) param3
))
1544 if (temp_i
< PPP_MRU
)
1547 if (ppp
->flags
& SC_DEBUG
)
1549 "ppp_ioctl: set mru to %x\n", temp_i
);
1555 * Fetch the current flags
1557 temp_i
= ppp
->flags
& SC_MASK
;
1558 #ifndef CHECK_CHARACTERS /* Don't generate errors if we don't check chars. */
1559 temp_i
|= SC_RCV_B7_1
| SC_RCV_B7_0
|
1560 SC_RCV_ODDP
| SC_RCV_EVNP
;
1562 if (PUT_USER(temp_i
, (int *) param3
))
1569 * Set the flags for the various options
1571 if (GET_USER(temp_i
, (int *) param3
))
1574 if (ppp
->flags
& ~temp_i
& SC_CCP_OPEN
)
1575 ppp_ccp_closed(ppp
);
1579 oldflags
= ppp
->flags
;
1580 temp_i
= (temp_i
& SC_MASK
) | (oldflags
& ~SC_MASK
);
1581 ppp
->flags
= temp_i
;
1582 restore_flags(flags
);
1584 if ((oldflags
| temp_i
) & SC_DEBUG
)
1586 "ppp_ioctl: set flags to %x\n", temp_i
);
1590 case PPPIOCSCOMPRESS
:
1592 * Set the compression mode
1594 error
= ppp_set_compression
1595 (ppp
, (struct ppp_option_data
*) param3
);
1600 * Obtain the unit number for this device.
1602 if (PUT_USER(ppp
->line
, (int *) param3
))
1604 if (ppp
->flags
& SC_DEBUG
)
1606 "ppp_ioctl: get unit: %d\n", ppp
->line
);
1612 * Set the debug level
1614 if (GET_USER(temp_i
, (int *) param3
))
1616 temp_i
= (temp_i
& 0x1F) << 16;
1618 if ((ppp
->flags
| temp_i
) & SC_DEBUG
)
1620 "ppp_ioctl: set dbg flags to %x\n", temp_i
);
1624 ppp
->flags
= (ppp
->flags
& ~0x1F0000) | temp_i
;
1625 restore_flags(flags
);
1631 * Get the debug level
1633 temp_i
= (ppp
->flags
>> 16) & 0x1F;
1634 if (PUT_USER(temp_i
, (int *) param3
))
1641 * Get the times since the last send/receive frame operation
1643 /* change absolute times to relative times. */
1644 cur_ddinfo
.xmit_idle
= (jiffies
- ppp
->last_xmit
) / HZ
;
1645 cur_ddinfo
.recv_idle
= (jiffies
- ppp
->last_recv
) / HZ
;
1646 if (COPY_TO_USER((void *) param3
, &cur_ddinfo
,
1647 sizeof (cur_ddinfo
)))
1654 * Set the maximum VJ header compression slot number.
1656 if (GET_USER(temp_i
, (int *) param3
))
1659 if (temp_i
< 2 || temp_i
> 255)
1662 if (ppp
->flags
& SC_DEBUG
)
1663 printk(KERN_INFO
"ppp_ioctl: set maxcid to %d\n",
1665 if (ppp
->slcomp
!= NULL
)
1666 slhc_free(ppp
->slcomp
);
1667 ppp
->slcomp
= slhc_init(16, temp_i
);
1670 if (ppp
->slcomp
== NULL
) {
1671 printk(KERN_ERR
"ppp: no memory for VJ compression\n");
1679 if (COPY_FROM_USER(&npi
, (void *) param3
, sizeof(npi
)))
1682 switch (npi
.protocol
) {
1684 npi
.protocol
= NP_IP
;
1687 npi
.protocol
= NP_IPX
;
1690 npi
.protocol
= NP_AT
;
1693 if (ppp
->flags
& SC_DEBUG
)
1694 printk(KERN_DEBUG
"pppioc[gs]npmode: "
1695 "invalid proto %d\n", npi
.protocol
);
1700 if (param2
== PPPIOCGNPMODE
) {
1701 npi
.mode
= ppp
->sc_npmode
[npi
.protocol
];
1702 if (COPY_TO_USER((void *) param3
, &npi
, sizeof(npi
)))
1705 ppp
->sc_npmode
[npi
.protocol
] = npi
.mode
;
1706 if (ppp
->flags
& SC_DEBUG
)
1707 printk(KERN_DEBUG
"ppp: set np %d to %d\n",
1708 npi
.protocol
, npi
.mode
);
1716 * All other ioctl() events will come here.
1718 if (ppp
->flags
& SC_DEBUG
)
1720 "ppp_ioctl: invalid ioctl: %x, addr %lx\n",
1723 error
= -ENOIOCTLCMD
;
1731 * Process the set-compression ioctl.
1734 ppp_set_compression (struct ppp
*ppp
, struct ppp_option_data
*odp
)
1736 struct compressor
*cp
;
1738 unsigned long flags
;
1740 __u8 ccp_option
[CCP_MAX_OPTION_LENGTH
];
1741 struct ppp_option_data data
;
1744 * Fetch the compression parameters
1747 if (COPY_FROM_USER(&data
, odp
, sizeof (data
)))
1752 if ((unsigned) nb
>= CCP_MAX_OPTION_LENGTH
)
1753 nb
= CCP_MAX_OPTION_LENGTH
;
1755 if (COPY_FROM_USER(ccp_option
, ptr
, nb
))
1759 if (ccp_option
[1] < 2) /* preliminary check on the length byte */
1764 ppp
->flags
&= ~(data
.transmit
? SC_COMP_RUN
: SC_DECOMP_RUN
);
1765 restore_flags(flags
);
1767 cp
= find_compressor (ccp_option
[0]);
1768 #if defined(CONFIG_KMOD) || defined(CONFIG_KERNELD)
1771 sprintf(modname
, "ppp-compress-%d", ccp_option
[0]);
1772 request_module(modname
);
1773 cp
= find_compressor(ccp_option
[0]);
1775 #endif /* CONFIG_KMOD */
1778 if (ppp
->flags
& SC_DEBUG
)
1780 "%s: no compressor for [%x %x %x], %x\n",
1781 ppp
->name
, ccp_option
[0], ccp_option
[1],
1783 goto out
; /* compressor not loaded */
1787 * Found a handler for the protocol - try to allocate
1788 * a compressor or decompressor.
1791 if (data
.transmit
) {
1792 if (ppp
->sc_xc_state
!= NULL
)
1793 (*ppp
->sc_xcomp
->comp_free
)(ppp
->sc_xc_state
);
1794 ppp
->sc_xc_state
= NULL
;
1797 ppp
->sc_xc_state
= cp
->comp_alloc(ccp_option
, nb
);
1798 if (ppp
->sc_xc_state
== NULL
) {
1799 if (ppp
->flags
& SC_DEBUG
)
1800 printk(KERN_DEBUG
"%s: comp_alloc failed\n",
1805 if (ppp
->sc_rc_state
!= NULL
)
1806 (*ppp
->sc_rcomp
->decomp_free
)(ppp
->sc_rc_state
);
1807 ppp
->sc_rc_state
= NULL
;
1810 ppp
->sc_rc_state
= cp
->decomp_alloc(ccp_option
, nb
);
1811 if (ppp
->sc_rc_state
== NULL
) {
1812 if (ppp
->flags
& SC_DEBUG
)
1813 printk(KERN_DEBUG
"%s: decomp_alloc failed\n",
1823 * Handle a CCP packet.
1825 * The CCP packet is passed along to the pppd process just like any
1826 * other PPP frame. The difference is that some processing needs to be
1827 * immediate or the compressors will become confused on the peer.
1830 static void ppp_proto_ccp(struct ppp
*ppp
, __u8
*dp
, int len
, int rcvd
)
1832 int slen
= CCP_LENGTH(dp
);
1833 __u8
*opt
= dp
+ CCP_HDRLEN
;
1834 int opt_len
= slen
- CCP_HDRLEN
;
1835 unsigned long flags
;
1840 if (ppp
->flags
& SC_DEBUG
)
1841 printk(KERN_DEBUG
"ppp_proto_ccp rcvd=%d code=%x flags=%x\n",
1842 rcvd
, CCP_CODE(dp
), ppp
->flags
);
1844 switch (CCP_CODE(dp
)) {
1849 * CCP must be going down - disable compression
1851 if (ppp
->flags
& SC_CCP_UP
) {
1853 ppp
->flags
&= ~(SC_CCP_UP
|
1860 if ((ppp
->flags
& SC_CCP_OPEN
) == 0)
1862 if (ppp
->flags
& SC_CCP_UP
)
1864 if (slen
< (CCP_HDRLEN
+ CCP_OPT_MINLEN
))
1866 if (slen
< (CCP_OPT_LENGTH (opt
) + CCP_HDRLEN
))
1870 * we're agreeing to send compressed packets.
1872 if (ppp
->sc_xc_state
== NULL
)
1875 if ((*ppp
->sc_xcomp
->comp_init
)
1878 ppp
->line
, 0, ppp
->flags
& SC_DEBUG
)) {
1879 if (ppp
->flags
& SC_DEBUG
)
1880 printk(KERN_DEBUG
"%s: comp running\n",
1883 ppp
->flags
|= SC_COMP_RUN
;
1889 * peer is agreeing to send compressed packets.
1891 if (ppp
->sc_rc_state
== NULL
)
1894 if ((*ppp
->sc_rcomp
->decomp_init
)
1897 ppp
->line
, 0, ppp
->mru
, ppp
->flags
& SC_DEBUG
)) {
1898 if (ppp
->flags
& SC_DEBUG
)
1899 printk(KERN_DEBUG
"%s: decomp running\n",
1902 ppp
->flags
|= SC_DECOMP_RUN
;
1903 ppp
->flags
&= ~(SC_DC_ERROR
| SC_DC_FERROR
);
1909 * CCP Reset-ack resets compressors and decompressors
1910 * as it passes through.
1912 if ((ppp
->flags
& SC_CCP_UP
) == 0)
1916 if (ppp
->sc_xc_state
&& (ppp
->flags
& SC_COMP_RUN
)) {
1917 (*ppp
->sc_xcomp
->comp_reset
)(ppp
->sc_xc_state
);
1918 if (ppp
->flags
& SC_DEBUG
)
1919 printk(KERN_DEBUG
"%s: comp reset\n",
1923 if (ppp
->sc_rc_state
&& (ppp
->flags
& SC_DECOMP_RUN
)) {
1924 (*ppp
->sc_rcomp
->decomp_reset
)(ppp
->sc_rc_state
);
1925 if (ppp
->flags
& SC_DEBUG
)
1926 printk(KERN_DEBUG
"%s: decomp reset\n",
1929 ppp
->flags
&= ~SC_DC_ERROR
;
1934 restore_flags(flags
);
1938 * CCP is down; free (de)compressor state if necessary.
1942 ppp_ccp_closed(struct ppp
*ppp
)
1944 unsigned long flags
;
1948 ppp
->flags
&= ~(SC_CCP_OPEN
| SC_CCP_UP
| SC_COMP_RUN
| SC_DECOMP_RUN
);
1949 restore_flags(flags
);
1950 if (ppp
->flags
& SC_DEBUG
)
1951 printk(KERN_DEBUG
"%s: ccp closed\n", ppp
->name
);
1952 if (ppp
->sc_xc_state
) {
1953 (*ppp
->sc_xcomp
->comp_free
) (ppp
->sc_xc_state
);
1954 ppp
->sc_xc_state
= NULL
;
1957 if (ppp
->sc_rc_state
) {
1958 (*ppp
->sc_rcomp
->decomp_free
) (ppp
->sc_rc_state
);
1959 ppp
->sc_rc_state
= NULL
;
1963 /*************************************************************
1964 * RECEIVE-SIDE ROUTINES
1965 *************************************************************/
1968 * On entry, a received frame is in skb.
1969 * Check it and dispose as appropriate.
1972 ppp_receive_frame(struct ppp
*ppp
, struct sk_buff
*skb
)
1978 struct sk_buff
*new_skb
;
1979 ppp_proto_type
*proto_ptr
;
1982 * An empty frame is ignored. This occurs if the FLAG sequence
1983 * precedes and follows each frame.
1987 if (skb
->len
== 0) {
1995 * Generate an error if the frame is too small.
1997 if (count
< PPP_HDRLEN
+ 2) {
1998 if (ppp
->flags
& SC_DEBUG
)
2000 "ppp: got runt ppp frame, %d chars\n", count
);
2001 ++ppp
->estats
.rx_length_errors
;
2006 * Verify the FCS of the frame and discard the FCS characters
2007 * from the end of the buffer.
2009 if (ppp
->rfcs
!= PPP_GOODFCS
) {
2010 if (ppp
->flags
& SC_DEBUG
) {
2012 "ppp: frame with bad fcs, length = %d\n",
2014 ppp_print_buffer("bad frame", data
, count
);
2016 ++ppp
->estats
.rx_crc_errors
;
2019 count
-= 2; /* ignore the fcs characters */
2020 skb_trim(skb
, count
);
2023 * Process the active decompressor.
2025 if (ppp
->sc_rc_state
!= NULL
&&
2026 (ppp
->flags
& SC_DECOMP_RUN
) &&
2027 ((ppp
->flags
& (SC_DC_FERROR
| SC_DC_ERROR
)) == 0)) {
2028 if (PPP_PROTOCOL(data
) == PPP_COMP
) {
2030 * If the frame is compressed then decompress it.
2032 new_skb
= dev_alloc_skb(ppp
->mru
+ 128 + PPP_HDRLEN
);
2033 if (new_skb
== NULL
) {
2034 printk(KERN_ERR
"ppp_recv_frame: no memory\n");
2035 new_count
= DECOMP_ERROR
;
2037 new_count
= (*ppp
->sc_rcomp
->decompress
)
2038 (ppp
->sc_rc_state
, data
, count
,
2039 new_skb
->data
, ppp
->mru
+ PPP_HDRLEN
);
2041 if (new_count
> 0) {
2042 /* Frame was decompressed OK */
2046 data
= skb_put(skb
, count
);
2050 * On a decompression error, we pass the
2051 * compressed frame up to pppd as an
2054 if (ppp
->flags
& SC_DEBUG
)
2055 printk(KERN_INFO
"%s: decomp err %d\n",
2056 ppp
->name
, new_count
);
2059 if (ppp
->slcomp
!= 0)
2060 slhc_toss(ppp
->slcomp
);
2061 ++ppp
->stats
.ppp_ierrors
;
2062 if (new_count
== DECOMP_FATALERROR
) {
2063 ppp
->flags
|= SC_DC_FERROR
;
2065 ppp
->flags
|= SC_DC_ERROR
;
2072 * The frame is not compressed. Pass it to the
2073 * decompression code so it can update its
2074 * dictionary if necessary.
2076 (*ppp
->sc_rcomp
->incomp
)(ppp
->sc_rc_state
,
2080 else if (PPP_PROTOCOL(data
) == PPP_COMP
&& (ppp
->flags
& SC_DEBUG
))
2081 printk(KERN_INFO
"%s: not decomp, rc_state=%p flags=%x\n",
2082 ppp
->name
, ppp
->sc_rc_state
, ppp
->flags
);
2085 * Count the frame and print it
2087 ++ppp
->stats
.ppp_ipackets
;
2088 ppp
->stats
.ppp_ioctects
+= count
;
2089 if (ppp
->flags
& SC_LOG_INPKT
)
2090 ppp_print_buffer ("receive frame", data
, count
);
2093 * Find the procedure to handle this protocol.
2094 * The last one is marked as protocol 0 which is the 'catch-all'
2095 * to feed it to the pppd daemon.
2097 proto
= PPP_PROTOCOL(data
);
2098 proto_ptr
= proto_list
;
2099 while (proto_ptr
->proto
!= 0 && proto_ptr
->proto
!= proto
)
2103 * Update the appropriate statistic counter.
2105 if (!(*proto_ptr
->func
)(ppp
, skb
)) {
2107 ++ppp
->stats
.ppp_discards
;
2114 * An input error has been detected, so we need to inform
2115 * the VJ decompressor.
2118 ppp_receive_error(struct ppp
*ppp
)
2122 if (ppp
->slcomp
!= 0)
2123 slhc_toss(ppp
->slcomp
);
2127 * Put the input frame into the networking system for the indicated protocol
2130 ppp_rcv_rx(struct ppp
*ppp
, __u16 proto
, struct sk_buff
*skb
)
2134 * Fill in a few fields of the skb and give it to netif_rx().
2136 skb
->dev
= ppp2dev(ppp
); /* We are the device */
2137 skb
->protocol
= htons(proto
);
2138 skb_pull(skb
, PPP_HDRLEN
); /* pull off ppp header */
2139 skb
->mac
.raw
= skb
->data
;
2140 ppp
->last_recv
= jiffies
;
2141 #if LINUX_VERSION_CODE < VERSION(2,1,15)
2149 * Process the receipt of an IP frame
2152 rcv_proto_ip(struct ppp
*ppp
, struct sk_buff
*skb
)
2155 if ((ppp2dev(ppp
)->flags
& IFF_UP
) && (skb
->len
> 0)
2156 && ppp
->sc_npmode
[NP_IP
] == NPMODE_PASS
)
2157 return ppp_rcv_rx(ppp
, ETH_P_IP
, skb
);
2162 * Process the receipt of an IPX frame
2165 rcv_proto_ipx(struct ppp
*ppp
, struct sk_buff
*skb
)
2168 if (((ppp2dev(ppp
)->flags
& IFF_UP
) != 0) && (skb
->len
> 0)
2169 && ppp
->sc_npmode
[NP_IPX
] == NPMODE_PASS
)
2170 return ppp_rcv_rx(ppp
, ETH_P_IPX
, skb
);
2175 * Process the receipt of an Appletalk frame
2178 rcv_proto_at(struct ppp
*ppp
, struct sk_buff
*skb
)
2181 if ((ppp2dev(ppp
)->flags
& IFF_UP
) && (skb
->len
> 0)
2182 && ppp
->sc_npmode
[NP_AT
] == NPMODE_PASS
)
2183 return ppp_rcv_rx(ppp
, ETH_P_PPPTALK
, skb
);
 * Process the receipt of a VJ compressed frame
2191 rcv_proto_vjc_comp(struct ppp
*ppp
, struct sk_buff
*skb
)
2196 if ((ppp
->flags
& SC_REJ_COMP_TCP
) || ppp
->slcomp
== NULL
)
2198 new_count
= slhc_uncompress(ppp
->slcomp
, skb
->data
+ PPP_HDRLEN
,
2199 skb
->len
- PPP_HDRLEN
);
2200 if (new_count
< 0) {
2201 if (ppp
->flags
& SC_DEBUG
)
2203 "ppp: error in VJ decompression\n");
2206 skb_put(skb
, new_count
+ PPP_HDRLEN
- skb
->len
);
2207 return rcv_proto_ip(ppp
, skb
);
 * Process the receipt of a VJ uncompressed frame
2214 rcv_proto_vjc_uncomp(struct ppp
*ppp
, struct sk_buff
*skb
)
2217 if ((ppp
->flags
& SC_REJ_COMP_TCP
) || ppp
->slcomp
== NULL
)
2219 if (slhc_remember(ppp
->slcomp
, skb
->data
+ PPP_HDRLEN
,
2220 skb
->len
- PPP_HDRLEN
) <= 0) {
2221 if (ppp
->flags
& SC_DEBUG
)
2222 printk(KERN_NOTICE
"ppp: error in VJ memorizing\n");
2225 return rcv_proto_ip(ppp
, skb
);
2229 rcv_proto_ccp(struct ppp
*ppp
, struct sk_buff
*skb
)
2232 ppp_proto_ccp (ppp
, skb
->data
+ PPP_HDRLEN
, skb
->len
- PPP_HDRLEN
, 1);
2233 return rcv_proto_unknown(ppp
, skb
);
2237 * Receive all unclassified protocols.
2240 rcv_proto_unknown(struct ppp
*ppp
, struct sk_buff
*skb
)
2245 * Limit queue length by dropping old frames.
2247 skb_queue_tail(&ppp
->rcv_q
, skb
);
2248 while (ppp
->rcv_q
.qlen
> PPP_MAX_RCV_QLEN
) {
2249 struct sk_buff
*skb
= skb_dequeue(&ppp
->rcv_q
);
2254 wake_up_interruptible (&ppp
->read_wait
);
2255 if (ppp
->tty
->fasync
!= NULL
)
2256 kill_fasync (ppp
->tty
->fasync
, SIGIO
);
2261 /*************************************************************
2262 * TRANSMIT-SIDE ROUTINES
2263 *************************************************************/
/* local function to store a value into the LQR frame */
extern inline __u8 * store_long (register __u8 *p, register int value) {
	*p++ = (__u8) (value >> 24);
	*p++ = (__u8) (value >> 16);
	*p++ = (__u8) (value >>  8);
	*p++ = (__u8) value;
	return p;
}
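/*
 * Example (illustrative): values are stored most-significant byte first
 * (network order), so
 *
 *	p = store_long(p, 0x01020304);
 *
 * writes the bytes 01 02 03 04 and advances p by four; ppp_send_frame()
 * below uses this to patch the outgoing packet and octet counts into the
 * last two fields of an LQR frame.
 */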
 * Compress and send a frame to the peer.
 * Should be called with xmit_busy == 1, having been set by the caller.
 * That is, we use xmit_busy as a lock to prevent reentry of this
2281 ppp_send_frame(struct ppp
*ppp
, struct sk_buff
*skb
)
2293 /* dump the buffer */
2294 if (ppp
->flags
& SC_LOG_OUTPKT
)
2295 ppp_print_buffer ("write frame", data
, count
);
2298 * Handle various types of protocol-specific compression
2299 * and other processing, including:
2300 * - VJ TCP header compression
2301 * - updating LQR packets
2302 * - updating CCP state on CCP packets
2304 proto
= PPP_PROTOCOL(data
);
2307 if ((ppp
->flags
& SC_COMP_TCP
) && ppp
->slcomp
!= NULL
)
2308 skb
= ppp_vj_compress(ppp
, skb
);
2313 * Update the LQR frame with the current MIB information.
2314 * This way the information is accurate and up-to-date.
2318 p
= data
+ 40; /* Point to last two items. */
2319 p
= store_long(p
, ppp
->stats
.ppp_opackets
+ 1);
2320 p
= store_long(p
, ppp
->stats
.ppp_ooctects
+ count
);
2321 ++ppp
->stats
.ppp_olqrs
;
2326 * Outbound compression control frames
2328 ppp_proto_ccp(ppp
, data
+ PPP_HDRLEN
, count
- PPP_HDRLEN
, 0);
2335 * Compress the whole frame if possible.
2337 if (((ppp
->flags
& SC_COMP_RUN
) != 0) &&
2338 (ppp
->sc_xc_state
!= (void *) 0) &&
2339 (proto
!= PPP_LCP
) &&
2340 (proto
!= PPP_CCP
)) {
2341 struct sk_buff
*new_skb
;
2344 /* Allocate an skb for the compressed frame. */
2345 new_skb
= alloc_skb(ppp
->mtu
+ PPP_HDRLEN
, GFP_ATOMIC
);
2346 if (new_skb
== NULL
) {
2347 printk(KERN_ERR
"ppp_send_frame: no memory\n");
2353 /* Compress the frame. */
2354 new_count
= (*ppp
->sc_xcomp
->compress
)
2355 (ppp
->sc_xc_state
, data
, new_skb
->data
,
2356 count
, ppp
->mtu
+ PPP_HDRLEN
);
2358 /* Did it compress? */
2359 if (new_count
> 0 && (ppp
->flags
& SC_CCP_UP
)) {
2360 skb_put(new_skb
, new_count
);
2365 * The frame could not be compressed, or it could not
2366 * be sent in compressed form because CCP is down.
2375 ret
= ppp_async_send(ppp
, skb
);
2377 /* we can release the lock */
2379 } else if (ret
< 0) {
2380 /* can't happen, since the caller got the xmit_busy lock */
2381 printk(KERN_ERR
"ppp: ppp_async_send didn't accept pkt\n");
2386 * Apply VJ TCP header compression to a packet.
2388 static struct sk_buff
*
2389 ppp_vj_compress(struct ppp
*ppp
, struct sk_buff
*skb
)
2391 __u8
*orig_data
, *data
;
2392 struct sk_buff
*new_skb
;
2395 new_skb
= alloc_skb(skb
->len
, GFP_ATOMIC
);
2396 if (new_skb
== NULL
) {
2397 printk(KERN_ERR
"ppp: no memory for vj compression\n");
2401 orig_data
= data
= skb
->data
+ PPP_HDRLEN
;
2402 len
= slhc_compress(ppp
->slcomp
, data
, skb
->len
- PPP_HDRLEN
,
2403 new_skb
->data
+ PPP_HDRLEN
, &data
,
2404 (ppp
->flags
& SC_NO_TCP_CCID
) == 0);
2406 if (data
== orig_data
) {
2407 /* Couldn't compress the data */
2412 /* The data has been changed */
2413 if (data
[0] & SL_TYPE_COMPRESSED_TCP
) {
2414 proto
= PPP_VJC_COMP
;
2415 data
[0] ^= SL_TYPE_COMPRESSED_TCP
;
2417 if (data
[0] >= SL_TYPE_UNCOMPRESSED_TCP
)
2418 proto
= PPP_VJC_UNCOMP
;
2421 data
[0] = orig_data
[0];
2424 data
= skb_put(new_skb
, len
+ PPP_HDRLEN
);
2425 data
[0] = PPP_ALLSTATIONS
;
2435 ppp_send_frames(struct ppp
*ppp
)
2437 struct sk_buff
*skb
;
2439 while (!test_and_set_bit(0, &ppp
->xmit_busy
)) {
2440 skb
= skb_dequeue(&ppp
->xmt_q
);
2445 ppp_send_frame(ppp
, skb
);
2447 if (!ppp
->xmit_busy
&& ppp
->dev
.tbusy
) {
2454 * Called from the hardware (tty) layer when it can accept
2458 ppp_output_wakeup(struct ppp
*ppp
)
2462 if (!ppp
->xmit_busy
) {
2463 printk(KERN_ERR
"ppp_output_wakeup called but xmit_busy==0\n");
2467 ppp_send_frames(ppp
);
2471 * Send a control frame (from pppd).
2474 ppp_send_ctrl(struct ppp
*ppp
, struct sk_buff
*skb
)
2479 * Put the packet on the queue, then send as many as we can.
2481 skb_queue_tail(&ppp
->xmt_q
, skb
);
2482 ppp_send_frames(ppp
);
2486 /*************************************************************
2488 * This routine accepts requests from the network layer
2489 * and attempts to deliver the packets.
2490 *************************************************************/
2492 * Send a frame to the peer.
2493 * Returns 1 iff the frame was not accepted.
2496 ppp_dev_xmit(struct sk_buff
*skb
, struct device
*dev
)
2498 struct ppp
*ppp
= dev2ppp(dev
);
2499 struct tty_struct
*tty
= ppp2tty(ppp
);
2504 /* just a little sanity check. */
2507 if (skb
->data
== NULL
) {
2513 * Avoid timing problem should tty hangup while data is
2514 * queued to be sent.
2522 * Validate the tty interface
2525 if (ppp
->flags
& SC_DEBUG
)
2527 "ppp_dev_xmit: %s not connected to a TTY!\n",
2534 * Work out the appropriate network-protocol mode for this packet.
2536 npmode
= NPMODE_PASS
; /* default */
2537 switch (ntohs(skb
->protocol
)) {
2540 npmode
= ppp
->sc_npmode
[NP_IP
];
2544 npmode
= ppp
->sc_npmode
[NP_IPX
];
2549 npmode
= ppp
->sc_npmode
[NP_AT
];
2552 if (ppp
->flags
& SC_DEBUG
)
2553 printk(KERN_INFO
"%s: packet for unknown proto %x\n",
2554 ppp
->name
, ntohs(skb
->protocol
));
2560 * Drop, accept or reject the packet depending on the mode.
2568 * We may not send the packet now, so drop it.
2569 * XXX It would be nice to be able to return it to the
2570 * network system to be queued and retransmitted later.
2572 if (ppp
->flags
& SC_DEBUG
)
2573 printk(KERN_DEBUG
"%s: returning frame\n", ppp
->name
);
2579 if (ppp
->flags
& SC_DEBUG
)
2581 "ppp_dev_xmit: dropping (npmode = %d) on %s\n",
2588 * The dev->tbusy field acts as a lock to allow only
2589 * one packet to be processed at a time. If we can't
2590 * get the lock, try again later.
2591 * We deliberately queue as little as possible inside
2592 * the ppp driver in order to minimize the latency
2593 * for high-priority packets.
2595 if (test_and_set_bit(0, &ppp
->xmit_busy
)) {
2596 dev
->tbusy
= 1; /* can't take it now */
2602 * Put the 4-byte PPP header on the packet.
2603 * If there isn't room for it, we have to copy the packet.
2605 if (skb_headroom(skb
) < PPP_HDRLEN
) {
2606 struct sk_buff
*new_skb
;
2608 new_skb
= alloc_skb(skb
->len
+ PPP_HDRLEN
, GFP_ATOMIC
);
2609 if (new_skb
== NULL
) {
2610 printk(KERN_ERR
"%s: skb hdr alloc failed\n",
2614 ppp_send_frames(ppp
);
2617 skb_reserve(new_skb
, PPP_HDRLEN
);
2618 memcpy(skb_put(new_skb
, skb
->len
), skb
->data
, skb
->len
);
2623 hdr
= skb_push(skb
, PPP_HDRLEN
);
2624 hdr
[0] = PPP_ALLSTATIONS
;
2626 hdr
[2] = proto
>> 8;
2629 ppp_send_frame(ppp
, skb
);
2630 if (!ppp
->xmit_busy
)
2631 ppp_send_frames(ppp
);
2635 #if LINUX_VERSION_CODE < VERSION(2,1,15)
2637 * Null hard_header and header_rebuild routines.
2639 static int ppp_dev_header(struct sk_buff
*skb
, struct device
*dev
,
2640 unsigned short type
, void *daddr
,
2641 void *saddr
, unsigned int len
)
2646 static int ppp_dev_rebuild(void *eth
, struct device
*dev
,
2647 unsigned long raddr
, struct sk_buff
*skb
)
2651 #endif /* < 2.1.15 */
2654 * Generate the statistic information for the /proc/net/dev listing.
2656 static struct net_device_stats
*
2657 ppp_dev_stats (struct device
*dev
)
2659 struct ppp
*ppp
= dev2ppp (dev
);
2661 ppp
->estats
.rx_packets
= ppp
->stats
.ppp_ipackets
;
2662 ppp
->estats
.rx_errors
= ppp
->stats
.ppp_ierrors
;
2663 ppp
->estats
.tx_packets
= ppp
->stats
.ppp_opackets
;
2664 ppp
->estats
.tx_errors
= ppp
->stats
.ppp_oerrors
;
2665 #if LINUX_VERSION_CODE >= VERSION(2,1,25)
2666 ppp
->estats
.rx_bytes
= ppp
->stats
.ppp_ibytes
;
2667 ppp
->estats
.tx_bytes
= ppp
->stats
.ppp_obytes
;
2670 return &ppp
->estats
;
2673 /*************************************************************
2675 * Miscellany called by various functions above.
2676 *************************************************************/
2678 /* Locate the previous instance of the PPP channel */
2680 ppp_find(int pid_value
)
2684 /* try to find the device which this pid is already using */
2685 for (ppp
= ppp_list
; ppp
!= 0; ppp
= ppp
->next
) {
2686 if (ppp
->inuse
&& ppp
->sc_xfer
== pid_value
) {
2694 /* allocate or create a PPP channel */
	/* try to find a free device */
2704 for (ppp
= ppp_list
; ppp
!= 0; ppp
= ppp
->next
) {
2705 if (!test_and_set_bit(0, &ppp
->inuse
)) {
2707 if (dev
->flags
& IFF_UP
) {
2708 clear_bit(0, &ppp
->inuse
);
2711 /* Reregister device */
2712 unregister_netdev(dev
);
2713 if (register_netdev(dev
) == 0)
2715 printk(KERN_DEBUG
"could not reregister ppp device\n");
2716 /* leave inuse set in this case */
2721 * There are no available units, so make a new one.
2723 ppp
= (struct ppp
*) kmalloc(sizeof(struct ppp
), GFP_KERNEL
);
2725 printk(KERN_ERR
"ppp: struct ppp allocation failed\n");
2728 memset(ppp
, 0, sizeof(*ppp
));
2730 /* initialize channel control data */
2731 ppp
->magic
= PPP_MAGIC
;
2734 ppp
->read_wait
= NULL
;
2737 * Make up a suitable name for this device
2740 dev
->name
= ppp
->name
;
2741 if_num
= dev_alloc_name(dev
, "ppp%d");
2743 printk(KERN_ERR
"ppp: dev_alloc_name failed (%d)\n", if_num
);
2751 dev
->init
= ppp_init_dev
;
2752 dev
->name
= ppp
->name
;
2753 dev
->priv
= (void *) ppp
;
2755 /* register device so that we can be ifconfig'd */
2756 /* ppp_init_dev() will be called as a side-effect */
2757 status
= register_netdev (dev
);
2759 printk(KERN_INFO
"registered device %s\n", dev
->name
);
2762 "ppp_alloc - register_netdev(%s) = %d failure.\n",
2768 /* link this unit into our list */
2772 ppp_last
->next
= ppp
;
2779 * Initialize the generic parts of the ppp structure.
2782 ppp_generic_init(struct ppp
*ppp
)
2790 skb_queue_head_init(&ppp
->xmt_q
);
2791 skb_queue_head_init(&ppp
->rcv_q
);
2793 ppp
->last_xmit
= jiffies
;
2794 ppp
->last_recv
= jiffies
;
2797 /* clear statistics */
2798 memset(&ppp
->stats
, 0, sizeof (struct pppstat
));
2799 memset(&ppp
->estats
, 0, sizeof(struct net_device_stats
));
2801 /* PPP compression data */
2802 ppp
->sc_xc_state
= NULL
;
2803 ppp
->sc_rc_state
= NULL
;
2805 for (indx
= 0; indx
< NUM_NP
; ++indx
)
2806 ppp
->sc_npmode
[indx
] = NPMODE_PASS
;
2810 * Called to clean up the generic parts of the ppp structure.
2813 ppp_release(struct ppp
*ppp
)
2815 struct sk_buff
*skb
;
2817 CHECK_PPP_MAGIC(ppp
);
2819 if (ppp
->flags
& SC_DEBUG
)
2820 printk(KERN_DEBUG
"%s released\n", ppp
->name
);
2822 ppp_ccp_closed(ppp
);
2824 /* Ensure that the pppd process is not hanging on select()/poll() */
2825 wake_up_interruptible(&ppp
->read_wait
);
2828 slhc_free(ppp
->slcomp
);
2832 while ((skb
= skb_dequeue(&ppp
->rcv_q
)) != NULL
)
2834 while ((skb
= skb_dequeue(&ppp
->xmt_q
)) != NULL
)
2838 if (ppp
->dev
.tbusy
) {
2845 * Utility procedures to print a buffer in hex/ascii
2848 ppp_print_hex (register __u8
* out
, const __u8
* in
, int count
)
2850 register __u8 next_ch
;
2851 static char hex
[] = "0123456789ABCDEF";
2853 while (count
-- > 0) {
2855 *out
++ = hex
[(next_ch
>> 4) & 0x0F];
2856 *out
++ = hex
[next_ch
& 0x0F];
2862 ppp_print_char (register __u8
* out
, const __u8
* in
, int count
)
2864 register __u8 next_ch
;
2866 while (count
-- > 0) {
2869 if (next_ch
< 0x20 || next_ch
> 0x7e)
2873 if (next_ch
== '%') /* printk/syslogd has a bug !! */
2881 ppp_print_buffer (const char *name
, const __u8
*buf
, int count
)
2886 printk(KERN_DEBUG
"ppp: %s, count = %d\n", name
, count
);
2889 memset (line
, 32, 44);
2890 ppp_print_hex (line
, buf
, 8);
2891 ppp_print_char (&line
[8 * 3], buf
, 8);
2892 printk(KERN_DEBUG
"%s\n", line
);
2898 memset (line
, 32, 44);
2899 ppp_print_hex (line
, buf
, count
);
2900 ppp_print_char (&line
[8 * 3], buf
, count
);
2901 printk(KERN_DEBUG
"%s\n", line
);
/*************************************************************
 * Compressor module interface
 *************************************************************/

struct compressor_link {
	struct compressor_link	*next;
	struct compressor	*comp;
};

static struct compressor_link *ppp_compressors = (struct compressor_link *) 0;
2916 static struct compressor
*find_compressor (int type
)
2918 struct compressor_link
*lnk
;
2919 unsigned long flags
;
2924 lnk
= ppp_compressors
;
2925 while (lnk
!= (struct compressor_link
*) 0) {
2926 if ((int) (__u8
) lnk
->comp
->compress_proto
== type
) {
2927 restore_flags(flags
);
2933 restore_flags(flags
);
2934 return (struct compressor
*) 0;
2937 #ifdef CONFIG_MODULES
2938 static int ppp_register_compressor (struct compressor
*cp
)
2940 struct compressor_link
*new;
2941 unsigned long flags
;
2943 new = (struct compressor_link
*)
2944 kmalloc (sizeof (struct compressor_link
), GFP_KERNEL
);
2946 if (new == (struct compressor_link
*) 0)
2952 if (find_compressor (cp
->compress_proto
)) {
2953 restore_flags(flags
);
2958 new->next
= ppp_compressors
;
2960 ppp_compressors
= new;
2962 restore_flags(flags
);
2966 static void ppp_unregister_compressor (struct compressor
*cp
)
2968 struct compressor_link
*prev
= (struct compressor_link
*) 0;
2969 struct compressor_link
*lnk
;
2970 unsigned long flags
;
2975 lnk
= ppp_compressors
;
2976 while (lnk
!= (struct compressor_link
*) 0) {
2977 if (lnk
->comp
== cp
) {
2979 prev
->next
= lnk
->next
;
2981 ppp_compressors
= lnk
->next
;
2988 restore_flags(flags
);
2992 /*************************************************************
2993 * Module support routines
2994 *************************************************************/
3002 /* register our line disciplines */
3003 status
= ppp_first_time();
3005 printk(KERN_INFO
"PPP: ppp_init() failure %d\n", status
);
3006 #if LINUX_VERSION_CODE < VERSION(2,1,18)
3008 (void) register_symtab (&ppp_syms
);
3015 cleanup_module(void)
3018 struct ppp
*ppp
, *next_ppp
;
3022 * Ensure that the devices are not in operation.
3024 for (ppp
= ppp_list
; ppp
!= 0; ppp
= ppp
->next
) {
3025 CHECK_PPP_MAGIC(ppp
);
3026 if (ppp
->inuse
|| (ppp
->dev
.flags
& IFF_UP
))
3030 printk(KERN_CRIT
"PPP: removing despite %d units in use!\n",
3034 * Release the tty registration of the line discipline so that
3035 * ttys can no longer be put into PPP line discipline.
3037 status
= tty_register_ldisc (N_PPP
, NULL
);
3040 "PPP: Unable to unregister ppp line discipline "
3041 "(err = %d)\n", status
);
3044 "PPP: ppp line discipline successfully unregistered\n");
3047 * De-register the devices so that there is no problem with them
3049 for (ppp
= ppp_list
; ppp
!= 0; ppp
= next_ppp
) {
3050 next_ppp
= ppp
->next
;
3051 unregister_netdev(&ppp
->dev
);