/* drivers/net/wan/hdlc_ppp.c */

/*
 * Generic HDLC support routines for Linux
 * Point-to-point protocol support
 *
 * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define DEBUG_CP		0	/* also bytes# to dump */
#define DEBUG_STATE		0
#define DEBUG_HARD_HEADER	0

#define HDLC_ADDR_ALLSTATIONS	0xFF
#define HDLC_CTRL_UI		0x03

#define PID_LCP			0xC021
#define PID_IP			0x0021
#define PID_IPCP		0x8021
#define PID_IPV6		0x0057
#define PID_IPV6CP		0x8057

enum {IDX_LCP = 0, IDX_IPCP, IDX_IPV6CP, IDX_COUNT};
enum {CP_CONF_REQ = 1, CP_CONF_ACK, CP_CONF_NAK, CP_CONF_REJ, CP_TERM_REQ,
      CP_TERM_ACK, CP_CODE_REJ, LCP_PROTO_REJ, LCP_ECHO_REQ, LCP_ECHO_REPLY,
      LCP_DISC_REQ, CP_CODES};
#if DEBUG_CP
static const char *const code_names[CP_CODES] = {
	"0", "ConfReq", "ConfAck", "ConfNak", "ConfRej", "TermReq",
	"TermAck", "CodeRej", "ProtoRej", "EchoReq", "EchoReply", "Discard"
};
static char debug_buffer[64 + 3 * DEBUG_CP];
#endif

enum {LCP_OPTION_MRU = 1, LCP_OPTION_ACCM, LCP_OPTION_MAGIC = 5};

struct hdlc_header {
	u8 address;
	u8 control;
	__be16 protocol;
} __packed;

struct cp_header {
	u8 code;
	u8 id;
	__be16 len;
} __packed;

struct proto {
	struct net_device *dev;
	struct timer_list timer;
	unsigned long timeout;
	u16 pid;		/* protocol ID */
	u8 state;
	u8 cr_id;		/* ID of last Configuration-Request */
	u8 restart_counter;
};

struct ppp {
	struct proto protos[IDX_COUNT];
	spinlock_t lock;
	unsigned long last_pong;
	unsigned int req_timeout, cr_retries, term_retries;
	unsigned int keepalive_interval, keepalive_timeout;
	u8 seq;			/* local sequence number for requests */
	u8 echo_id;		/* ID of last Echo-Request (LCP) */
};

enum {CLOSED = 0, STOPPED, STOPPING, REQ_SENT, ACK_RECV, ACK_SENT, OPENED,
      STATES, STATE_MASK = 0xF};
enum {START = 0, STOP, TO_GOOD, TO_BAD, RCR_GOOD, RCR_BAD, RCA, RCN, RTR, RTA,
      RUC, RXJ_GOOD, RXJ_BAD, EVENTS};
enum {INV = 0x10, IRC = 0x20, ZRC = 0x40, SCR = 0x80, SCA = 0x100,
      SCN = 0x200, STR = 0x400, STA = 0x800, SCJ = 0x1000};

#if DEBUG_STATE
static const char *const state_names[STATES] = {
	"Closed", "Stopped", "Stopping", "ReqSent", "AckRecv", "AckSent",
	"Opened"
};
static const char *const event_names[EVENTS] = {
	"Start", "Stop", "TO+", "TO-", "RCR+", "RCR-", "RCA", "RCN",
	"RTR", "RTA", "RUC", "RXJ+", "RXJ-"
};
#endif

static struct sk_buff_head tx_queue; /* used when holding the spin lock */

static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr);

static inline struct ppp* get_ppp(struct net_device *dev)
{
	return (struct ppp *)dev_to_hdlc(dev)->state;
}

static inline struct proto* get_proto(struct net_device *dev, u16 pid)
{
	struct ppp *ppp = get_ppp(dev);

	switch (pid) {
	case PID_LCP:
		return &ppp->protos[IDX_LCP];
	case PID_IPCP:
		return &ppp->protos[IDX_IPCP];
	case PID_IPV6CP:
		return &ppp->protos[IDX_IPV6CP];
	default:
		return NULL;
	}
}

static inline const char* proto_name(u16 pid)
{
	switch (pid) {
	case PID_LCP:
		return "LCP";
	case PID_IPCP:
		return "IPCP";
	case PID_IPV6CP:
		return "IPV6CP";
	default:
		return NULL;
	}
}

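/* Map the protocol field of a received HDLC/PPP frame to an Ethernet
   protocol type: IP and IPv6 payloads are handed to the network stack,
   everything else stays ETH_P_HDLC, which the generic HDLC layer feeds
   back into ppp_rx(). */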
static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct hdlc_header *data = (struct hdlc_header*)skb->data;

	if (skb->len < sizeof(struct hdlc_header))
		return htons(ETH_P_HDLC);
	if (data->address != HDLC_ADDR_ALLSTATIONS ||
	    data->control != HDLC_CTRL_UI)
		return htons(ETH_P_HDLC);

	switch (data->protocol) {
	case cpu_to_be16(PID_IP):
		skb_pull(skb, sizeof(struct hdlc_header));
		return htons(ETH_P_IP);

	case cpu_to_be16(PID_IPV6):
		skb_pull(skb, sizeof(struct hdlc_header));
		return htons(ETH_P_IPV6);

	default:
		return htons(ETH_P_HDLC);
	}
}

static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev,
			   u16 type, const void *daddr, const void *saddr,
			   unsigned int len)
{
	struct hdlc_header *data;
#if DEBUG_HARD_HEADER
	printk(KERN_DEBUG "%s: ppp_hard_header() called\n", dev->name);
#endif

	skb_push(skb, sizeof(struct hdlc_header));
	data = (struct hdlc_header*)skb->data;

	data->address = HDLC_ADDR_ALLSTATIONS;
	data->control = HDLC_CTRL_UI;
	switch (type) {
	case ETH_P_IP:
		data->protocol = htons(PID_IP);
		break;
	case ETH_P_IPV6:
		data->protocol = htons(PID_IPV6);
		break;
	case PID_LCP:
	case PID_IPCP:
	case PID_IPV6CP:
		data->protocol = htons(type);
		break;
	default:		/* unknown protocol */
		data->protocol = 0;
	}
	return sizeof(struct hdlc_header);
}

static void ppp_tx_flush(void)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(&tx_queue)) != NULL)
		dev_queue_xmit(skb);
}

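/* Build an LCP/IPCP/IPV6CP packet and queue it on tx_queue rather than
   transmitting it directly: callers run under ppp->lock with IRQs
   disabled, so the actual dev_queue_xmit() happens later, from
   ppp_tx_flush(). */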
static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
		      u8 id, unsigned int len, const void *data)
{
	struct sk_buff *skb;
	struct cp_header *cp;
	unsigned int magic_len = 0;
	static u32 magic;

#if DEBUG_CP
	int i;
	char *ptr;
#endif

	if (pid == PID_LCP && (code == LCP_ECHO_REQ || code == LCP_ECHO_REPLY))
		magic_len = sizeof(magic);

	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
			    sizeof(struct cp_header) + magic_len + len);
	if (!skb) {
		netdev_warn(dev, "out of memory in ppp_tx_cp()\n");
		return;
	}
	skb_reserve(skb, sizeof(struct hdlc_header));

	cp = (struct cp_header *)skb_put(skb, sizeof(struct cp_header));
	cp->code = code;
	cp->id = id;
	cp->len = htons(sizeof(struct cp_header) + magic_len + len);

	if (magic_len)
		memcpy(skb_put(skb, magic_len), &magic, magic_len);
	if (len)
		memcpy(skb_put(skb, len), data, len);

#if DEBUG_CP
	BUG_ON(code >= CP_CODES);
	ptr = debug_buffer;
	*ptr = '\x0';
	for (i = 0; i < min_t(unsigned int, magic_len + len, DEBUG_CP); i++) {
		sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]);
		ptr += strlen(ptr);
	}
	printk(KERN_DEBUG "%s: TX %s [%s id 0x%X]%s\n", dev->name,
	       proto_name(pid), code_names[code], id, debug_buffer);
#endif

	ppp_hard_header(skb, dev, pid, NULL, NULL, 0);

	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb_reset_network_header(skb);
	skb_queue_tail(&tx_queue, skb);
}

/* State transition table (compare STD-51)
     Events                                    Actions
   TO+  = Timeout with counter > 0             irc = Initialize-Restart-Count
   TO-  = Timeout with counter expired         zrc = Zero-Restart-Count

   RCR+ = Receive-Configure-Request (Good)     scr = Send-Configure-Request
   RCR- = Receive-Configure-Request (Bad)
   RCA  = Receive-Configure-Ack                sca = Send-Configure-Ack
   RCN  = Receive-Configure-Nak/Rej            scn = Send-Configure-Nak/Rej

   RTR  = Receive-Terminate-Request            str = Send-Terminate-Request
   RTA  = Receive-Terminate-Ack                sta = Send-Terminate-Ack

   RUC  = Receive-Unknown-Code                 scj = Send-Code-Reject
   RXJ+ = Receive-Code-Reject (permitted)
	  or Receive-Protocol-Reject
   RXJ- = Receive-Code-Reject (catastrophic)
	  or Receive-Protocol-Reject
*/
static int cp_table[EVENTS][STATES] = {
	/*  CLOSED        STOPPED      STOPPING  REQ_SENT  ACK_RECV  ACK_SENT   OPENED
	      0              1            2         3         4         5          6   */
	{ IRC|SCR|3,      INV      ,   INV  ,    INV   ,   INV  ,    INV   ,    INV   }, /* START */
	{    INV   ,       0       ,    0   ,     0    ,    0   ,     0    ,     0    }, /* STOP */
	{    INV   ,      INV      ,  STR|2 ,   SCR|3  ,  SCR|3 ,   SCR|5  ,    INV   }, /* TO+ */
	{    INV   ,      INV      ,    1   ,     1    ,    1   ,     1    ,    INV   }, /* TO- */
	{   STA|0  , IRC|SCR|SCA|5 ,    2   ,   SCA|5  ,  SCA|6 ,   SCA|5  , SCR|SCA|5}, /* RCR+ */
	{   STA|0  , IRC|SCR|SCN|3 ,    2   ,   SCN|3  ,  SCN|4 ,   SCN|3  , SCR|SCN|3}, /* RCR- */
	{   STA|0  ,     STA|1     ,    2   ,   IRC|4  ,  SCR|3 ,     6    ,   SCR|3  }, /* RCA */
	{   STA|0  ,     STA|1     ,    2   , IRC|SCR|3,  SCR|3 , IRC|SCR|5,   SCR|3  }, /* RCN */
	{   STA|0  ,     STA|1     ,  STA|2 ,   STA|3  ,  STA|3 ,   STA|3  , ZRC|STA|2}, /* RTR */
	{     0    ,       1       ,    1   ,     3    ,    3   ,     5    ,   SCR|3  }, /* RTA */
	{   SCJ|0  ,     SCJ|1     ,  SCJ|2 ,   SCJ|3  ,  SCJ|4 ,   SCJ|5  ,   SCJ|6  }, /* RUC */
	{     0    ,       1       ,    2   ,     3    ,    3   ,     5    ,     6    }, /* RXJ+ */
	{     0    ,       1       ,    1   ,     1    ,    1   ,     1    , IRC|STR|2}, /* RXJ- */
};

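/* Example of reading the table: in state REQ_SENT (3), a good
   Configure-Request (RCR+) yields SCA|5: send Configure-Ack and enter
   ACK_SENT; a Configure-Ack (RCA) yields IRC|4: reinitialize the restart
   counter and enter ACK_RECV. */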
/* SCA: RCR+ must supply id, len and data
   SCN: RCR- must supply code, id, len and data
   STA: RTR must supply id
   SCJ: RUC must supply CP packet len and data */
static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
			 u8 id, unsigned int len, const void *data)
{
	int old_state, action;
	struct ppp *ppp = get_ppp(dev);
	struct proto *proto = get_proto(dev, pid);

	old_state = proto->state;
	BUG_ON(old_state >= STATES);
	BUG_ON(event >= EVENTS);

#if DEBUG_STATE
	printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name,
	       proto_name(pid), event_names[event], state_names[proto->state]);
#endif

	action = cp_table[event][old_state];

	proto->state = action & STATE_MASK;
	if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */
		mod_timer(&proto->timer, proto->timeout =
			  jiffies + ppp->req_timeout * HZ);
	if (action & ZRC)
		proto->restart_counter = 0;
	if (action & IRC)
		proto->restart_counter = (proto->state == STOPPING) ?
			ppp->term_retries : ppp->cr_retries;

	if (action & SCR)	/* send Configure-Request */
		ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq,
			  0, NULL);
	if (action & SCA)	/* send Configure-Ack */
		ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data);
	if (action & SCN)	/* send Configure-Nak/Reject */
		ppp_tx_cp(dev, pid, code, id, len, data);
	if (action & STR)	/* send Terminate-Request */
		ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL);
	if (action & STA)	/* send Terminate-Ack */
		ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL);
	if (action & SCJ)	/* send Code-Reject */
		ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data);

	if (old_state != OPENED && proto->state == OPENED) {
		netdev_info(dev, "%s up\n", proto_name(pid));
		if (pid == PID_LCP) {
			netif_dormant_off(dev);
			ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL);
			ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL);
			ppp->last_pong = jiffies;
			mod_timer(&proto->timer, proto->timeout =
				  jiffies + ppp->keepalive_interval * HZ);
		}
	}
	if (old_state == OPENED && proto->state != OPENED) {
		netdev_info(dev, "%s down\n", proto_name(pid));
		if (pid == PID_LCP) {
			netif_dormant_on(dev);
			ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL);
			ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL);
		}
	}

	if (old_state != CLOSED && proto->state == CLOSED)
		del_timer(&proto->timer);

#if DEBUG_STATE
	printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
	       proto_name(pid), event_names[event], state_names[proto->state]);
#endif
}

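/* Parse a received Configure-Request: acceptable options are ACKed as a
   whole, a non-default ACCM is NAKed with the value we do accept, and any
   other option is copied into "out" and rejected. The result is reported
   to the state machine as RCR_GOOD or RCR_BAD. */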
static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
			    unsigned int req_len, const u8 *data)
{
	static u8 const valid_accm[6] = { LCP_OPTION_ACCM, 6, 0, 0, 0, 0 };
	const u8 *opt;
	u8 *out;
	unsigned int len = req_len, nak_len = 0, rej_len = 0;

	if (!(out = kmalloc(len, GFP_ATOMIC))) {
		dev->stats.rx_dropped++;
		return;	/* out of memory, ignore CR packet */
	}

	for (opt = data; len; len -= opt[1], opt += opt[1]) {
		if (len < 2 || len < opt[1]) {
			dev->stats.rx_errors++;
			kfree(out);
			return; /* bad packet, drop silently */
		}

		if (pid == PID_LCP)
			switch (opt[0]) {
			case LCP_OPTION_MRU:
				continue; /* MRU always OK and > 1500 bytes? */

			case LCP_OPTION_ACCM: /* async control character map */
				if (!memcmp(opt, valid_accm,
					    sizeof(valid_accm)))
					continue;
				if (!rej_len) { /* NAK it */
					memcpy(out + nak_len, valid_accm,
					       sizeof(valid_accm));
					nak_len += sizeof(valid_accm);
					continue;
				}
				break;
			case LCP_OPTION_MAGIC:
				if (opt[1] != 6 || (!opt[2] && !opt[3] &&
						    !opt[4] && !opt[5]))
					break; /* reject invalid magic number */
				continue;
			}
		/* reject this option */
		memcpy(out + rej_len, opt, opt[1]);
		rej_len += opt[1];
	}

	if (rej_len)
		ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_REJ, id, rej_len, out);
	else if (nak_len)
		ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_NAK, id, nak_len, out);
	else
		ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);

	kfree(out);
}

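/* Receive handler for control frames (skb->protocol == ETH_P_HDLC).
   Validates the HDLC and CP headers, handles the LCP-only codes
   (Protocol-Reject, Echo, Discard) and feeds the remaining codes into the
   state machine. Replies queued under ppp->lock are sent by
   ppp_tx_flush() after the lock is released. */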
static int ppp_rx(struct sk_buff *skb)
{
	struct hdlc_header *hdr = (struct hdlc_header*)skb->data;
	struct net_device *dev = skb->dev;
	struct ppp *ppp = get_ppp(dev);
	struct proto *proto;
	struct cp_header *cp;
	unsigned long flags;
	unsigned int len;
	u16 pid;
#if DEBUG_CP
	int i;
	char *ptr;
#endif

	spin_lock_irqsave(&ppp->lock, flags);
	/* Check HDLC header */
	if (skb->len < sizeof(struct hdlc_header))
		goto rx_error;
	cp = (struct cp_header*)skb_pull(skb, sizeof(struct hdlc_header));
	if (hdr->address != HDLC_ADDR_ALLSTATIONS ||
	    hdr->control != HDLC_CTRL_UI)
		goto rx_error;

	pid = ntohs(hdr->protocol);
	proto = get_proto(dev, pid);
	if (!proto) {
		if (ppp->protos[IDX_LCP].state == OPENED)
			ppp_tx_cp(dev, PID_LCP, LCP_PROTO_REJ,
				  ++ppp->seq, skb->len + 2, &hdr->protocol);
		goto rx_error;
	}

	len = ntohs(cp->len);
	if (len < sizeof(struct cp_header) /* no complete CP header? */ ||
	    skb->len < len /* truncated packet? */)
		goto rx_error;
	skb_pull(skb, sizeof(struct cp_header));
	len -= sizeof(struct cp_header);

	/* HDLC and CP headers stripped from skb */
#if DEBUG_CP
	if (cp->code < CP_CODES)
		sprintf(debug_buffer, "[%s id 0x%X]", code_names[cp->code],
			cp->id);
	else
		sprintf(debug_buffer, "[code %u id 0x%X]", cp->code, cp->id);
	ptr = debug_buffer + strlen(debug_buffer);
	for (i = 0; i < min_t(unsigned int, len, DEBUG_CP); i++) {
		sprintf(ptr, " %02X", skb->data[i]);
		ptr += strlen(ptr);
	}
	printk(KERN_DEBUG "%s: RX %s %s\n", dev->name, proto_name(pid),
	       debug_buffer);
#endif

	/* LCP only */
	if (pid == PID_LCP)
		switch (cp->code) {
		case LCP_PROTO_REJ:
			pid = ntohs(*(__be16*)skb->data);
			if (pid == PID_LCP || pid == PID_IPCP ||
			    pid == PID_IPV6CP)
				ppp_cp_event(dev, pid, RXJ_BAD, 0, 0,
					     0, NULL);
			goto out;

		case LCP_ECHO_REQ: /* send Echo-Reply */
			if (len >= 4 && proto->state == OPENED)
				ppp_tx_cp(dev, PID_LCP, LCP_ECHO_REPLY,
					  cp->id, len - 4, skb->data + 4);
			goto out;

		case LCP_ECHO_REPLY:
			if (cp->id == ppp->echo_id)
				ppp->last_pong = jiffies;
			goto out;

		case LCP_DISC_REQ: /* discard */
			goto out;
		}

	/* LCP, IPCP and IPV6CP */
	switch (cp->code) {
	case CP_CONF_REQ:
		ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data);
		break;

	case CP_CONF_ACK:
		if (cp->id == proto->cr_id)
			ppp_cp_event(dev, pid, RCA, 0, 0, 0, NULL);
		break;

	case CP_CONF_REJ:
	case CP_CONF_NAK:
		if (cp->id == proto->cr_id)
			ppp_cp_event(dev, pid, RCN, 0, 0, 0, NULL);
		break;

	case CP_TERM_REQ:
		ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL);
		break;

	case CP_TERM_ACK:
		ppp_cp_event(dev, pid, RTA, 0, 0, 0, NULL);
		break;

	case CP_CODE_REJ:
		ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL);
		break;

	default:
		len += sizeof(struct cp_header);
		if (len > dev->mtu)
			len = dev->mtu;
		ppp_cp_event(dev, pid, RUC, 0, 0, len, cp);
		break;
	}
	goto out;

rx_error:
	dev->stats.rx_errors++;
out:
	spin_unlock_irqrestore(&ppp->lock, flags);
	dev_kfree_skb_any(skb);
	ppp_tx_flush();
	return NET_RX_DROP;
}

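/* Per-protocol retransmission/keepalive timer. In the negotiation states
   it generates TO+/TO- events driven by restart_counter; for an opened
   LCP it sends Echo-Requests and restarts the link if no Echo-Reply was
   seen within keepalive_timeout seconds. */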
static void ppp_timer(unsigned long arg)
{
	struct proto *proto = (struct proto *)arg;
	struct ppp *ppp = get_ppp(proto->dev);
	unsigned long flags;

	spin_lock_irqsave(&ppp->lock, flags);
	switch (proto->state) {
	case STOPPING:
	case REQ_SENT:
	case ACK_RECV:
	case ACK_SENT:
		if (proto->restart_counter) {
			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
				     0, NULL);
			proto->restart_counter--;
		} else
			ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
				     0, NULL);
		break;

	case OPENED:
		if (proto->pid != PID_LCP)
			break;
		if (time_after(jiffies, ppp->last_pong +
			       ppp->keepalive_timeout * HZ)) {
			netdev_info(proto->dev, "Link down\n");
			ppp_cp_event(proto->dev, PID_LCP, STOP, 0, 0, 0, NULL);
			ppp_cp_event(proto->dev, PID_LCP, START, 0, 0, 0, NULL);
		} else {	/* send keep-alive packet */
			ppp->echo_id = ++ppp->seq;
			ppp_tx_cp(proto->dev, PID_LCP, LCP_ECHO_REQ,
				  ppp->echo_id, 0, NULL);
			proto->timer.expires = jiffies +
				ppp->keepalive_interval * HZ;
			add_timer(&proto->timer);
		}
		break;
	}
	spin_unlock_irqrestore(&ppp->lock, flags);
	ppp_tx_flush();
}

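/* Called by the generic HDLC layer when the link comes up: set up the
   per-protocol timers and state, then start LCP negotiation. */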
static void ppp_start(struct net_device *dev)
{
	struct ppp *ppp = get_ppp(dev);
	int i;

	for (i = 0; i < IDX_COUNT; i++) {
		struct proto *proto = &ppp->protos[i];
		proto->dev = dev;
		init_timer(&proto->timer);
		proto->timer.function = ppp_timer;
		proto->timer.data = (unsigned long)proto;
		proto->state = CLOSED;
	}
	ppp->protos[IDX_LCP].pid = PID_LCP;
	ppp->protos[IDX_IPCP].pid = PID_IPCP;
	ppp->protos[IDX_IPV6CP].pid = PID_IPV6CP;

	ppp_cp_event(dev, PID_LCP, START, 0, 0, 0, NULL);
}

static void ppp_stop(struct net_device *dev)
{
	ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
}

static void ppp_close(struct net_device *dev)
{
	ppp_tx_flush();
}

static struct hdlc_proto proto = {
	.start		= ppp_start,
	.stop		= ppp_stop,
	.close		= ppp_close,
	.type_trans	= ppp_type_trans,
	.ioctl		= ppp_ioctl,
	.netif_rx	= ppp_rx,
	.module		= THIS_MODULE,
};

static const struct header_ops ppp_header_ops = {
	.create = ppp_hard_header,
};

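/* Protocol setup through the generic HDLC ioctl interface: IF_GET_PROTO
   reports that PPP is attached; IF_PROTO_PPP attaches this protocol
   (NRZ encoding, CCITT CRC16), installs the defaults set below and keeps
   the interface dormant until LCP reaches OPENED. */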
static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ppp *ppp;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto)
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_PPP;
		return 0; /* return protocol only, no settable parameters */

	case IF_PROTO_PPP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		/* no settable parameters */

		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		result = attach_hdlc_protocol(dev, &proto, sizeof(struct ppp));
		if (result)
			return result;

		ppp = get_ppp(dev);
		spin_lock_init(&ppp->lock);
		ppp->req_timeout = 2;
		ppp->cr_retries = 10;
		ppp->term_retries = 2;
		ppp->keepalive_interval = 10;
		ppp->keepalive_timeout = 60;

		dev->hard_header_len = sizeof(struct hdlc_header);
		dev->header_ops = &ppp_header_ops;
		dev->type = ARPHRD_PPP;
		netif_dormant_on(dev);
		return 0;
	}

	return -EINVAL;
}

static int __init mod_init(void)
{
	skb_queue_head_init(&tx_queue);
	register_hdlc_protocol(&proto);
	return 0;
}

static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}

module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("PPP protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");