/*
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * This file contains some code of the original L2TPv2 pppol2tp
 * driver, which has the following copyright:
 *
 * Authors:	Martijn van Oosterhout <kleptog@svana.org>
 *		James Chapman (jchapman@katalix.com)
 *		Michal Ostrowski <mostrows@speakeasy.net>
 *		Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
 *		David S. Miller (davem@redhat.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/udp.h>
#include <linux/l2tp.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/file.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/udp_tunnel.h>
#include <net/inet_common.h>
#include <net/protocol.h>
#include <net/inet6_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>

#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "l2tp_core.h"
#define L2TP_DRV_VERSION	"V2.0"

/* L2TP header constants */
#define L2TP_HDRFLAG_T	   0x8000
#define L2TP_HDRFLAG_L	   0x4000
#define L2TP_HDRFLAG_S	   0x0800
#define L2TP_HDRFLAG_O	   0x0200
#define L2TP_HDRFLAG_P	   0x0100

#define L2TP_HDR_VER_MASK  0x000F
#define L2TP_HDR_VER_2	   0x0002
#define L2TP_HDR_VER_3	   0x0003

/* L2TPv3 default L2-specific sublayer */
#define L2TP_SLFLAG_S	   0x40000000
#define L2TP_SL_SEQ_MASK   0x00ffffff

#define L2TP_HDR_SIZE_SEQ		10
#define L2TP_HDR_SIZE_NOSEQ		6

/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS	0
/* Private data stored for received packets in the skb.
 */
struct l2tp_skb_cb {
	u32			ns;
	u16			has_seq;
	u16			length;
	unsigned long		expires;
};

#define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
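
/* The receive path parks per-packet L2TP state (ns, has_seq, length,
 * expires) in skb->cb[], just past the inet_skb_parm area used by IP, so
 * struct l2tp_skb_cb must stay within the 48-byte cb[] budget.
 */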
static struct workqueue_struct *l2tp_wq;

/* per-net private data for this module */
static unsigned int l2tp_net_id;
struct l2tp_net {
	struct list_head l2tp_tunnel_list;
	spinlock_t l2tp_tunnel_list_lock;
	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
	spinlock_t l2tp_session_hlist_lock;
};
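
/* Tunnels hang off a per-net RCU list.  L2TPv2 sessions are hashed only in
 * their tunnel's session_hlist; L2TPv3 sessions are additionally hashed in
 * the per-net l2tp_session_hlist so they can be found by session id alone
 * (see l2tp_session_id_hash() and l2tp_session_id_hash_2() below).
 */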
static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
{
	return sk->sk_user_data;
}

static inline struct l2tp_net *l2tp_pernet(const struct net *net)
{
	return net_generic(net, l2tp_net_id);
}
/* Session hash global list for L2TPv3.
 * The session_id SHOULD be random according to RFC3931, but several
 * L2TP implementations use incrementing session_ids. So we do a real
 * hash on the session_id, rather than a simple bitmask.
 */
static inline struct hlist_head *
l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
{
	return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
}
/* Session hash list.
 * The session_id SHOULD be random according to RFC2661, but several
 * L2TP implementations (Cisco and Microsoft) use incrementing
 * session_ids. So we do a real hash on the session_id, rather than a
 * simple bitmask.
 */
static inline struct hlist_head *
l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
{
	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
}
void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
	sock_put(tunnel->sock);
	/* the tunnel is freed in the socket destructor */
}
EXPORT_SYMBOL(l2tp_tunnel_free);
/* Lookup a tunnel. A new reference is held on the returned tunnel. */
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
{
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (tunnel->tunnel_id == tunnel_id) {
			l2tp_tunnel_inc_refcount(tunnel);
			rcu_read_unlock_bh();

			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
/* Lookup a session. A new reference is held on the returned session. */
struct l2tp_session *l2tp_session_get(const struct net *net,
				      struct l2tp_tunnel *tunnel,
				      u32 session_id)
{
	struct hlist_head *session_list;
	struct l2tp_session *session;

	if (!tunnel) {
		struct l2tp_net *pn = l2tp_pernet(net);

		session_list = l2tp_session_id_hash_2(pn, session_id);

		rcu_read_lock_bh();
		hlist_for_each_entry_rcu(session, session_list, global_hlist) {
			if (session->session_id == session_id) {
				l2tp_session_inc_refcount(session);
				rcu_read_unlock_bh();

				return session;
			}
		}
		rcu_read_unlock_bh();

		return NULL;
	}

	session_list = l2tp_session_id_hash(tunnel, session_id);
	read_lock_bh(&tunnel->hlist_lock);
	hlist_for_each_entry(session, session_list, hlist) {
		if (session->session_id == session_id) {
			l2tp_session_inc_refcount(session);
			read_unlock_bh(&tunnel->hlist_lock);

			return session;
		}
	}
	read_unlock_bh(&tunnel->hlist_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get);
struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
{
	int hash;
	struct l2tp_session *session;
	int count = 0;

	read_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
		hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
			if (++count > nth) {
				l2tp_session_inc_refcount(session);
				read_unlock_bh(&tunnel->hlist_lock);

				return session;
			}
		}
	}
	read_unlock_bh(&tunnel->hlist_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
/* Lookup a session by interface name.
 * This is very inefficient but is only used by management interfaces.
 */
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
						const char *ifname)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	int hash;
	struct l2tp_session *session;

	rcu_read_lock_bh();
	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
		hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
			if (!strcmp(session->ifname, ifname)) {
				l2tp_session_inc_refcount(session);
				rcu_read_unlock_bh();

				return session;
			}
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
int l2tp_session_register(struct l2tp_session *session,
			  struct l2tp_tunnel *tunnel)
{
	struct l2tp_session *session_walk;
	struct hlist_head *g_head;
	struct hlist_head *head;
	struct l2tp_net *pn;
	int err;

	head = l2tp_session_id_hash(tunnel, session->session_id);

	write_lock_bh(&tunnel->hlist_lock);
	if (!tunnel->acpt_newsess) {
		err = -ENODEV;
		goto err_tlock;
	}

	hlist_for_each_entry(session_walk, head, hlist)
		if (session_walk->session_id == session->session_id) {
			err = -EEXIST;
			goto err_tlock;
		}

	if (tunnel->version == L2TP_HDR_VER_3) {
		pn = l2tp_pernet(tunnel->l2tp_net);
		g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
						session->session_id);

		spin_lock_bh(&pn->l2tp_session_hlist_lock);

		hlist_for_each_entry(session_walk, g_head, global_hlist)
			if (session_walk->session_id == session->session_id) {
				err = -EEXIST;
				goto err_tlock_pnlock;
			}

		l2tp_tunnel_inc_refcount(tunnel);
		hlist_add_head_rcu(&session->global_hlist, g_head);

		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
	} else {
		l2tp_tunnel_inc_refcount(tunnel);
	}

	hlist_add_head(&session->hlist, head);
	write_unlock_bh(&tunnel->hlist_lock);

	return 0;

err_tlock_pnlock:
	spin_unlock_bh(&pn->l2tp_session_hlist_lock);
err_tlock:
	write_unlock_bh(&tunnel->hlist_lock);

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_session_register);
/* Lookup a tunnel by id
 */
struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id)
{
	struct l2tp_tunnel *tunnel;
	struct l2tp_net *pn = l2tp_pernet(net);

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (tunnel->tunnel_id == tunnel_id) {
			rcu_read_unlock_bh();

			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;
	int count = 0;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (++count > nth) {
			rcu_read_unlock_bh();

			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
/*****************************************************************************
 * Receive data handling
 *****************************************************************************/
/* Queue a skb in order. We come here only if the skb has an L2TP sequence
 * number.
 */
static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct sk_buff *skbp;
	struct sk_buff *tmp;
	u32 ns = L2TP_SKB_CB(skb)->ns;

	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
		if (L2TP_SKB_CB(skbp)->ns > ns) {
			__skb_queue_before(&session->reorder_q, skbp, skb);
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
				 session->name, ns, L2TP_SKB_CB(skbp)->ns,
				 skb_queue_len(&session->reorder_q));
			atomic_long_inc(&session->stats.rx_oos_packets);
			goto out;
		}
	}

	__skb_queue_tail(&session->reorder_q, skb);

out:
	spin_unlock_bh(&session->reorder_q.lock);
}
/* Dequeue a single skb.
 */
static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int length = L2TP_SKB_CB(skb)->length;

	/* We're about to requeue the skb, so return resources
	 * to its current owner (a socket receive buffer).
	 */
	skb_orphan(skb);

	atomic_long_inc(&tunnel->stats.rx_packets);
	atomic_long_add(length, &tunnel->stats.rx_bytes);
	atomic_long_inc(&session->stats.rx_packets);
	atomic_long_add(length, &session->stats.rx_bytes);

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Bump our Nr */
		session->nr++;
		session->nr &= session->nr_max;

		l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %hu\n",
			 session->name, session->nr);
	}

	/* call private receive handler */
	if (session->recv_skb != NULL)
		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
	else
		kfree_skb(skb);
}
/* Dequeue skbs from the session's reorder_q, subject to packet order.
 * Skbs that have been in the queue for too long are simply discarded.
 */
static void l2tp_recv_dequeue(struct l2tp_session *session)
{
	struct sk_buff *skb;
	struct sk_buff *tmp;

	/* If the pkt at the head of the queue has the nr that we
	 * expect to send up next, dequeue it and any other
	 * in-sequence packets behind it.
	 */
start:
	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
		if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			atomic_long_inc(&session->stats.rx_errors);
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
				 session->name, L2TP_SKB_CB(skb)->ns,
				 L2TP_SKB_CB(skb)->length, session->nr,
				 skb_queue_len(&session->reorder_q));
			session->reorder_skip = 1;
			__skb_unlink(skb, &session->reorder_q);
			kfree_skb(skb);
			continue;
		}

		if (L2TP_SKB_CB(skb)->has_seq) {
			if (session->reorder_skip) {
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: advancing nr to next pkt: %u -> %u",
					 session->name, session->nr,
					 L2TP_SKB_CB(skb)->ns);
				session->reorder_skip = 0;
				session->nr = L2TP_SKB_CB(skb)->ns;
			}
			if (L2TP_SKB_CB(skb)->ns != session->nr) {
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
					 session->name, L2TP_SKB_CB(skb)->ns,
					 L2TP_SKB_CB(skb)->length, session->nr,
					 skb_queue_len(&session->reorder_q));
				goto out;
			}
		}
		__skb_unlink(skb, &session->reorder_q);

		/* Process the skb. We release the queue lock while we
		 * do so to let other contexts process the queue.
		 */
		spin_unlock_bh(&session->reorder_q.lock);
		l2tp_recv_dequeue_skb(session, skb);
		goto start;
	}

out:
	spin_unlock_bh(&session->reorder_q.lock);
}
static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
{
	u32 nws;

	if (nr >= session->nr)
		nws = nr - session->nr;
	else
		nws = (session->nr_max + 1) - (session->nr - nr);

	return nws < session->nr_window_size;
}
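
/* Example: for L2TPv2, nr_max is 0xffff and nr_window_size defaults to
 * nr_max / 2 (0x7fff).  With session->nr == 0xfffe, an incoming ns of 3
 * gives nws = 0x10000 - (0xfffe - 3) = 5 and is accepted despite the
 * wrap; an incoming ns of 0x8000 with session->nr == 0 gives nws = 0x8000
 * and is rejected.
 */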
/* If packet has sequence numbers, queue it if acceptable. Returns 0 if
 * acceptable, else non-zero.
 */
static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
{
	if (!l2tp_seq_check_rx_window(session, L2TP_SKB_CB(skb)->ns)) {
		/* Packet sequence number is outside allowed window.
		 * Discard it.
		 */
		l2tp_dbg(session, L2TP_MSG_SEQ,
			 "%s: pkt %u len %d discarded, outside window, nr=%u\n",
			 session->name, L2TP_SKB_CB(skb)->ns,
			 L2TP_SKB_CB(skb)->length, session->nr);
		goto discard;
	}

	if (session->reorder_timeout != 0) {
		/* Packet reordering enabled. Add skb to session's
		 * reorder queue, in order of ns.
		 */
		l2tp_recv_queue_skb(session, skb);
		goto out;
	}

	/* Packet reordering disabled. Discard out-of-sequence packets, while
	 * tracking the number of in-sequence packets after the first OOS packet
	 * is seen. After nr_oos_count_max in-sequence packets, reset the
	 * sequence number to re-enable packet reception.
	 */
	if (L2TP_SKB_CB(skb)->ns == session->nr) {
		skb_queue_tail(&session->reorder_q, skb);
	} else {
		u32 nr_oos = L2TP_SKB_CB(skb)->ns;
		u32 nr_next = (session->nr_oos + 1) & session->nr_max;

		if (nr_oos == nr_next)
			session->nr_oos_count++;
		else
			session->nr_oos_count = 0;

		session->nr_oos = nr_oos;
		if (session->nr_oos_count > session->nr_oos_count_max) {
			session->reorder_skip = 1;
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: %d oos packets received. Resetting sequence numbers\n",
				 session->name, session->nr_oos_count);
		}
		if (!session->reorder_skip) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
				 session->name, L2TP_SKB_CB(skb)->ns,
				 L2TP_SKB_CB(skb)->length, session->nr,
				 skb_queue_len(&session->reorder_q));
			goto discard;
		}
		skb_queue_tail(&session->reorder_q, skb);
	}

out:
	return 0;

discard:
	return 1;
}
/* Do receive processing of L2TP data frames. We handle both L2TPv2
 * and L2TPv3 data frames here.
 *
 * L2TPv2 Data Message Header
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |           Tunnel ID           |           Session ID          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |             Ns (opt)          |             Nr (opt)          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |      Offset Size (opt)        |    Offset pad... (opt)
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Data frames are marked by T=0. All other fields are the same as
 * those in L2TP control frames.
 *
 * L2TPv3 Data Message Header
 *
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      L2TP Session Header                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      L2-Specific Sublayer                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Tunnel Payload                      ...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 L2-Specific Sublayer Format
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |x|S|x|x|x|x|x|x|              Sequence Number                  |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Cookie value and sublayer format are negotiated with the peer when
 * the session is set up. Unlike L2TPv2, we do not need to parse the
 * packet header to determine if optional fields are present.
 *
 * Caller must already have parsed the frame and determined that it is
 * a data (not control) frame before coming here. Fields up to the
 * session-id have already been parsed and ptr points to the data
 * after the session-id.
 */
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
		      int length, int (*payload_hook)(struct sk_buff *skb))
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int offset;
	u32 ns, nr;

	/* Parse and check optional cookie */
	if (session->peer_cookie_len > 0) {
		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
			l2tp_info(tunnel, L2TP_MSG_DATA,
				  "%s: cookie mismatch (%u/%u). Discarding.\n",
				  tunnel->name, tunnel->tunnel_id,
				  session->session_id);
			atomic_long_inc(&session->stats.rx_cookie_discards);
			goto discard;
		}
		ptr += session->peer_cookie_len;
	}

	/* Handle the optional sequence numbers. Sequence numbers are
	 * in different places for L2TPv2 and L2TPv3.
	 *
	 * If we are the LAC, enable/disable sequence numbers under
	 * the control of the LNS. If no sequence numbers present but
	 * we were expecting them, discard frame.
	 */
	ns = nr = 0;
	L2TP_SKB_CB(skb)->has_seq = 0;
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_S) {
			ns = ntohs(*(__be16 *) ptr);
			ptr += 2;
			nr = ntohs(*(__be16 *) ptr);
			ptr += 2;

			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = ns;
			L2TP_SKB_CB(skb)->has_seq = 1;

			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: recv data ns=%u, nr=%u, session nr=%u\n",
				 session->name, ns, nr, session->nr);
		}
	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = ntohl(*(__be32 *) ptr);

		if (l2h & 0x40000000) {
			ns = l2h & 0x00ffffff;

			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = ns;
			L2TP_SKB_CB(skb)->has_seq = 1;

			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: recv data ns=%u, session nr=%u\n",
				 session->name, ns, session->nr);
		}
		ptr += 4;
	}

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Received a packet with sequence numbers. If we're the LNS,
		 * check if we're sending sequence numbers and if not,
		 * configure it so.
		 */
		if ((!session->lns_mode) && (!session->send_seq)) {
			l2tp_info(session, L2TP_MSG_SEQ,
				  "%s: requested to enable seq numbers by LNS\n",
				  session->name);
			session->send_seq = 1;
			l2tp_session_set_header_len(session, tunnel->version);
		}
	} else {
		/* No sequence numbers.
		 * If user has configured mandatory sequence numbers, discard.
		 */
		if (session->recv_seq) {
			l2tp_warn(session, L2TP_MSG_SEQ,
				  "%s: recv data has no seq numbers when required. Discarding.\n",
				  session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}

		/* If we're the LAC and we're sending sequence numbers, the
		 * LNS has requested that we no longer send sequence numbers.
		 * If we're the LNS and we're sending sequence numbers, the
		 * LAC is broken. Discard the frame.
		 */
		if ((!session->lns_mode) && (session->send_seq)) {
			l2tp_info(session, L2TP_MSG_SEQ,
				  "%s: requested to disable seq numbers by LNS\n",
				  session->name);
			session->send_seq = 0;
			l2tp_session_set_header_len(session, tunnel->version);
		} else if (session->send_seq) {
			l2tp_warn(session, L2TP_MSG_SEQ,
				  "%s: recv data has no seq numbers when required. Discarding.\n",
				  session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}
	}

	/* Session data offset is defined only for L2TPv2 and is
	 * indicated by an optional 16-bit value in the header.
	 */
	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If offset bit set, skip it. */
		if (hdrflags & L2TP_HDRFLAG_O) {
			offset = ntohs(*(__be16 *)ptr);
			ptr += 2 + offset;
		}
	}

	offset = ptr - optr;
	if (!pskb_may_pull(skb, offset))
		goto discard;

	__skb_pull(skb, offset);

	/* If caller wants to process the payload before we queue the
	 * packet, do so now.
	 */
	if (payload_hook)
		if ((*payload_hook)(skb))
			goto discard;

	/* Prepare skb for adding to the session's reorder_q. Hold
	 * packets for max reorder_timeout or 1 second if not
	 * reordering.
	 */
	L2TP_SKB_CB(skb)->length = length;
	L2TP_SKB_CB(skb)->expires = jiffies +
		(session->reorder_timeout ? session->reorder_timeout : HZ);

	/* Add packet to the session's receive queue. Reordering is done here, if
	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
	 */
	if (L2TP_SKB_CB(skb)->has_seq) {
		if (l2tp_recv_data_seq(session, skb))
			goto discard;
	} else {
		/* No sequence numbers. Add the skb to the tail of the
		 * reorder queue. This ensures that it will be
		 * delivered after all previous sequenced skbs.
		 */
		skb_queue_tail(&session->reorder_q, skb);
	}

	/* Try to dequeue as many skbs from reorder_q as we can. */
	l2tp_recv_dequeue(session);

	return;

discard:
	atomic_long_inc(&session->stats.rx_errors);
	kfree_skb(skb);
}
EXPORT_SYMBOL(l2tp_recv_common);
/* Drop skbs from the session's reorder_q
 */
int l2tp_session_queue_purge(struct l2tp_session *session)
{
	struct sk_buff *skb = NULL;

	BUG_ON(session->magic != L2TP_SESSION_MAGIC);
	while ((skb = skb_dequeue(&session->reorder_q))) {
		atomic_long_inc(&session->stats.rx_errors);
		kfree_skb(skb);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
 * here. The skb is not on a list when we get here.
 * Returns 0 if the packet was a data packet and was successfully passed on.
 * Returns 1 if the packet was not a good data packet and could not be
 * forwarded. All such packets are passed up to userspace to deal with.
 */
static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
			      int (*payload_hook)(struct sk_buff *skb))
{
	struct l2tp_session *session = NULL;
	unsigned char *ptr, *optr;
	u16 hdrflags;
	u32 tunnel_id, session_id;
	u16 version;
	int length;

	/* UDP has verified checksum */

	/* UDP always verifies the packet length. */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Short packet? */
	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: recv short packet (len=%d)\n",
			  tunnel->name, skb->len);
		goto error;
	}

	/* Trace packet contents, if enabled */
	if (tunnel->debug & L2TP_MSG_DATA) {
		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		pr_debug("%s: recv\n", tunnel->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
	}

	/* Point to L2TP header */
	optr = ptr = skb->data;

	/* Get L2TP header flags */
	hdrflags = ntohs(*(__be16 *) ptr);

	/* Check protocol version */
	version = hdrflags & L2TP_HDR_VER_MASK;
	if (version != tunnel->version) {
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: recv protocol version mismatch: got %d expected %d\n",
			  tunnel->name, version, tunnel->version);
		goto error;
	}

	/* Get length of L2TP packet */
	length = skb->len;

	/* If type is control packet, it is handled by userspace. */
	if (hdrflags & L2TP_HDRFLAG_T) {
		l2tp_dbg(tunnel, L2TP_MSG_DATA,
			 "%s: recv control packet, len=%d\n",
			 tunnel->name, length);
		goto error;
	}

	/* Skip flags */
	ptr += 2;

	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If length is present, skip it */
		if (hdrflags & L2TP_HDRFLAG_L)
			ptr += 2;

		/* Extract tunnel and session ID */
		tunnel_id = ntohs(*(__be16 *) ptr);
		ptr += 2;
		session_id = ntohs(*(__be16 *) ptr);
		ptr += 2;
	} else {
		ptr += 2;	/* skip reserved bits */
		tunnel_id = tunnel->tunnel_id;
		session_id = ntohl(*(__be32 *) ptr);
		ptr += 4;
	}

	/* Find the session context */
	session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id);
	if (!session || !session->recv_skb) {
		if (session)
			l2tp_session_dec_refcount(session);

		/* Not found? Pass to userspace to deal with */
		l2tp_info(tunnel, L2TP_MSG_DATA,
			  "%s: no session found (%u/%u). Passing up.\n",
			  tunnel->name, tunnel_id, session_id);
		goto error;
	}

	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
	l2tp_session_dec_refcount(session);

	return 0;

error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
}

/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes:
 * 0 : success.
 * <0: error
 * >0: skb should be passed up to userspace as UDP.
 */
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel;

	tunnel = l2tp_tunnel(sk);
	if (tunnel == NULL)
		goto pass_up;

	l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
		 tunnel->name, skb->len);

	if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
		goto pass_up;

	return 0;

pass_up:
	return 1;
}
EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
/************************************************************************
 * Transmit handling
 ***********************************************************************/
/* Build an L2TP header for the session into the buffer provided.
 */
static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	__be16 *bufp = buf;
	__be16 *optr = buf;
	u16 flags = L2TP_HDR_VER_2;
	u32 tunnel_id = tunnel->peer_tunnel_id;
	u32 session_id = session->peer_session_id;

	if (session->send_seq)
		flags |= L2TP_HDRFLAG_S;

	/* Setup L2TP header. */
	*bufp++ = htons(flags);
	*bufp++ = htons(tunnel_id);
	*bufp++ = htons(session_id);
	if (session->send_seq) {
		*bufp++ = htons(session->ns);
		*bufp++ = 0;
		session->ns++;
		session->ns &= 0xffff;
		l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
			 session->name, session->ns);
	}

	return bufp - optr;
}
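
/* Illustration: with send_seq enabled the header built above is five 16-bit
 * words - flags 0x0802 (S bit | version 2), peer tunnel id, peer session
 * id, Ns and Nr(0) - i.e. L2TP_HDR_SIZE_SEQ (10) bytes.  Without send_seq
 * only the first three words are written (L2TP_HDR_SIZE_NOSEQ, 6 bytes).
 */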
static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	char *bufp = buf;
	char *optr = bufp;

	/* Setup L2TP header. The header differs slightly for UDP and
	 * IP encapsulations. For UDP, there is 4 bytes of flags.
	 */
	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		u16 flags = L2TP_HDR_VER_3;
		*((__be16 *) bufp) = htons(flags);
		bufp += 2;
		*((__be16 *) bufp) = 0;
		bufp += 2;
	}

	*((__be32 *) bufp) = htonl(session->peer_session_id);
	bufp += 4;
	if (session->cookie_len) {
		memcpy(bufp, &session->cookie[0], session->cookie_len);
		bufp += session->cookie_len;
	}
	if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = 0;

		if (session->send_seq) {
			l2h = 0x40000000 | session->ns;
			session->ns++;
			session->ns &= 0xffffff;
			l2tp_dbg(session, L2TP_MSG_SEQ,
				 "%s: updated ns to %u\n",
				 session->name, session->ns);
		}

		*((__be32 *)bufp) = htonl(l2h);
		bufp += 4;
	}

	return bufp - optr;
}
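
/* Illustration: for UDP encapsulation with no cookie and the default
 * L2-specific sublayer this writes 4 bytes of flags/reserved, a 4-byte
 * session id and a 4-byte sublayer word (12 bytes total); over IP
 * encapsulation the leading 4 flag bytes are omitted, giving 8 bytes.
 * This matches what l2tp_session_set_header_len() reserves.
 */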
static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
			  struct flowi *fl, size_t data_len)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	unsigned int len = skb->len;
	int error;

	/* Debug */
	if (session->send_seq)
		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes, ns=%u\n",
			 session->name, data_len, session->ns - 1);
	else
		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes\n",
			 session->name, data_len);

	if (session->debug & L2TP_MSG_DATA) {
		int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
		unsigned char *datap = skb->data + uhlen;

		pr_debug("%s: xmit\n", session->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
				     datap, min_t(size_t, 32, len - uhlen));
	}

	/* Queue the packet to IP for output */
	skb->ignore_df = 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
		error = inet6_csk_xmit(tunnel->sock, skb, NULL);
	else
#endif
		error = ip_queue_xmit(tunnel->sock, skb, fl);

	/* Update stats */
	if (error >= 0) {
		atomic_long_inc(&tunnel->stats.tx_packets);
		atomic_long_add(len, &tunnel->stats.tx_bytes);
		atomic_long_inc(&session->stats.tx_packets);
		atomic_long_add(len, &session->stats.tx_bytes);
	} else {
		atomic_long_inc(&tunnel->stats.tx_errors);
		atomic_long_inc(&session->stats.tx_errors);
	}

	return 0;
}
/* If caller requires the skb to have a ppp header, the header must be
 * inserted in the skb data before calling this function.
 */
int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
{
	int data_len = skb->len;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct sock *sk = tunnel->sock;
	struct flowi *fl;
	struct udphdr *uh;
	struct inet_sock *inet;
	int headroom;
	int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
	int udp_len;
	int ret = NET_XMIT_SUCCESS;

	/* Check that there's enough headroom in the skb to insert IP,
	 * UDP and L2TP headers. If not enough, expand it to
	 * make room. Adjust truesize.
	 */
	headroom = NET_SKB_PAD + sizeof(struct iphdr) +
		uhlen + hdr_len;
	if (skb_cow_head(skb, headroom)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* Setup L2TP header */
	session->build_header(session, __skb_push(skb, hdr_len));

	/* Reset skb netfilter state */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	nf_reset(skb);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* Get routing info from the tunnel socket */
	skb_dst_drop(skb);
	skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));

	inet = inet_sk(sk);
	fl = &inet->cork.fl;
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* Setup UDP header */
		__skb_push(skb, sizeof(*uh));
		skb_reset_transport_header(skb);
		uh = udp_hdr(skb);
		uh->source = inet->inet_sport;
		uh->dest = inet->inet_dport;
		udp_len = uhlen + hdr_len + data_len;
		uh->len = htons(udp_len);

		/* Calculate UDP checksum if configured to do so */
#if IS_ENABLED(CONFIG_IPV6)
		if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
			udp6_set_csum(udp_get_no_check6_tx(sk),
				      skb, &inet6_sk(sk)->saddr,
				      &sk->sk_v6_daddr, udp_len);
		else
#endif
			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
				     inet->inet_daddr, udp_len);
		break;

	case L2TP_ENCAPTYPE_IP:
		break;
	}

	l2tp_xmit_core(session, skb, fl, data_len);
out_unlock:
	bh_unlock_sock(sk);

	return ret;
}
EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
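
/* Note: the headroom reserved above covers an IPv4 header, the optional
 * UDP header and the L2TP header emitted by session->build_header();
 * skb_cow_head() only reallocates when the existing headroom is smaller
 * than that estimate.
 */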
/*****************************************************************************
 * Tunnel and session create/destroy.
 *****************************************************************************/
/* Tunnel socket destruct hook.
 * The tunnel context is deleted only when all session sockets have been
 * closed.
 */
static void l2tp_tunnel_destruct(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);

	if (tunnel == NULL)
		goto end;

	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);

	/* Disable udp encapsulation */
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* No longer an encapsulation socket. See net/ipv4/udp.c */
		(udp_sk(sk))->encap_type = 0;
		(udp_sk(sk))->encap_rcv = NULL;
		(udp_sk(sk))->encap_destroy = NULL;
		break;
	case L2TP_ENCAPTYPE_IP:
		break;
	}

	/* Remove hooks into tunnel socket */
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;

	/* Call the original destructor */
	if (sk->sk_destruct)
		(*sk->sk_destruct)(sk);

	kfree_rcu(tunnel, rcu);
end:
	return;
}
/* When the tunnel is closed, all the attached sessions need to go too.
 */
void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct l2tp_session *session;

	BUG_ON(tunnel == NULL);

	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
		  tunnel->name);

	write_lock_bh(&tunnel->hlist_lock);
	tunnel->acpt_newsess = false;
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			session = hlist_entry(walk, struct l2tp_session, hlist);

			l2tp_info(session, L2TP_MSG_CONTROL,
				  "%s: closing session\n", session->name);

			hlist_del_init(&session->hlist);

			if (test_and_set_bit(0, &session->dead))
				goto again;

			write_unlock_bh(&tunnel->hlist_lock);

			__l2tp_session_unhash(session);
			l2tp_session_queue_purge(session);

			if (session->session_close != NULL)
				(*session->session_close)(session);

			l2tp_session_dec_refcount(session);

			write_lock_bh(&tunnel->hlist_lock);

			/* Now restart from the beginning of this hash
			 * chain. We always remove a session from the
			 * list so we are guaranteed to make forward
			 * progress.
			 */
			goto again;
		}
	}
	write_unlock_bh(&tunnel->hlist_lock);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
/* Tunnel socket destroy hook for UDP encapsulation */
static void l2tp_udp_encap_destroy(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);

	if (tunnel)
		l2tp_tunnel_delete(tunnel);
}
/* Workqueue tunnel deletion function */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
						  del_work);
	struct sock *sk = tunnel->sock;
	struct socket *sock = sk->sk_socket;
	struct l2tp_net *pn;

	l2tp_tunnel_closeall(tunnel);

	/* If the tunnel socket was created within the kernel, use
	 * the sk API to release it here.
	 */
	if (tunnel->fd < 0) {
		if (sock) {
			kernel_sock_shutdown(sock, SHUT_RDWR);
			sock_release(sock);
		}
	}

	/* Remove the tunnel struct from the tunnel list */
	pn = l2tp_pernet(tunnel->l2tp_net);
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_del_rcu(&tunnel->list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

	/* drop initial ref */
	l2tp_tunnel_dec_refcount(tunnel);

	/* drop workqueue ref */
	l2tp_tunnel_dec_refcount(tunnel);
}
/* Create a socket for the tunnel, if one isn't set up by
 * userspace. This is used for static tunnels where there is no
 * managing L2TP daemon.
 *
 * Since we don't want these sockets to keep a namespace alive by
 * themselves, we drop the socket's namespace refcount after creation.
 * These sockets are freed when the namespace exits using the pernet
 * exit hook.
 */
static int l2tp_tunnel_sock_create(struct net *net,
				   u32 tunnel_id,
				   u32 peer_tunnel_id,
				   struct l2tp_tunnel_cfg *cfg,
				   struct socket **sockp)
{
	int err = -EINVAL;
	struct socket *sock = NULL;
	struct udp_port_cfg udp_conf;

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
		memset(&udp_conf, 0, sizeof(udp_conf));

#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			udp_conf.family = AF_INET6;
			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
			       sizeof(udp_conf.local_ip6));
			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
			       sizeof(udp_conf.peer_ip6));
			udp_conf.use_udp6_tx_checksums =
			  !cfg->udp6_zero_tx_checksums;
			udp_conf.use_udp6_rx_checksums =
			  !cfg->udp6_zero_rx_checksums;
		} else
#endif
		{
			udp_conf.family = AF_INET;
			udp_conf.local_ip = cfg->local_ip;
			udp_conf.peer_ip = cfg->peer_ip;
			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
		}

		udp_conf.local_udp_port = htons(cfg->local_udp_port);
		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);

		err = udp_sock_create(net, &udp_conf, &sock);
		if (err < 0)
			goto out;

		break;

	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			struct sockaddr_l2tpip6 ip6_addr = {0};

			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
					  sizeof(ip6_addr));
			if (err < 0)
				goto out;

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock,
					     (struct sockaddr *) &ip6_addr,
					     sizeof(ip6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			struct sockaddr_l2tpip ip_addr = {0};

			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->local_ip;
			ip_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
					  sizeof(ip_addr));
			if (err < 0)
				goto out;

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->peer_ip;
			ip_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
					     sizeof(ip_addr), 0);
			if (err < 0)
				goto out;
		}

		break;

	default:
		goto out;
	}

out:
	*sockp = sock;
	if ((err < 0) && sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
		*sockp = NULL;
	}

	return err;
}
static struct lock_class_key l2tp_socket_class;
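
/* The tunnel socket's spinlock is re-keyed with this lockdep class in
 * l2tp_tunnel_create() so that lockdep can distinguish sockets managed by
 * l2tp from ordinary UDP/IP sockets.
 */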
int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
{
	struct l2tp_tunnel *tunnel = NULL;
	int err;
	struct socket *sock = NULL;
	struct sock *sk = NULL;
	struct l2tp_net *pn;
	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;

	/* Get the tunnel socket from the fd, which was opened by
	 * the userspace L2TP daemon. If not specified, create a
	 * kernel socket.
	 */
	if (fd < 0) {
		err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
					      cfg, &sock);
		if (err < 0)
			goto err;
	} else {
		sock = sockfd_lookup(fd, &err);
		if (!sock) {
			pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
			       tunnel_id, fd, err);
			err = -EBADF;
			goto err;
		}

		/* Reject namespace mismatches */
		if (!net_eq(sock_net(sock->sk), net)) {
			pr_err("tunl %u: netns mismatch\n", tunnel_id);
			err = -EINVAL;
			goto err;
		}
	}

	sk = sock->sk;

	if (cfg != NULL)
		encap = cfg->encap;

	/* Quick sanity checks */
	switch (encap) {
	case L2TP_ENCAPTYPE_UDP:
		err = -EPROTONOSUPPORT;
		if (sk->sk_protocol != IPPROTO_UDP) {
			pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
			       tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
			goto err;
		}
		break;
	case L2TP_ENCAPTYPE_IP:
		err = -EPROTONOSUPPORT;
		if (sk->sk_protocol != IPPROTO_L2TP) {
			pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
			       tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
			goto err;
		}
		break;
	}

	/* Check if this socket has already been prepped */
	tunnel = l2tp_tunnel(sk);
	if (tunnel != NULL) {
		/* This socket has already been prepped */
		err = -EBUSY;
		goto err;
	}

	tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
	if (tunnel == NULL) {
		err = -ENOMEM;
		goto err;
	}

	tunnel->version = version;
	tunnel->tunnel_id = tunnel_id;
	tunnel->peer_tunnel_id = peer_tunnel_id;
	tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;

	tunnel->magic = L2TP_TUNNEL_MAGIC;
	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
	rwlock_init(&tunnel->hlist_lock);
	tunnel->acpt_newsess = true;

	/* The net we belong to */
	tunnel->l2tp_net = net;
	pn = l2tp_pernet(net);

	if (cfg != NULL)
		tunnel->debug = cfg->debug;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		if (ipv6_addr_v4mapped(&np->saddr) &&
		    ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			struct inet_sock *inet = inet_sk(sk);

			tunnel->v4mapped = true;
			inet->inet_saddr = np->saddr.s6_addr32[3];
			inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
			inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
		} else {
			tunnel->v4mapped = false;
		}
	}
#endif

	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
	tunnel->encap = encap;
	if (encap == L2TP_ENCAPTYPE_UDP) {
		struct udp_tunnel_sock_cfg udp_cfg = { };

		udp_cfg.sk_user_data = tunnel;
		udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
		udp_cfg.encap_rcv = l2tp_udp_encap_recv;
		udp_cfg.encap_destroy = l2tp_udp_encap_destroy;

		setup_udp_tunnel_sock(net, sock, &udp_cfg);
	} else {
		sk->sk_user_data = tunnel;
	}

	/* Bump the reference count. The tunnel context is deleted
	 * only when this drops to zero. A reference is also held on
	 * the tunnel socket to ensure that it is not released while
	 * the tunnel is extant. Must be done before sk_destruct is
	 * set.
	 */
	refcount_set(&tunnel->ref_count, 1);
	sock_hold(sk);
	tunnel->sock = sk;
	tunnel->fd = fd;

	/* Hook on the tunnel socket destructor so that we can cleanup
	 * if the tunnel socket goes away.
	 */
	tunnel->old_sk_destruct = sk->sk_destruct;
	sk->sk_destruct = &l2tp_tunnel_destruct;
	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");

	sk->sk_allocation = GFP_ATOMIC;

	/* Init delete workqueue struct */
	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);

	/* Add tunnel to our list */
	INIT_LIST_HEAD(&tunnel->list);
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

	err = 0;
err:
	if (tunnelp)
		*tunnelp = tunnel;

	/* If tunnel's socket was created by the kernel, it doesn't
	 * have a file.
	 */
	if (sock && sock->file)
		sockfd_put(sock);

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
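
/* Illustrative usage (not part of this file): a caller setting up an
 * unmanaged tunnel over a kernel-created UDP socket might do roughly:
 *
 *	struct l2tp_tunnel_cfg cfg = { .encap = L2TP_ENCAPTYPE_UDP, ... };
 *	struct l2tp_tunnel *tunnel;
 *	int err = l2tp_tunnel_create(net, -1, 3, tid, peer_tid, &cfg, &tunnel);
 *
 * Passing fd < 0 makes l2tp_tunnel_create() build the socket itself via
 * l2tp_tunnel_sock_create(); "tid" and "peer_tid" are placeholder ids.
 */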
/* This function is used by the netlink TUNNEL_DELETE command.
 */
void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
	if (!test_and_set_bit(0, &tunnel->dead)) {
		l2tp_tunnel_inc_refcount(tunnel);
		queue_work(l2tp_wq, &tunnel->del_work);
	}
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
/* Really kill the session.
 */
void l2tp_session_free(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel = session->tunnel;

	BUG_ON(refcount_read(&session->ref_count) != 0);

	if (tunnel) {
		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
		l2tp_tunnel_dec_refcount(tunnel);
	}

	kfree(session);
}
EXPORT_SYMBOL_GPL(l2tp_session_free);
/* Remove an l2tp session from l2tp_core's hash lists.
 * Provides a tidyup interface for pseudowire code which can't just route all
 * shutdown via. l2tp_session_delete and a pseudowire-specific session_close
 * function.
 */
void __l2tp_session_unhash(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel = session->tunnel;

	/* Remove the session from core hashes */
	if (tunnel) {
		/* Remove from the per-tunnel hash */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_del_init(&session->hlist);
		write_unlock_bh(&tunnel->hlist_lock);

		/* For L2TPv3 we have a per-net hash: remove from there, too */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_del_init_rcu(&session->global_hlist);
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
			synchronize_rcu();
		}
	}
}
EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
/* This function is used by the netlink SESSION_DELETE command and by
 * pseudowire modules.
 */
int l2tp_session_delete(struct l2tp_session *session)
{
	if (test_and_set_bit(0, &session->dead))
		return 0;

	__l2tp_session_unhash(session);
	l2tp_session_queue_purge(session);
	if (session->session_close != NULL)
		(*session->session_close)(session);

	l2tp_session_dec_refcount(session);

	return 0;
}
EXPORT_SYMBOL_GPL(l2tp_session_delete);
/* We come here whenever a session's send_seq, cookie_len or
 * l2specific_type parameters are set.
 */
void l2tp_session_set_header_len(struct l2tp_session *session, int version)
{
	if (version == L2TP_HDR_VER_2) {
		session->hdr_len = 6;
		if (session->send_seq)
			session->hdr_len += 4;
	} else {
		session->hdr_len = 4 + session->cookie_len;
		session->hdr_len += l2tp_get_l2specific_len(session);
		if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
			session->hdr_len += 4;
	}
}
EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
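
/* Example: an L2TPv2 session with send_seq on reserves 6 + 4 = 10 bytes
 * (flags, tunnel id, session id, Ns, Nr).  An L2TPv3 session over UDP with
 * an 8-byte cookie and the default L2-specific sublayer reserves
 * 4 (session id) + 8 (cookie) + 4 (sublayer) + 4 (UDP flags) = 20 bytes.
 */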
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	struct l2tp_session *session;

	session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
	if (session != NULL) {
		session->magic = L2TP_SESSION_MAGIC;
		session->tunnel = tunnel;

		session->session_id = session_id;
		session->peer_session_id = peer_session_id;
		if (tunnel->version == L2TP_HDR_VER_2)
			session->nr_max = 0xffff;
		else
			session->nr_max = 0xffffff;
		session->nr_window_size = session->nr_max / 2;
		session->nr_oos_count_max = 4;

		/* Use NR of first received packet */
		session->reorder_skip = 1;

		sprintf(&session->name[0], "sess %u/%u",
			tunnel->tunnel_id, session->session_id);

		skb_queue_head_init(&session->reorder_q);

		INIT_HLIST_NODE(&session->hlist);
		INIT_HLIST_NODE(&session->global_hlist);

		/* Inherit debug options from tunnel */
		session->debug = tunnel->debug;

		if (cfg) {
			session->pwtype = cfg->pw_type;
			session->debug = cfg->debug;
			session->mtu = cfg->mtu;
			session->mru = cfg->mru;
			session->send_seq = cfg->send_seq;
			session->recv_seq = cfg->recv_seq;
			session->lns_mode = cfg->lns_mode;
			session->reorder_timeout = cfg->reorder_timeout;
			session->l2specific_type = cfg->l2specific_type;
			session->cookie_len = cfg->cookie_len;
			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
			session->peer_cookie_len = cfg->peer_cookie_len;
			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
		}

		if (tunnel->version == L2TP_HDR_VER_2)
			session->build_header = l2tp_build_l2tpv2_header;
		else
			session->build_header = l2tp_build_l2tpv3_header;

		l2tp_session_set_header_len(session, tunnel->version);

		refcount_set(&session->ref_count, 1);

		return session;
	}

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(l2tp_session_create);
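
/* Illustrative usage (not part of this file): pseudowire code typically
 * pairs l2tp_session_create() with l2tp_session_register(), e.g.
 *
 *	session = l2tp_session_create(sizeof(struct my_pw_priv), tunnel,
 *				      session_id, peer_session_id, &cfg);
 *	if (IS_ERR(session))
 *		return PTR_ERR(session);
 *	err = l2tp_session_register(session, tunnel);
 *
 * where "struct my_pw_priv" stands for the caller's private area reserved
 * by priv_size.
 */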
/*****************************************************************************
 * Init and cleanup
 *****************************************************************************/
static __net_init int l2tp_init_net(struct net *net)
{
	struct l2tp_net *pn = net_generic(net, l2tp_net_id);
	int hash;

	INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
	spin_lock_init(&pn->l2tp_tunnel_list_lock);

	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
		INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);

	spin_lock_init(&pn->l2tp_session_hlist_lock);

	return 0;
}
static __net_exit void l2tp_exit_net(struct net *net)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel = NULL;
	int hash;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		l2tp_tunnel_delete(tunnel);
	}
	rcu_read_unlock_bh();

	flush_workqueue(l2tp_wq);
	rcu_barrier();

	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
		WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
}
static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.exit = l2tp_exit_net,
	.id   = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};
static int __init l2tp_init(void)
{
	int rc = 0;

	rc = register_pernet_device(&l2tp_net_ops);
	if (rc)
		goto out;

	l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
	if (!l2tp_wq) {
		pr_err("alloc_workqueue failed\n");
		unregister_pernet_device(&l2tp_net_ops);
		rc = -ENOMEM;
		goto out;
	}

	pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);

out:
	return rc;
}
static void __exit l2tp_exit(void)
{
	unregister_pernet_device(&l2tp_net_ops);
	if (l2tp_wq) {
		destroy_workqueue(l2tp_wq);
		l2tp_wq = NULL;
	}
}

module_init(l2tp_init);
module_exit(l2tp_exit);
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP core");
MODULE_LICENSE("GPL");
MODULE_VERSION(L2TP_DRV_VERSION);