// SPDX-License-Identifier: GPL-2.0-or-later
/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
 *
 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
 * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Author: Harald Welte <hwelte@sysmocom.de>
 *	   Pablo Neira Ayuso <pablo@netfilter.org>
 *	   Andreas Schultz <aschultz@travelping.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/file.h>
#include <linux/gtp.h>

#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
/* An active session for the subscriber. */
struct pdp_ctx {
	struct hlist_node	hlist_tid;
	struct hlist_node	hlist_addr;

	union {
		struct {
			u64	tid;
			u16	flow;
		} v0;
		struct {
			u32	i_tei;
			u32	o_tei;
		} v1;
	} u;
	u8			gtp_version;
	u16			af;

	union {
		struct in_addr	addr;
		struct in6_addr	addr6;
	} ms;
	union {
		struct in_addr	addr;
		struct in6_addr	addr6;
	} peer;

	struct sock		*sk;
	struct net_device	*dev;

	atomic_t		tx_seq;
	struct rcu_head		rcu_head;
};
/* One instance of the GTP device. */
struct gtp_dev {
	struct list_head	list;

	struct sock		*sk0;
	struct sock		*sk1u;
	u8			sk_created;

	struct net_device	*dev;
	struct net		*net;

	unsigned int		role;
	unsigned int		hash_size;
	struct hlist_head	*tid_hash;
	struct hlist_head	*addr_hash;

	u8			restart_count;
};
struct echo_info {
	union {
		struct in6_addr	addr6;
		struct in_addr	addr;
	} ms;
	union {
		struct in6_addr	addr6;
		struct in_addr	addr;
	} peer;
	__u8			gtp_version;
};

static unsigned int gtp_net_id __read_mostly;

struct gtp_net {
	struct list_head gtp_dev_list;
};

static u32 gtp_h_initval;

static struct genl_family gtp_genl_family;
enum gtp_multicast_groups {
	GTP_GENL_MCGRP,
};

static const struct genl_multicast_group gtp_genl_mcgrps[] = {
	[GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
};
static void pdp_context_delete(struct pdp_ctx *pctx);
static inline u32 gtp0_hashfn(u64 tid)
{
	u32 *tid32 = (u32 *) &tid;
	return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
}

static inline u32 gtp1u_hashfn(u32 tid)
{
	return jhash_1word(tid, gtp_h_initval);
}

static inline u32 ipv4_hashfn(__be32 ip)
{
	return jhash_1word((__force u32)ip, gtp_h_initval);
}

static u32 ipv6_hashfn(const struct in6_addr *ip6)
{
	return jhash_2words((__force u32)ip6->s6_addr32[0],
			    (__force u32)ip6->s6_addr32[1], gtp_h_initval);
}
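/* Two lookup keys are derived from these hashes: tid_hash is indexed by
 * the GTPv0 TID or GTPv1 TEI and serves the decapsulation path, while
 * addr_hash is indexed by the mobile subscriber address and serves the
 * encapsulation path. gtp_h_initval seeds jhash once at module load so
 * bucket placement is not externally predictable.
 */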
/* Resolve a PDP context structure based on the 64bit TID. */
static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid, u16 family)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->af == family &&
		    pdp->gtp_version == GTP_V0 &&
		    pdp->u.v0.tid == tid)
			return pdp;
	}
	return NULL;
}
/* Resolve a PDP context structure based on the 32bit TEI. */
static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid, u16 family)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->af == family &&
		    pdp->gtp_version == GTP_V1 &&
		    pdp->u.v1.i_tei == tid)
			return pdp;
	}
	return NULL;
}
/* Resolve a PDP context based on IPv4 address of MS. */
static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
		if (pdp->af == AF_INET &&
		    pdp->ms.addr.s_addr == ms_addr)
			return pdp;
	}

	return NULL;
}
/* 3GPP TS 29.060: PDN Connection: the association between a MS represented by
 * [...] one IPv6 *prefix* and a PDN represented by an APN.
 *
 * Then, 3GPP TS 29.061, Section 11.2.1.3 says: The size of the prefix shall be
 * according to the maximum prefix length for a global IPv6 address as
 * specified in the IPv6 Addressing Architecture, see RFC 4291.
 *
 * Finally, RFC 4291 section 2.5.4 states: All Global Unicast addresses other
 * than those that start with binary 000 have a 64-bit interface ID field
 * (i.e., n + m = 64).
 */
static bool ipv6_pdp_addr_equal(const struct in6_addr *a,
				const struct in6_addr *b)
{
	return a->s6_addr32[0] == b->s6_addr32[0] &&
	       a->s6_addr32[1] == b->s6_addr32[1];
}
static struct pdp_ctx *ipv6_pdp_find(struct gtp_dev *gtp,
				     const struct in6_addr *ms_addr)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->addr_hash[ipv6_hashfn(ms_addr) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
		if (pdp->af == AF_INET6 &&
		    ipv6_pdp_addr_equal(&pdp->ms.addr6, ms_addr))
			return pdp;
	}

	return NULL;
}
static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
			      unsigned int hdrlen, unsigned int role)
{
	struct iphdr *iph;

	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
		return false;

	iph = (struct iphdr *)(skb->data + hdrlen);

	if (role == GTP_ROLE_SGSN)
		return iph->daddr == pctx->ms.addr.s_addr;

	return iph->saddr == pctx->ms.addr.s_addr;
}
static bool gtp_check_ms_ipv6(struct sk_buff *skb, struct pdp_ctx *pctx,
			      unsigned int hdrlen, unsigned int role)
{
	struct ipv6hdr *ip6h;
	int ret;

	if (!pskb_may_pull(skb, hdrlen + sizeof(struct ipv6hdr)))
		return false;

	ip6h = (struct ipv6hdr *)(skb->data + hdrlen);

	if ((ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL) ||
	    (ipv6_addr_type(&ip6h->daddr) & IPV6_ADDR_LINKLOCAL))
		return false;

	if (role == GTP_ROLE_SGSN)
		ret = ipv6_pdp_addr_equal(&ip6h->daddr, &pctx->ms.addr6);
	else
		ret = ipv6_pdp_addr_equal(&ip6h->saddr, &pctx->ms.addr6);

	return ret;
}
/* Check if the inner IP address in this packet is assigned to any
 * existing mobile subscriber.
 */
static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
			 unsigned int hdrlen, unsigned int role,
			 __u16 inner_proto)
{
	switch (inner_proto) {
	case ETH_P_IP:
		return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
	case ETH_P_IPV6:
		return gtp_check_ms_ipv6(skb, pctx, hdrlen, role);
	}
	return false;
}
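/* Role semantics for the checks above: a device in GTP_ROLE_SGSN mode
 * emulates the radio-network side, so the subscriber address must show
 * up as the inner destination; in the default GGSN role it must be the
 * inner source.
 */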
static int gtp_inner_proto(struct sk_buff *skb, unsigned int hdrlen,
			   __u16 *inner_proto)
{
	__u8 *ip_version, _ip_version;

	ip_version = skb_header_pointer(skb, hdrlen, sizeof(*ip_version),
					&_ip_version);
	if (!ip_version)
		return -1;

	switch (*ip_version & 0xf0) {
	case 0x40:
		*inner_proto = ETH_P_IP;
		break;
	case 0x60:
		*inner_proto = ETH_P_IPV6;
		break;
	default:
		return -1;
	}

	return 0;
}
static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
		  unsigned int hdrlen, unsigned int role, __u16 inner_proto)
{
	if (!gtp_check_ms(skb, pctx, hdrlen, role, inner_proto)) {
		netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
		return 1;
	}

	/* Get rid of the GTP + UDP headers. */
	if (iptunnel_pull_header(skb, hdrlen, htons(inner_proto),
				 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) {
		pctx->dev->stats.rx_length_errors++;
		goto err;
	}

	netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");

	/* Now that the UDP and the GTP header have been removed, set up the
	 * new network header. This is required by the upper layer to
	 * calculate the transport header.
	 */
	skb_reset_network_header(skb);
	skb_reset_mac_header(skb);

	skb->dev = pctx->dev;

	dev_sw_netstats_rx_add(pctx->dev, skb->len);

	__netif_rx(skb);
	return 0;

err:
	pctx->dev->stats.rx_dropped++;
	return -1;
}
static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
					   const struct sock *sk,
					   __be32 daddr, __be32 saddr)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif		= sk->sk_bound_dev_if;
	fl4->daddr		= daddr;
	fl4->saddr		= saddr;
	fl4->flowi4_tos		= ip_sock_rt_tos(sk);
	fl4->flowi4_scope	= ip_sock_rt_scope(sk);
	fl4->flowi4_proto	= sk->sk_protocol;

	return ip_route_output_key(sock_net(sk), fl4);
}
static struct rt6_info *ip6_route_output_gtp(struct net *net,
					     struct flowi6 *fl6,
					     const struct sock *sk,
					     const struct in6_addr *daddr,
					     struct in6_addr *saddr)
{
	struct dst_entry *dst;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_oif		= sk->sk_bound_dev_if;
	fl6->daddr		= *daddr;
	fl6->saddr		= *saddr;
	fl6->flowi6_proto	= sk->sk_protocol;

	dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, fl6, NULL);
	if (IS_ERR(dst))
		return ERR_PTR(-ENETUNREACH);

	return (struct rt6_info *)dst;
}
/* GSM TS 09.60. 7.3
 * In all Path Management messages:
 * - TID: is not used and shall be set to 0.
 * - Flow Label is not used and shall be set to 0
 * In signalling messages:
 * - number: this field is not yet used in signalling messages.
 *   It shall be set to 255 by the sender and shall be ignored
 *   by the receiver
 * Returns true if the echo req was correct, false otherwise.
 */
static bool gtp0_validate_echo_hdr(struct gtp0_header *gtp0)
{
	return !(gtp0->tid || (gtp0->flags ^ 0x1e) ||
		 gtp0->number != 0xff || gtp0->flow);
}
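/* 0x1e is the fixed GTPv0 flags byte for these messages: version 0 in
 * the top three bits, PT=1 (GTP rather than GTP'), the three spare bits
 * set to one and the low SNDCP N-PDU number flag clear.
 */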
/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)
{
	int len_pkt, len_hdr;

	hdr->flags = 0x1e; /* v0, GTP-non-prime. */
	hdr->type = msg_type;
	/* GSM TS 09.60. 7.3 In all Path Management Flow Label and TID
	 * are not used and shall be set to 0.
	 */
	hdr->flow = 0;
	hdr->tid = 0;
	hdr->number = 0xff;
	hdr->spare[0] = 0xff;
	hdr->spare[1] = 0xff;
	hdr->spare[2] = 0xff;

	len_pkt = sizeof(struct gtp0_packet);
	len_hdr = sizeof(struct gtp0_header);

	if (msg_type == GTP_ECHO_RSP)
		hdr->length = htons(len_pkt - len_hdr);
	else
		hdr->length = 0;
}
static int gtp0_send_echo_resp_ip(struct gtp_dev *gtp, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4;
	struct rtable *rt;

	/* find route to the sender,
	 * src address becomes dst address and vice versa.
	 */
	rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
	if (IS_ERR(rt)) {
		netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
			   &iph->saddr);
		return -1;
	}

	udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
			    fl4.saddr, fl4.daddr,
			    iph->tos,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    htons(GTP0_PORT), htons(GTP0_PORT),
			    !net_eq(sock_net(gtp->sk1u),
				    dev_net(gtp->dev)),
			    false);

	return 0;
}
static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
	struct gtp0_packet *gtp_pkt;
	struct gtp0_header *gtp0;
	__be16 seq;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	if (!gtp0_validate_echo_hdr(gtp0))
		return -1;

	seq = gtp0->seq;

	/* pull GTP and UDP headers */
	skb_pull_data(skb, sizeof(struct gtp0_header) + sizeof(struct udphdr));

	gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet));
	memset(gtp_pkt, 0, sizeof(struct gtp0_packet));

	gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP);

	/* GSM TS 09.60. 7.3 The Sequence Number in a signalling response
	 * message shall be copied from the signalling request message
	 * that the GSN is replying to.
	 */
	gtp_pkt->gtp0_h.seq = seq;

	gtp_pkt->ie.tag = GTPIE_RECOVERY;
	gtp_pkt->ie.val = gtp->restart_count;

	switch (gtp->sk0->sk_family) {
	case AF_INET:
		if (gtp0_send_echo_resp_ip(gtp, skb) < 0)
			return -1;
		break;
	case AF_INET6:
		return -1;
	}

	return 0;
}
static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
			      int flags, u32 type, struct echo_info echo)
{
	void *genlh;

	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
			    type);
	if (!genlh)
		goto failure;

	if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) ||
	    nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer.addr.s_addr) ||
	    nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms.addr.s_addr))
		goto failure;

	genlmsg_end(skb, genlh);
	return 0;

failure:
	genlmsg_cancel(skb, genlh);
	return -EMSGSIZE;
}
static void gtp0_handle_echo_resp_ip(struct sk_buff *skb, struct echo_info *echo)
{
	struct iphdr *iph = ip_hdr(skb);

	echo->ms.addr.s_addr = iph->daddr;
	echo->peer.addr.s_addr = iph->saddr;
	echo->gtp_version = GTP_V0;
}
static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
	struct gtp0_header *gtp0;
	struct echo_info echo;
	struct sk_buff *msg;
	int ret;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	if (!gtp0_validate_echo_hdr(gtp0))
		return -1;

	switch (gtp->sk0->sk_family) {
	case AF_INET:
		gtp0_handle_echo_resp_ip(skb, &echo);
		break;
	case AF_INET6:
		return -1;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
	if (ret < 0) {
		nlmsg_free(msg);
		return ret;
	}

	return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
				       msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
}
static int gtp_proto_to_family(__u16 proto)
{
	switch (proto) {
	case ETH_P_IP:
		return AF_INET;
	case ETH_P_IPV6:
		return AF_INET6;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return AF_UNSPEC;
}
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp0_header);
	struct gtp0_header *gtp0;
	struct pdp_ctx *pctx;
	__u16 inner_proto;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp0->flags >> 5) != GTP_V0)
		return 1;

	/* If the sockets were created in kernel, it means that
	 * there is no daemon running in userspace which would
	 * handle echo request.
	 */
	if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created)
		return gtp0_send_echo_resp(gtp, skb);

	if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created)
		return gtp0_handle_echo_resp(gtp, skb);

	if (gtp0->type != GTP_TPDU)
		return 1;

	if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) {
		netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n");
		return -1;
	}

	pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid),
			     gtp_proto_to_family(inner_proto));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);
}
/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type)
{
	int len_pkt, len_hdr;

	/* S flag must be set to 1 */
	hdr->flags = 0x32; /* v1, GTP-non-prime. */
	hdr->type = msg_type;
	/* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */
	hdr->tid = 0;

	/* seq, npdu and next should be counted to the length of the GTP packet
	 * that's why size of gtp1_header should be subtracted,
	 * not size of gtp1_header_long.
	 */
	len_hdr = sizeof(struct gtp1_header);

	if (msg_type == GTP_ECHO_RSP) {
		len_pkt = sizeof(struct gtp1u_packet);
		hdr->length = htons(len_pkt - len_hdr);
	} else {
		/* GTP_ECHO_REQ does not carry a GTP Information Element,
		 * which is why gtp1_header_long is used here.
		 */
		len_pkt = sizeof(struct gtp1_header_long);
		hdr->length = htons(len_pkt - len_hdr);
	}
}
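/* 0x32 sets version 1, PT=1 and the S (sequence number present) flag,
 * which 3GPP TS 29.281 5.1 mandates for GTPv1-U echo messages.
 */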
static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
	struct gtp1_header_long *gtp1u;
	struct gtp1u_packet *gtp_pkt;
	struct rtable *rt;
	struct flowi4 fl4;
	struct iphdr *iph;

	gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));

	/* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
	 * Error Indication and Supported Extension Headers Notification
	 * messages, the S flag shall be set to 1 and TEID shall be set to 0.
	 */
	if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
		return -1;

	/* pull GTP and UDP headers */
	skb_pull_data(skb,
		      sizeof(struct gtp1_header_long) + sizeof(struct udphdr));

	gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet));
	memset(gtp_pkt, 0, sizeof(struct gtp1u_packet));

	gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP);

	/* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the
	 * Recovery information element shall not be used, i.e. it shall
	 * be set to zero by the sender and shall be ignored by the receiver.
	 * The Recovery information element is mandatory due to backwards
	 * compatibility reasons.
	 */
	gtp_pkt->ie.tag = GTPIE_RECOVERY;
	gtp_pkt->ie.val = 0;

	iph = ip_hdr(skb);

	/* find route to the sender,
	 * src address becomes dst address and vice versa.
	 */
	rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr);
	if (IS_ERR(rt)) {
		netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
			   &iph->saddr);
		return -1;
	}

	udp_tunnel_xmit_skb(rt, gtp->sk1u, skb,
			    fl4.saddr, fl4.daddr,
			    iph->tos,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    htons(GTP1U_PORT), htons(GTP1U_PORT),
			    !net_eq(sock_net(gtp->sk1u),
				    dev_net(gtp->dev)),
			    false);
	return 0;
}
static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
	struct gtp1_header_long *gtp1u;
	struct echo_info echo;
	struct sk_buff *msg;
	struct iphdr *iph;
	int ret;

	gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));

	/* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
	 * Error Indication and Supported Extension Headers Notification
	 * messages, the S flag shall be set to 1 and TEID shall be set to 0.
	 */
	if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
		return -1;

	iph = ip_hdr(skb);

	echo.ms.addr.s_addr = iph->daddr;
	echo.peer.addr.s_addr = iph->saddr;
	echo.gtp_version = GTP_V1;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
	if (ret < 0) {
		nlmsg_free(msg);
		return ret;
	}

	return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
				       msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
}
static int gtp_parse_exthdrs(struct sk_buff *skb, unsigned int *hdrlen)
{
	struct gtp_ext_hdr *gtp_exthdr, _gtp_exthdr;
	unsigned int offset = *hdrlen;
	__u8 *next_type, _next_type;

	/* From 29.060: "The Extension Header Length field specifies the length
	 * of the particular Extension header in 4 octets units."
	 *
	 * This length field includes length field size itself (1 byte),
	 * payload (variable length) and next type (1 byte). The extension
	 * header is aligned to 4 bytes.
	 */

	do {
		gtp_exthdr = skb_header_pointer(skb, offset, sizeof(*gtp_exthdr),
						&_gtp_exthdr);
		if (!gtp_exthdr || !gtp_exthdr->len)
			return -1;

		offset += gtp_exthdr->len * 4;

		/* From 29.060: "If no such Header follows, then the value of
		 * the Next Extension Header Type shall be 0."
		 */
		next_type = skb_header_pointer(skb, offset - 1,
					       sizeof(_next_type), &_next_type);
		if (!next_type)
			return -1;

	} while (*next_type != 0);

	*hdrlen = offset;

	return 0;
}
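/* On success *hdrlen has been advanced past the whole extension header
 * chain, so callers can hand it straight to gtp_rx().
 */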
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp1_header);
	struct gtp1_header *gtp1;
	struct pdp_ctx *pctx;
	__u16 inner_proto;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp1->flags >> 5) != GTP_V1)
		return 1;

	/* If the sockets were created in kernel, it means that
	 * there is no daemon running in userspace which would
	 * handle echo request.
	 */
	if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created)
		return gtp1u_send_echo_resp(gtp, skb);

	if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created)
		return gtp1u_handle_echo_resp(gtp, skb);

	if (gtp1->type != GTP_TPDU)
		return 1;

	/* From 29.060: "This field shall be present if and only if any one or
	 * more of the S, PN and E flags are set.".
	 *
	 * If any of these bits is set, then the remaining ones also have to be
	 * present.
	 */
	if (gtp1->flags & GTP1_F_MASK)
		hdrlen += 4;

	/* Make sure the header is large enough, including extensions. */
	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) {
		netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n");
		return -1;
	}

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid),
			     gtp_proto_to_family(inner_proto));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	if (gtp1->flags & GTP1_F_EXTHDR &&
	    gtp_parse_exthdrs(skb, &hdrlen) < 0)
		return -1;

	return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);
}
static void __gtp_encap_destroy(struct sock *sk)
{
	struct gtp_dev *gtp;

	lock_sock(sk);
	gtp = sk->sk_user_data;
	if (gtp) {
		if (gtp->sk0 == sk)
			gtp->sk0 = NULL;
		else
			gtp->sk1u = NULL;
		WRITE_ONCE(udp_sk(sk)->encap_type, 0);
		rcu_assign_sk_user_data(sk, NULL);
		release_sock(sk);
		sock_put(sk);
		return;
	}
	release_sock(sk);
}

static void gtp_encap_destroy(struct sock *sk)
{
	rtnl_lock();
	__gtp_encap_destroy(sk);
	rtnl_unlock();
}
static void gtp_encap_disable_sock(struct sock *sk)
{
	if (!sk)
		return;

	__gtp_encap_destroy(sk);
}

static void gtp_encap_disable(struct gtp_dev *gtp)
{
	if (gtp->sk_created) {
		udp_tunnel_sock_release(gtp->sk0->sk_socket);
		udp_tunnel_sock_release(gtp->sk1u->sk_socket);
		gtp->sk_created = false;
		gtp->sk0 = NULL;
		gtp->sk1u = NULL;
	} else {
		gtp_encap_disable_sock(gtp->sk0);
		gtp_encap_disable_sock(gtp->sk1u);
	}
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
 */
static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct gtp_dev *gtp;
	int ret = 0;

	gtp = rcu_dereference_sk_user_data(sk);
	if (!gtp)
		return 1;

	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);

	switch (READ_ONCE(udp_sk(sk)->encap_type)) {
	case UDP_ENCAP_GTP0:
		netdev_dbg(gtp->dev, "received GTP0 packet\n");
		ret = gtp0_udp_encap_recv(gtp, skb);
		break;
	case UDP_ENCAP_GTP1U:
		netdev_dbg(gtp->dev, "received GTP1U packet\n");
		ret = gtp1u_udp_encap_recv(gtp, skb);
		break;
	default:
		ret = -1; /* Shouldn't happen. */
	}

	switch (ret) {
	case 1:
		netdev_dbg(gtp->dev, "pass up to the process\n");
		break;
	case 0:
		break;
	case -1:
		netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
		kfree_skb(skb);
		ret = 0;
		break;
	}

	return ret;
}
static void gtp_dev_uninit(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp_encap_disable(gtp);
}
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp0_header *gtp0;

	gtp0 = skb_push(skb, sizeof(*gtp0));

	gtp0->flags	= 0x1e; /* v0, GTP-non-prime. */
	gtp0->type	= GTP_TPDU;
	gtp0->length	= htons(payload_len);
	gtp0->seq	= htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
	gtp0->flow	= htons(pctx->u.v0.flow);
	gtp0->number	= 0xff;
	gtp0->spare[0]	= gtp0->spare[1] = gtp0->spare[2] = 0xff;
	gtp0->tid	= cpu_to_be64(pctx->u.v0.tid);
}
static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp1_header *gtp1;

	gtp1 = skb_push(skb, sizeof(*gtp1));

	/* Bits    8  7  6  5  4  3  2  1
	 *	  +--+--+--+--+--+--+--+--+
	 *	  |version |PT| 0| E| S|PN|
	 *	  +--+--+--+--+--+--+--+--+
	 *	    0  0  1  1  0  0  0  0
	 */
	gtp1->flags	= 0x30; /* v1, GTP-non-prime. */
	gtp1->type	= GTP_TPDU;
	gtp1->length	= htons(payload_len);
	gtp1->tid	= htonl(pctx->u.v1.o_tei);

	/* TODO: Support for extension header, sequence number and N-PDU.
	 *	 Update the length field if any of them is available.
	 */
}
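/* No sequence number is emitted on this path: flags 0x30 leaves the E,
 * S and PN bits clear, keeping the GTPv1 header at its 8 byte minimum,
 * whereas gtp0_push_header() above always emits the full 20 byte GTPv0
 * header including a sequence number.
 */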
struct gtp_pktinfo {
	struct sock		*sk;
	union {
		struct flowi4	fl4;
		struct flowi6	fl6;
	};
	union {
		struct rtable	*rt;
		struct rt6_info	*rt6;
	};
	struct pdp_ctx		*pctx;
	struct net_device	*dev;
	__u8			tos;
	__be16			gtph_port;
};
static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
{
	switch (pktinfo->pctx->gtp_version) {
	case GTP_V0:
		pktinfo->gtph_port = htons(GTP0_PORT);
		gtp0_push_header(skb, pktinfo->pctx);
		break;
	case GTP_V1:
		pktinfo->gtph_port = htons(GTP1U_PORT);
		gtp1_push_header(skb, pktinfo->pctx);
		break;
	}
}
static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
					struct sock *sk, __u8 tos,
					struct pdp_ctx *pctx, struct rtable *rt,
					struct flowi4 *fl4,
					struct net_device *dev)
{
	pktinfo->sk	= sk;
	pktinfo->tos	= tos;
	pktinfo->pctx	= pctx;
	pktinfo->rt	= rt;
	pktinfo->fl4	= *fl4;
	pktinfo->dev	= dev;
}

static void gtp_set_pktinfo_ipv6(struct gtp_pktinfo *pktinfo,
				 struct sock *sk, __u8 tos,
				 struct pdp_ctx *pctx, struct rt6_info *rt6,
				 struct flowi6 *fl6,
				 struct net_device *dev)
{
	pktinfo->sk	= sk;
	pktinfo->tos	= tos;
	pktinfo->pctx	= pctx;
	pktinfo->rt6	= rt6;
	pktinfo->fl6	= *fl6;
	pktinfo->dev	= dev;
}
static int gtp_build_skb_outer_ip4(struct sk_buff *skb, struct net_device *dev,
				   struct gtp_pktinfo *pktinfo,
				   struct pdp_ctx *pctx, __u8 tos,
				   __be16 frag_off)
{
	struct rtable *rt;
	struct flowi4 fl4;
	__be16 df;
	int mtu;

	rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer.addr.s_addr,
				  inet_sk(pctx->sk)->inet_saddr);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to SGSN %pI4\n",
			   &pctx->peer.addr.s_addr);
		dev->stats.tx_carrier_errors++;
		goto err;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to SGSN %pI4\n",
			   &pctx->peer.addr.s_addr);
		dev->stats.collisions++;
		goto err_rt;
	}

	/* This is similar to tnl_update_pmtu(). */
	df = frag_off;
	if (df) {
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
			sizeof(struct iphdr) - sizeof(struct udphdr);
		switch (pctx->gtp_version) {
		case GTP_V0:
			mtu -= sizeof(struct gtp0_header);
			break;
		case GTP_V1:
			mtu -= sizeof(struct gtp1_header);
			break;
		}
	} else {
		mtu = dst_mtu(&rt->dst);
	}

	skb_dst_update_pmtu_no_confirm(skb, mtu);

	if (frag_off & htons(IP_DF) &&
	    ((!skb_is_gso(skb) && skb->len > mtu) ||
	     (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
		netdev_dbg(dev, "packet too big, fragmentation needed\n");
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			      htonl(mtu));
		goto err_rt;
	}

	gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, tos, pctx, rt, &fl4, dev);
	gtp_push_header(skb, pktinfo);

	return 0;
err_rt:
	ip_rt_put(rt);
err:
	return -EBADMSG;
}
static int gtp_build_skb_outer_ip6(struct net *net, struct sk_buff *skb,
				   struct net_device *dev,
				   struct gtp_pktinfo *pktinfo,
				   struct pdp_ctx *pctx, __u8 tos)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct flowi6 fl6;
	int mtu;

	rt = ip6_route_output_gtp(net, &fl6, pctx->sk, &pctx->peer.addr6,
				  &inet6_sk(pctx->sk)->saddr);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to SGSN %pI6\n",
			   &pctx->peer.addr6);
		dev->stats.tx_carrier_errors++;
		goto err;
	}
	dst = &rt->dst;

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to SGSN %pI6\n",
			   &pctx->peer.addr6);
		dev->stats.collisions++;
		goto err_rt;
	}

	mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
		sizeof(struct ipv6hdr) - sizeof(struct udphdr);
	switch (pctx->gtp_version) {
	case GTP_V0:
		mtu -= sizeof(struct gtp0_header);
		break;
	case GTP_V1:
		mtu -= sizeof(struct gtp1_header);
		break;
	}

	skb_dst_update_pmtu_no_confirm(skb, mtu);

	if ((!skb_is_gso(skb) && skb->len > mtu) ||
	    (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))) {
		netdev_dbg(dev, "packet too big, fragmentation needed\n");
		icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		goto err_rt;
	}

	gtp_set_pktinfo_ipv6(pktinfo, pctx->sk, tos, pctx, rt, &fl6, dev);
	gtp_push_header(skb, pktinfo);

	return 0;
err_rt:
	dst_release(dst);
err:
	return -EBADMSG;
}
static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
			     struct gtp_pktinfo *pktinfo)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct net *net = gtp->net;
	struct pdp_ctx *pctx;
	struct iphdr *iph;
	int ret;

	/* Read the IP destination address and resolve the PDP context.
	 * Prepend PDP header with TEI/TID from PDP ctx.
	 */
	iph = ip_hdr(skb);
	if (gtp->role == GTP_ROLE_SGSN)
		pctx = ipv4_pdp_find(gtp, iph->saddr);
	else
		pctx = ipv4_pdp_find(gtp, iph->daddr);

	if (!pctx) {
		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
			   &iph->daddr);
		return -ENOENT;
	}
	netdev_dbg(dev, "found PDP context %p\n", pctx);

	switch (pctx->sk->sk_family) {
	case AF_INET:
		ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx,
					      iph->tos, iph->frag_off);
		break;
	case AF_INET6:
		ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx,
					      iph->tos);
		break;
	default:
		ret = -1;
		WARN_ON_ONCE(1);
		break;
	}

	if (ret < 0)
		return ret;

	netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n",
		   &iph->saddr, &iph->daddr);

	return 0;
}
static int gtp_build_skb_ip6(struct sk_buff *skb, struct net_device *dev,
			     struct gtp_pktinfo *pktinfo)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct net *net = gtp->net;
	struct pdp_ctx *pctx;
	struct ipv6hdr *ip6h;
	__u8 tos;
	int ret;

	/* Read the IP destination address and resolve the PDP context.
	 * Prepend PDP header with TEI/TID from PDP ctx.
	 */
	ip6h = ipv6_hdr(skb);
	if (gtp->role == GTP_ROLE_SGSN)
		pctx = ipv6_pdp_find(gtp, &ip6h->saddr);
	else
		pctx = ipv6_pdp_find(gtp, &ip6h->daddr);

	if (!pctx) {
		netdev_dbg(dev, "no PDP ctx found for %pI6, skip\n",
			   &ip6h->daddr);
		return -ENOENT;
	}
	netdev_dbg(dev, "found PDP context %p\n", pctx);

	tos = ipv6_get_dsfield(ip6h);

	switch (pctx->sk->sk_family) {
	case AF_INET:
		ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx, tos, 0);
		break;
	case AF_INET6:
		ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx, tos);
		break;
	default:
		ret = -1;
		WARN_ON_ONCE(1);
		break;
	}

	if (ret < 0)
		return ret;

	netdev_dbg(dev, "gtp -> IP src: %pI6 dst: %pI6\n",
		   &ip6h->saddr, &ip6h->daddr);

	return 0;
}
static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int proto = ntohs(skb->protocol);
	struct gtp_pktinfo pktinfo;
	int err;

	/* Ensure there is sufficient headroom. */
	if (skb_cow_head(skb, dev->needed_headroom))
		goto tx_err;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	skb_reset_inner_headers(skb);

	/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
	rcu_read_lock();
	switch (proto) {
	case ETH_P_IP:
		err = gtp_build_skb_ip4(skb, dev, &pktinfo);
		break;
	case ETH_P_IPV6:
		err = gtp_build_skb_ip6(skb, dev, &pktinfo);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	rcu_read_unlock();

	if (err < 0)
		goto tx_err;

	switch (pktinfo.pctx->sk->sk_family) {
	case AF_INET:
		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
				    pktinfo.tos,
				    ip4_dst_hoplimit(&pktinfo.rt->dst),
				    0,
				    pktinfo.gtph_port, pktinfo.gtph_port,
				    !net_eq(sock_net(pktinfo.pctx->sk),
					    dev_net(dev)),
				    false);
		break;
	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		udp_tunnel6_xmit_skb(&pktinfo.rt6->dst, pktinfo.sk, skb, dev,
				     &pktinfo.fl6.saddr, &pktinfo.fl6.daddr,
				     pktinfo.tos,
				     ip6_dst_hoplimit(&pktinfo.rt->dst),
				     0,
				     pktinfo.gtph_port, pktinfo.gtph_port,
				     false);
#else
		goto tx_err;
#endif
		break;
	}

	return NETDEV_TX_OK;
tx_err:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
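/* Transmit path in short: the inner IP destination (or source when
 * acting as SGSN) selects the PDP context, gtp_build_skb_ip[46]()
 * routes towards the peer and pushes the GTP header, and
 * udp_tunnel(6)_xmit_skb() prepends UDP plus the outer IP header.
 */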
static const struct net_device_ops gtp_netdev_ops = {
	.ndo_uninit	= gtp_dev_uninit,
	.ndo_start_xmit	= gtp_dev_xmit,
};

static const struct device_type gtp_type = {
	.name = "gtp",
};

#define GTP_TH_MAXLEN	(sizeof(struct udphdr) + sizeof(struct gtp0_header))
#define GTP_IPV4_MAXLEN	(sizeof(struct iphdr) + GTP_TH_MAXLEN)
static void gtp_link_setup(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	dev->netdev_ops		= &gtp_netdev_ops;
	dev->needs_free_netdev	= true;
	SET_NETDEV_DEVTYPE(dev, &gtp_type);

	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->mtu = ETH_DATA_LEN - GTP_IPV4_MAXLEN;

	/* Zero header length. */
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;

	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	dev->priv_flags	|= IFF_NO_QUEUE;
	netif_keep_dst(dev);

	dev->needed_headroom	= LL_MAX_HEADER + GTP_IPV4_MAXLEN;
	gtp->dev = dev;
}
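/* With ETH_DATA_LEN = 1500 this yields an initial MTU of
 * 1500 - (20 + 8 + 20) = 1452 bytes; GTP_TH_MAXLEN is based on the
 * GTPv0 header because, at 20 bytes, it is the larger of the two
 * header formats.
 */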
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
static void gtp_destructor(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
}
static int gtp_sock_udp_config(struct udp_port_cfg *udp_conf,
			       const struct nlattr *nla, int family)
{
	udp_conf->family = family;

	switch (udp_conf->family) {
	case AF_INET:
		udp_conf->local_ip.s_addr = nla_get_be32(nla);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		udp_conf->local_ip6 = nla_get_in6_addr(nla);
		break;
#endif
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp,
				    const struct nlattr *nla, int family)
{
	struct udp_tunnel_sock_cfg tuncfg = {};
	struct udp_port_cfg udp_conf = {};
	struct net *net = gtp->net;
	struct socket *sock;
	int err;

	if (nla) {
		err = gtp_sock_udp_config(&udp_conf, nla, family);
		if (err < 0)
			return ERR_PTR(err);
	} else {
		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
		udp_conf.family = AF_INET;
	}

	if (type == UDP_ENCAP_GTP0)
		udp_conf.local_udp_port = htons(GTP0_PORT);
	else if (type == UDP_ENCAP_GTP1U)
		udp_conf.local_udp_port = htons(GTP1U_PORT);
	else
		return ERR_PTR(-EINVAL);

	err = udp_sock_create(net, &udp_conf, &sock);
	if (err)
		return ERR_PTR(err);

	tuncfg.sk_user_data = gtp;
	tuncfg.encap_type = type;
	tuncfg.encap_rcv = gtp_encap_recv;
	tuncfg.encap_destroy = NULL;

	setup_udp_tunnel_sock(net, sock, &tuncfg);

	return sock->sk;
}
static int gtp_create_sockets(struct gtp_dev *gtp, const struct nlattr *nla,
			      int family)
{
	struct sock *sk1u;
	struct sock *sk0;

	sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp, nla, family);
	if (IS_ERR(sk0))
		return PTR_ERR(sk0);

	sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp, nla, family);
	if (IS_ERR(sk1u)) {
		udp_tunnel_sock_release(sk0->sk_socket);
		return PTR_ERR(sk1u);
	}

	gtp->sk_created = true;
	gtp->sk0 = sk0;
	gtp->sk1u = sk1u;

	return 0;
}
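/* The two UDP sockets are either created here inside the kernel
 * (IFLA_GTP_CREATE_SOCKETS, which also enables the built-in echo
 * handling guarded by sk_created) or inherited from a userspace daemon
 * through IFLA_GTP_FD0/IFLA_GTP_FD1 via gtp_encap_enable().
 */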
#define GTP_TH_MAXLEN	(sizeof(struct udphdr) + sizeof(struct gtp0_header))
#define GTP_IPV6_MAXLEN	(sizeof(struct ipv6hdr) + GTP_TH_MAXLEN)
static int gtp_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	unsigned int role = GTP_ROLE_GGSN;
	struct gtp_dev *gtp;
	struct gtp_net *gn;
	int hashsize, err;

#if !IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_GTP_LOCAL6])
		return -EAFNOSUPPORT;
#endif

	gtp = netdev_priv(dev);

	if (!data[IFLA_GTP_PDP_HASHSIZE]) {
		hashsize = 1024;
	} else {
		hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
		if (!hashsize)
			hashsize = 1024;
	}

	if (data[IFLA_GTP_ROLE]) {
		role = nla_get_u32(data[IFLA_GTP_ROLE]);
		if (role > GTP_ROLE_SGSN)
			return -EINVAL;
	}
	gtp->role = role;

	gtp->restart_count = nla_get_u8_default(data[IFLA_GTP_RESTART_COUNT],
						0);

	gtp->net = src_net;

	err = gtp_hashtable_new(gtp, hashsize);
	if (err < 0)
		return err;

	if (data[IFLA_GTP_CREATE_SOCKETS]) {
		if (data[IFLA_GTP_LOCAL6])
			err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL6], AF_INET6);
		else
			err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL], AF_INET);
	} else {
		err = gtp_encap_enable(gtp, data);
	}

	if (err < 0)
		goto out_hashtable;

	if ((gtp->sk0 && gtp->sk0->sk_family == AF_INET6) ||
	    (gtp->sk1u && gtp->sk1u->sk_family == AF_INET6)) {
		dev->mtu = ETH_DATA_LEN - GTP_IPV6_MAXLEN;
		dev->needed_headroom = LL_MAX_HEADER + GTP_IPV6_MAXLEN;
	}

	err = register_netdevice(dev);
	if (err < 0) {
		netdev_dbg(dev, "failed to register new netdev %d\n", err);
		goto out_encap;
	}

	gn = net_generic(dev_net(dev), gtp_net_id);
	list_add_rcu(&gtp->list, &gn->gtp_dev_list);
	dev->priv_destructor = gtp_destructor;

	netdev_dbg(dev, "registered new GTP interface\n");

	return 0;

out_encap:
	gtp_encap_disable(gtp);
out_hashtable:
	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
	return err;
}
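/* Illustrative userspace setup for the kernel-socket mode, assuming a
 * reasonably recent iproute2 (exact attribute spelling may differ by
 * version):
 *
 *   ip link add gtp1 type gtp role sgsn create-sockets restart-count 5
 *
 * The canonical interface is the set of IFLA_GTP_* attributes parsed
 * above.
 */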
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct hlist_node *next;
	struct pdp_ctx *pctx;
	int i;

	for (i = 0; i < gtp->hash_size; i++)
		hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
			pdp_context_delete(pctx);

	list_del_rcu(&gtp->list);
	unregister_netdevice_queue(dev, head);
}
static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
	[IFLA_GTP_FD0]			= { .type = NLA_U32 },
	[IFLA_GTP_FD1]			= { .type = NLA_U32 },
	[IFLA_GTP_PDP_HASHSIZE]		= { .type = NLA_U32 },
	[IFLA_GTP_ROLE]			= { .type = NLA_U32 },
	[IFLA_GTP_CREATE_SOCKETS]	= { .type = NLA_U8 },
	[IFLA_GTP_RESTART_COUNT]	= { .type = NLA_U8 },
	[IFLA_GTP_LOCAL]		= { .type = NLA_U32 },
	[IFLA_GTP_LOCAL6]		= { .len = sizeof(struct in6_addr) },
};
static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (!data)
		return -EINVAL;

	return 0;
}
static size_t gtp_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */
		nla_total_size(sizeof(__u32)) + /* IFLA_GTP_ROLE */
		nla_total_size(sizeof(__u8)); /* IFLA_GTP_RESTART_COUNT */
}
static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
		goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops gtp_link_ops __read_mostly = {
	.kind		= "gtp",
	.maxtype	= IFLA_GTP_MAX,
	.policy		= gtp_policy,
	.priv_size	= sizeof(struct gtp_dev),
	.setup		= gtp_link_setup,
	.validate	= gtp_validate,
	.newlink	= gtp_newlink,
	.dellink	= gtp_dellink,
	.get_size	= gtp_get_size,
	.fill_info	= gtp_fill_info,
};
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
	int i;

	gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
				       GFP_KERNEL | __GFP_NOWARN);
	if (gtp->addr_hash == NULL)
		return -ENOMEM;

	gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
				      GFP_KERNEL | __GFP_NOWARN);
	if (gtp->tid_hash == NULL)
		goto err1;

	gtp->hash_size = hsize;

	for (i = 0; i < hsize; i++) {
		INIT_HLIST_HEAD(&gtp->addr_hash[i]);
		INIT_HLIST_HEAD(&gtp->tid_hash[i]);
	}
	return 0;
err1:
	kfree(gtp->addr_hash);
	return -ENOMEM;
}
static struct sock *gtp_encap_enable_socket(int fd, int type,
					    struct gtp_dev *gtp)
{
	struct udp_tunnel_sock_cfg tuncfg = {NULL};
	struct socket *sock;
	struct sock *sk;
	int err;

	pr_debug("enable gtp on %d, %d\n", fd, type);

	sock = sockfd_lookup(fd, &err);
	if (!sock) {
		pr_debug("gtp socket fd=%d not found\n", fd);
		return ERR_PTR(err);
	}

	sk = sock->sk;
	if (sk->sk_protocol != IPPROTO_UDP ||
	    sk->sk_type != SOCK_DGRAM ||
	    (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
		pr_debug("socket fd=%d not UDP\n", fd);
		sk = ERR_PTR(-EINVAL);
		goto out_sock;
	}

	if (sk->sk_family == AF_INET6 &&
	    !sk->sk_ipv6only) {
		sk = ERR_PTR(-EADDRNOTAVAIL);
		goto out_sock;
	}

	lock_sock(sock->sk);
	if (sk->sk_user_data) {
		sk = ERR_PTR(-EBUSY);
		goto out_rel_sock;
	}

	tuncfg.sk_user_data = gtp;
	tuncfg.encap_type = type;
	tuncfg.encap_rcv = gtp_encap_recv;
	tuncfg.encap_destroy = gtp_encap_destroy;

	setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);

out_rel_sock:
	release_sock(sock->sk);
out_sock:
	sockfd_put(sock);
	return sk;
}
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
{
	struct sock *sk1u = NULL;
	struct sock *sk0 = NULL;

	if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
		return -EINVAL;

	if (data[IFLA_GTP_FD0]) {
		int fd0 = nla_get_u32(data[IFLA_GTP_FD0]);

		sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
		if (IS_ERR(sk0))
			return PTR_ERR(sk0);
	}

	if (data[IFLA_GTP_FD1]) {
		int fd1 = nla_get_u32(data[IFLA_GTP_FD1]);

		sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
		if (IS_ERR(sk1u)) {
			gtp_encap_disable_sock(sk0);
			return PTR_ERR(sk1u);
		}
	}

	if (sk0 && sk1u &&
	    sk0->sk_family != sk1u->sk_family) {
		gtp_encap_disable_sock(sk0);
		gtp_encap_disable_sock(sk1u);
		return -EINVAL;
	}

	gtp->sk0 = sk0;
	gtp->sk1u = sk1u;

	return 0;
}
static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
{
	struct gtp_dev *gtp = NULL;
	struct net_device *dev;
	struct net *net;

	/* Examine the link attributes and figure out which network namespace
	 * we are talking about.
	 */
	if (nla[GTPA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
	else
		net = get_net(src_net);

	if (IS_ERR(net))
		return NULL;

	/* Check if there's an existing gtpX device to configure */
	dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
	if (dev && dev->netdev_ops == &gtp_netdev_ops)
		gtp = netdev_priv(dev);

	put_net(net);
	return gtp;
}
static void gtp_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
	pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
		 * label needs to be the same for uplink and downlink packets,
		 * so let's annotate this.
		 */
		pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
		pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
		break;
	case GTP_V1:
		pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
		pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
		break;
	default:
		break;
	}
}
static void ip_pdp_peer_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
	if (info->attrs[GTPA_PEER_ADDRESS]) {
		pctx->peer.addr.s_addr =
			nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
	} else if (info->attrs[GTPA_PEER_ADDR6]) {
		pctx->peer.addr6 = nla_get_in6_addr(info->attrs[GTPA_PEER_ADDR6]);
	}
}
static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
	ip_pdp_peer_fill(pctx, info);
	pctx->ms.addr.s_addr =
		nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
	gtp_pdp_fill(pctx, info);
}
static bool ipv6_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
	ip_pdp_peer_fill(pctx, info);
	pctx->ms.addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]);
	if (pctx->ms.addr6.s6_addr32[2] ||
	    pctx->ms.addr6.s6_addr32[3])
		return false;

	gtp_pdp_fill(pctx, info);

	return true;
}
static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
				   struct genl_info *info)
{
	struct pdp_ctx *pctx, *pctx_tid = NULL;
	struct net_device *dev = gtp->dev;
	u32 hash_ms, hash_tid = 0;
	struct in6_addr ms_addr6;
	unsigned int version;
	bool found = false;
	__be32 ms_addr;
	int family;

	version = nla_get_u32(info->attrs[GTPA_VERSION]);

	family = nla_get_u8_default(info->attrs[GTPA_FAMILY], AF_INET);

#if !IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		return ERR_PTR(-EAFNOSUPPORT);
#endif
	if (!info->attrs[GTPA_PEER_ADDRESS] &&
	    !info->attrs[GTPA_PEER_ADDR6])
		return ERR_PTR(-EINVAL);

	if ((info->attrs[GTPA_PEER_ADDRESS] &&
	     sk->sk_family == AF_INET6) ||
	    (info->attrs[GTPA_PEER_ADDR6] &&
	     sk->sk_family == AF_INET))
		return ERR_PTR(-EAFNOSUPPORT);

	switch (family) {
	case AF_INET:
		if (!info->attrs[GTPA_MS_ADDRESS] ||
		    info->attrs[GTPA_MS_ADDR6])
			return ERR_PTR(-EINVAL);

		ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
		hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
		pctx = ipv4_pdp_find(gtp, ms_addr);
		break;
	case AF_INET6:
		if (!info->attrs[GTPA_MS_ADDR6] ||
		    info->attrs[GTPA_MS_ADDRESS])
			return ERR_PTR(-EINVAL);

		ms_addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]);
		hash_ms = ipv6_hashfn(&ms_addr6) % gtp->hash_size;
		pctx = ipv6_pdp_find(gtp, &ms_addr6);
		break;
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}
	if (pctx)
		found = true;
	if (version == GTP_V0)
		pctx_tid = gtp0_pdp_find(gtp,
					 nla_get_u64(info->attrs[GTPA_TID]),
					 family);
	else if (version == GTP_V1)
		pctx_tid = gtp1_pdp_find(gtp,
					 nla_get_u32(info->attrs[GTPA_I_TEI]),
					 family);
	if (pctx_tid)
		found = true;

	if (found) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			return ERR_PTR(-EEXIST);
		if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
			return ERR_PTR(-EOPNOTSUPP);

		if (pctx && pctx_tid)
			return ERR_PTR(-EEXIST);
		if (!pctx)
			pctx = pctx_tid;

		switch (pctx->af) {
		case AF_INET:
			ipv4_pdp_fill(pctx, info);
			break;
		case AF_INET6:
			if (!ipv6_pdp_fill(pctx, info))
				return ERR_PTR(-EADDRNOTAVAIL);
			break;
		}

		if (pctx->gtp_version == GTP_V0)
			netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
				   pctx->u.v0.tid, pctx);
		else if (pctx->gtp_version == GTP_V1)
			netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
				   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

		return pctx;
	}

	pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
	if (pctx == NULL)
		return ERR_PTR(-ENOMEM);

	sock_hold(sk);
	pctx->sk = sk;
	pctx->dev = gtp->dev;
	pctx->af = family;

	switch (pctx->af) {
	case AF_INET:
		if (!info->attrs[GTPA_MS_ADDRESS]) {
			sock_put(sk);
			kfree(pctx);
			return ERR_PTR(-EINVAL);
		}

		ipv4_pdp_fill(pctx, info);
		break;
	case AF_INET6:
		if (!info->attrs[GTPA_MS_ADDR6]) {
			sock_put(sk);
			kfree(pctx);
			return ERR_PTR(-EINVAL);
		}

		if (!ipv6_pdp_fill(pctx, info)) {
			sock_put(sk);
			kfree(pctx);
			return ERR_PTR(-EADDRNOTAVAIL);
		}
		break;
	}
	atomic_set(&pctx->tx_seq, 0);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* TS 09.60: "The flow label identifies unambiguously a GTP
		 * flow.". We use the tid for this instead, I cannot find a
		 * situation in which this doesn't unambiguously identify the
		 * PDP context.
		 */
		hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
		break;
	case GTP_V1:
		hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
		break;
	}

	hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
	hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v0.tid, &pctx->peer.addr,
			   &pctx->ms.addr, pctx);
		break;
	case GTP_V1:
		netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei,
			   &pctx->peer.addr, &pctx->ms.addr, pctx);
		break;
	}

	return pctx;
}
static void pdp_context_free(struct rcu_head *head)
{
	struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);

	sock_put(pctx->sk);
	kfree(pctx);
}
static void pdp_context_delete(struct pdp_ctx *pctx)
{
	hlist_del_rcu(&pctx->hlist_tid);
	hlist_del_rcu(&pctx->hlist_addr);
	call_rcu(&pctx->rcu_head, pdp_context_free);
}
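/* The context is unlinked from both hash tables immediately, but the
 * memory is only freed after an RCU grace period, so lockless readers
 * on the packet path can never dereference a freed pdp_ctx.
 */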
static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);
static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
	unsigned int version;
	struct pdp_ctx *pctx;
	struct gtp_dev *gtp;
	struct sock *sk;
	int err;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK])
		return -EINVAL;

	version = nla_get_u32(info->attrs[GTPA_VERSION]);

	switch (version) {
	case GTP_V0:
		if (!info->attrs[GTPA_TID] ||
		    !info->attrs[GTPA_FLOW])
			return -EINVAL;
		break;
	case GTP_V1:
		if (!info->attrs[GTPA_I_TEI] ||
		    !info->attrs[GTPA_O_TEI])
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	rtnl_lock();

	gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
	if (!gtp) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (version == GTP_V0)
		sk = gtp->sk0;
	else if (version == GTP_V1)
		sk = gtp->sk1u;
	else
		sk = NULL;

	if (!sk) {
		err = -ENODEV;
		goto out_unlock;
	}

	pctx = gtp_pdp_add(gtp, sk, info);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
	} else {
		gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL);
		err = 0;
	}

out_unlock:
	rtnl_unlock();
	return err;
}
static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
					    struct nlattr *nla[])
{
	struct gtp_dev *gtp;
	int family;

	family = nla_get_u8_default(nla[GTPA_FAMILY], AF_INET);

	gtp = gtp_find_dev(net, nla);
	if (!gtp)
		return ERR_PTR(-ENODEV);

	if (nla[GTPA_MS_ADDRESS]) {
		__be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);

		if (family != AF_INET)
			return ERR_PTR(-EINVAL);

		return ipv4_pdp_find(gtp, ip);
	} else if (nla[GTPA_MS_ADDR6]) {
		struct in6_addr addr = nla_get_in6_addr(nla[GTPA_MS_ADDR6]);

		if (family != AF_INET6)
			return ERR_PTR(-EINVAL);

		if (addr.s6_addr32[2] ||
		    addr.s6_addr32[3])
			return ERR_PTR(-EADDRNOTAVAIL);

		return ipv6_pdp_find(gtp, &addr);
	} else if (nla[GTPA_VERSION]) {
		u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);

		if (gtp_version == GTP_V0 && nla[GTPA_TID]) {
			return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]),
					     family);
		} else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI]) {
			return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]),
					     family);
		}
	}

	return ERR_PTR(-EINVAL);
}
static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
{
	struct pdp_ctx *pctx;

	if (nla[GTPA_LINK])
		pctx = gtp_find_pdp_by_link(net, nla);
	else
		pctx = ERR_PTR(-EINVAL);

	if (!pctx)
		pctx = ERR_PTR(-ENOENT);

	return pctx;
}
static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx;
	int err = 0;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto out_unlock;
	}

	if (pctx->gtp_version == GTP_V0)
		netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
			   pctx->u.v0.tid, pctx);
	else if (pctx->gtp_version == GTP_V1)
		netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

	gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
	pdp_context_delete(pctx);

out_unlock:
	rcu_read_unlock();
	return err;
}
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
			      int flags, u32 type, struct pdp_ctx *pctx)
{
	void *genlh;

	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
			    type);
	if (genlh == NULL)
		goto nlmsg_failure;

	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
	    nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
	    nla_put_u8(skb, GTPA_FAMILY, pctx->af))
		goto nla_put_failure;

	switch (pctx->af) {
	case AF_INET:
		if (nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms.addr.s_addr))
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(skb, GTPA_MS_ADDR6, &pctx->ms.addr6))
			goto nla_put_failure;
		break;
	}

	switch (pctx->sk->sk_family) {
	case AF_INET:
		if (nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer.addr.s_addr))
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(skb, GTPA_PEER_ADDR6, &pctx->peer.addr6))
			goto nla_put_failure;
		break;
	}

	switch (pctx->gtp_version) {
	case GTP_V0:
		if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
		    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
			goto nla_put_failure;
		break;
	case GTP_V1:
		if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
		    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
			goto nla_put_failure;
		break;
	}
	genlmsg_end(skb, genlh);
	return 0;

nlmsg_failure:
nla_put_failure:
	genlmsg_cancel(skb, genlh);
	return -EMSGSIZE;
}
static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
{
	struct sk_buff *msg;
	int ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
	if (!msg)
		return -ENOMEM;

	ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
	if (ret < 0) {
		nlmsg_free(msg);
		return ret;
	}

	ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
				      0, GTP_GENL_MCGRP, GFP_ATOMIC);
	return ret;
}
static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx = NULL;
	struct sk_buff *skb2;
	int err;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto err_unlock;
	}

	skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb2 == NULL) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
				 0, info->nlhdr->nlmsg_type, pctx);
	if (err < 0)
		goto err_unlock_free;

	rcu_read_unlock();
	return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);

err_unlock_free:
	kfree_skb(skb2);
err_unlock:
	rcu_read_unlock();
	return err;
}
static int gtp_genl_dump_pdp(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
	int i, j, bucket = cb->args[0], skip = cb->args[1];
	struct net *net = sock_net(skb->sk);
	struct pdp_ctx *pctx;
	struct gtp_net *gn;

	gn = net_generic(net, gtp_net_id);

	if (cb->args[4])
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
		if (last_gtp && last_gtp != gtp)
			continue;
		else
			last_gtp = NULL;

		for (i = bucket; i < gtp->hash_size; i++) {
			j = 0;
			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
						 hlist_tid) {
				if (j >= skip &&
				    gtp_genl_fill_info(skb,
					    NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    NLM_F_MULTI,
					    cb->nlh->nlmsg_type, pctx)) {
					cb->args[0] = i;
					cb->args[1] = j;
					cb->args[2] = (unsigned long)gtp;
					goto out;
				}
				j++;
			}
			skip = 0;
		}
		bucket = 0;
	}
	cb->args[4] = 1;
out:
	rcu_read_unlock();
	return skb->len;
}
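/* cb->args[] is the resume cursor between dump messages: args[0] is the
 * hash bucket, args[1] the number of entries to skip inside it, args[2]
 * the last device visited and args[4] the dump-complete flag.
 */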
static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *skb_to_send;
	__be32 src_ip, dst_ip;
	unsigned int version;
	struct gtp_dev *gtp;
	struct flowi4 fl4;
	struct rtable *rt;
	struct sock *sk;
	__be16 port;
	int len;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK] ||
	    !info->attrs[GTPA_PEER_ADDRESS] ||
	    !info->attrs[GTPA_MS_ADDRESS])
		return -EINVAL;

	version = nla_get_u32(info->attrs[GTPA_VERSION]);
	dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
	src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

	gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
	if (!gtp)
		return -ENODEV;

	if (!gtp->sk_created)
		return -EOPNOTSUPP;
	if (!(gtp->dev->flags & IFF_UP))
		return -ENETDOWN;

	if (version == GTP_V0) {
		struct gtp0_header *gtp0_h;

		len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) +
			sizeof(struct iphdr) + sizeof(struct udphdr);

		skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
		if (!skb_to_send)
			return -ENOMEM;

		sk = gtp->sk0;
		port = htons(GTP0_PORT);

		gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header));
		memset(gtp0_h, 0, sizeof(struct gtp0_header));
		gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ);
	} else if (version == GTP_V1) {
		struct gtp1_header_long *gtp1u_h;

		len = LL_RESERVED_SPACE(gtp->dev) +
			sizeof(struct gtp1_header_long) +
			sizeof(struct iphdr) + sizeof(struct udphdr);

		skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
		if (!skb_to_send)
			return -ENOMEM;

		sk = gtp->sk1u;
		port = htons(GTP1U_PORT);

		gtp1u_h = skb_push(skb_to_send,
				   sizeof(struct gtp1_header_long));
		memset(gtp1u_h, 0, sizeof(struct gtp1_header_long));
		gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ);
	} else {
		return -ENODEV;
	}

	rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip);
	if (IS_ERR(rt)) {
		netdev_dbg(gtp->dev, "no route for echo request to %pI4\n",
			   &dst_ip);
		kfree_skb(skb_to_send);
		return -ENODEV;
	}

	udp_tunnel_xmit_skb(rt, sk, skb_to_send,
			    fl4.saddr, fl4.daddr,
			    fl4.flowi4_tos,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    port, port,
			    !net_eq(sock_net(sk),
				    dev_net(gtp->dev)),
			    false);
	return 0;
}
static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
	[GTPA_LINK]		= { .type = NLA_U32, },
	[GTPA_VERSION]		= { .type = NLA_U32, },
	[GTPA_TID]		= { .type = NLA_U64, },
	[GTPA_PEER_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_MS_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_FLOW]		= { .type = NLA_U16, },
	[GTPA_NET_NS_FD]	= { .type = NLA_U32, },
	[GTPA_I_TEI]		= { .type = NLA_U32, },
	[GTPA_O_TEI]		= { .type = NLA_U32, },
	[GTPA_PEER_ADDR6]	= { .len = sizeof(struct in6_addr), },
	[GTPA_MS_ADDR6]		= { .len = sizeof(struct in6_addr), },
	[GTPA_FAMILY]		= { .type = NLA_U8, },
};
static const struct genl_small_ops gtp_genl_ops[] = {
	{
		.cmd = GTP_CMD_NEWPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_new_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_DELPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_del_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_GETPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_get_pdp,
		.dumpit = gtp_genl_dump_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_ECHOREQ,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_send_echo_req,
		.flags = GENL_ADMIN_PERM,
	},
};
static struct genl_family gtp_genl_family __ro_after_init = {
	.name		= "gtp",
	.version	= 0,
	.hdrsize	= 0,
	.maxattr	= GTPA_MAX,
	.policy		= gtp_genl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.small_ops	= gtp_genl_ops,
	.n_small_ops	= ARRAY_SIZE(gtp_genl_ops),
	.resv_start_op	= GTP_CMD_ECHOREQ + 1,
	.mcgrps		= gtp_genl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(gtp_genl_mcgrps),
};
static int __net_init gtp_net_init(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);

	INIT_LIST_HEAD(&gn->gtp_dev_list);
	return 0;
}
static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
					       struct list_head *dev_to_kill)
{
	struct net *net;

	list_for_each_entry(net, net_list, exit_list) {
		struct gtp_net *gn = net_generic(net, gtp_net_id);
		struct gtp_dev *gtp;

		list_for_each_entry(gtp, &gn->gtp_dev_list, list)
			gtp_dellink(gtp->dev, dev_to_kill);
	}
}
static struct pernet_operations gtp_net_ops = {
	.init	= gtp_net_init,
	.exit_batch_rtnl = gtp_net_exit_batch_rtnl,
	.id	= &gtp_net_id,
	.size	= sizeof(struct gtp_net),
};
static int __init gtp_init(void)
{
	int err;

	get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));

	err = register_pernet_subsys(&gtp_net_ops);
	if (err < 0)
		goto error_out;

	err = rtnl_link_register(&gtp_link_ops);
	if (err < 0)
		goto unreg_pernet_subsys;

	err = genl_register_family(&gtp_genl_family);
	if (err < 0)
		goto unreg_rtnl_link;

	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
		sizeof(struct pdp_ctx));
	return 0;

unreg_rtnl_link:
	rtnl_link_unregister(&gtp_link_ops);
unreg_pernet_subsys:
	unregister_pernet_subsys(&gtp_net_ops);
error_out:
	pr_err("error loading GTP module\n");
	return err;
}
late_initcall(gtp_init);
static void __exit gtp_fini(void)
{
	genl_unregister_family(&gtp_genl_family);
	rtnl_link_unregister(&gtp_link_ops);
	unregister_pernet_subsys(&gtp_net_ops);

	pr_info("GTP module unloaded\n");
}
module_exit(gtp_fini);
2534 MODULE_LICENSE("GPL");
2535 MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
2536 MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
2537 MODULE_ALIAS_RTNL_LINK("gtp");
2538 MODULE_ALIAS_GENL_FAMILY("gtp");