/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
 *
 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
 * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Author: Harald Welte <hwelte@sysmocom.de>
 *	   Pablo Neira Ayuso <pablo@netfilter.org>
 *	   Andreas Schultz <aschultz@travelping.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/file.h>
#include <linux/gtp.h>

#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <net/gtp.h>
/* An active session for the subscriber. */
struct pdp_ctx {
	struct hlist_node	hlist_tid;
	struct hlist_node	hlist_addr;

	union {
		u64		tid;
		struct {
			u64	tid;
			u16	flow;
		} v0;
		struct {
			u32	i_tei;
			u32	o_tei;
		} v1;
	} u;
	u8			gtp_version;
	u16			af;

	struct in_addr		ms_addr_ip4;
	struct in_addr		sgsn_addr_ip4;

	atomic_t		tx_seq;
	struct rcu_head		rcu_head;
};
/* One instance of the GTP device. */
struct gtp_dev {
	struct list_head	list;

	struct socket		*sock0;
	struct socket		*sock1u;

	struct net_device	*dev;

	unsigned int		hash_size;
	struct hlist_head	*tid_hash;
	struct hlist_head	*addr_hash;
};
static unsigned int gtp_net_id __read_mostly;
struct gtp_net {
	struct list_head gtp_dev_list;
};
static u32 gtp_h_initval;
static inline u32 gtp0_hashfn(u64 tid)
{
	u32 *tid32 = (u32 *) &tid;
	return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
}
static inline u32 gtp1u_hashfn(u32 tid)
{
	return jhash_1word(tid, gtp_h_initval);
}
static inline u32 ipv4_hashfn(__be32 ip)
{
	return jhash_1word((__force u32)ip, gtp_h_initval);
}
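
/* Illustrative sketch (not part of the driver): all three hash functions
 * feed a modulo bucket lookup over the per-device tables, e.g. for a
 * GTPv0 TID (value hypothetical):
 *
 *	u64 tid = 0x0011223344556677ULL;
 *	struct hlist_head *head;
 *
 *	head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];
 *
 * gtp_h_initval is randomized at module load (see gtp_init()), so bucket
 * placement differs across boots but is stable for the module's lifetime.
 */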
/* Resolve a PDP context structure based on the 64bit TID. */
static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V0 &&
		    pdp->u.v0.tid == tid)
			return pdp;
	}
	return NULL;
}
/* Resolve a PDP context structure based on the 32bit TEI. */
static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V1 &&
		    pdp->u.v1.i_tei == tid)
			return pdp;
	}
	return NULL;
}
/* Resolve a PDP context based on IPv4 address of MS. */
static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
		if (pdp->af == AF_INET &&
		    pdp->ms_addr_ip4.s_addr == ms_addr)
			return pdp;
	}
	return NULL;
}
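
/* Illustrative sketch (not part of the driver): the *_pdp_find() helpers
 * walk RCU-protected hash chains, so callers must hold the RCU read lock,
 * as the transmit path does:
 *
 *	rcu_read_lock();
 *	pctx = ipv4_pdp_find(gtp, iph->daddr);
 *	if (pctx)
 *		... use pctx without sleeping ...
 *	rcu_read_unlock();
 */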
static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
				  unsigned int hdrlen)
{
	struct iphdr *iph;

	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
		return false;

	iph = (struct iphdr *)(skb->data + hdrlen);

	return iph->saddr == pctx->ms_addr_ip4.s_addr;
}
/* Check if the inner IP source address in this packet is assigned to any
 * existing mobile subscriber.
 */
static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
			     unsigned int hdrlen)
{
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		return gtp_check_src_ms_ipv4(skb, pctx, hdrlen);
	}
	return false;
}
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
			       bool xnet)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp0_header);
	struct gtp0_header *gtp0;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp0->flags >> 5) != GTP_V0)
		return 1;

	if (gtp0->type != GTP_TPDU)
		return 1;

	pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
		netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
		return 1;
	}

	/* Get rid of the GTP + UDP headers. */
	return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
}
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
				bool xnet)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp1_header);
	struct gtp1_header *gtp1;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp1->flags >> 5) != GTP_V1)
		return 1;

	if (gtp1->type != GTP_TPDU)
		return 1;

	/* From 29.060: "This field shall be present if and only if any one or
	 * more of the S, PN and E flags are set.".
	 *
	 * If any of these flags is set, the optional 4-byte field (sequence
	 * number, N-PDU number and next extension header type) is present on
	 * the wire, so account for it in the header length.
	 */
	if (gtp1->flags & GTP1_F_MASK)
		hdrlen += 4;

	/* Make sure the header is large enough, including extensions. */
	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
		netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
		return 1;
	}

	/* Get rid of the GTP + UDP headers. */
	return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
}
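
/* Worked example: the mandatory GTPv1-U header is 8 bytes, so the initial
 * pull above is 8 (UDP) + 8 (GTP) = 16 bytes. If any of E/S/PN is set,
 * the optional 4-byte tail grows that to hdrlen = 20 before the inner IP
 * header starts.
 */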
static void gtp_encap_disable(struct gtp_dev *gtp)
{
	if (gtp->sock0 && gtp->sock0->sk) {
		udp_sk(gtp->sock0->sk)->encap_type = 0;
		rcu_assign_sk_user_data(gtp->sock0->sk, NULL);
	}
	if (gtp->sock1u && gtp->sock1u->sk) {
		udp_sk(gtp->sock1u->sk)->encap_type = 0;
		rcu_assign_sk_user_data(gtp->sock1u->sk, NULL);
	}

	gtp->sock0 = NULL;
	gtp->sock1u = NULL;
}
static void gtp_encap_destroy(struct sock *sk)
{
	struct gtp_dev *gtp;

	gtp = rcu_dereference_sk_user_data(sk);
	if (gtp)
		gtp_encap_disable(gtp);
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
 */
static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct gtp_dev *gtp;
	bool xnet;
	int ret;

	gtp = rcu_dereference_sk_user_data(sk);
	if (!gtp)
		return 1;

	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);

	xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));

	switch (udp_sk(sk)->encap_type) {
	case UDP_ENCAP_GTP0:
		netdev_dbg(gtp->dev, "received GTP0 packet\n");
		ret = gtp0_udp_encap_recv(gtp, skb, xnet);
		break;
	case UDP_ENCAP_GTP1U:
		netdev_dbg(gtp->dev, "received GTP1U packet\n");
		ret = gtp1u_udp_encap_recv(gtp, skb, xnet);
		break;
	default:
		ret = -1; /* Shouldn't happen. */
	}

	switch (ret) {
	case 1:
		netdev_dbg(gtp->dev, "pass up to the process\n");
		return 1;
	case 0:
		netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
		break;
	case -1:
		netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
		kfree_skb(skb);
		return 0;
	}

	/* Now that the UDP and the GTP header have been removed, set up the
	 * new network header. This is required by the upper layer to
	 * calculate the transport header.
	 */
	skb_reset_network_header(skb);

	skb->dev = gtp->dev;

	stats = this_cpu_ptr(gtp->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return 0;
}
static int gtp_dev_init(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp->dev = dev;

	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
static void gtp_dev_uninit(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp_encap_disable(gtp);
	free_percpu(dev->tstats);
}
static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
					   const struct sock *sk, __be32 daddr)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif		= sk->sk_bound_dev_if;
	fl4->daddr		= daddr;
	fl4->saddr		= inet_sk(sk)->inet_saddr;
	fl4->flowi4_tos		= RT_CONN_FLAGS(sk);
	fl4->flowi4_proto	= sk->sk_protocol;

	return ip_route_output_key(net, fl4);
}
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp0_header *gtp0;

	gtp0 = (struct gtp0_header *) skb_push(skb, sizeof(*gtp0));

	gtp0->flags	= 0x1e; /* v0, GTP-non-prime. */
	gtp0->type	= GTP_TPDU;
	gtp0->length	= htons(payload_len);
	gtp0->seq	= htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
	gtp0->flow	= htons(pctx->u.v0.flow);
	gtp0->number	= 0xff;
	gtp0->spare[0]	= gtp0->spare[1] = gtp0->spare[2] = 0xff;
	gtp0->tid	= cpu_to_be64(pctx->u.v0.tid);
}
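
/* Worked example: per TS 09.60, the GTPv0 flags octet 0x1e decomposes as
 * version = 0 (top three bits), PT = 1 (GTP, not GTP'), the three spare
 * bits set to 1, and SNN = 0, i.e. 000'1'111'0 == 0x1e. The receive path
 * only checks (flags >> 5) == GTP_V0.
 */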
static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp1_header *gtp1;

	gtp1 = (struct gtp1_header *) skb_push(skb, sizeof(*gtp1));

	/* Bits    8  7  6  5  4  3  2  1
	 *	  +--+--+--+--+--+--+--+--+
	 *	  |version |PT| 0| E| S|PN|
	 *	  +--+--+--+--+--+--+--+--+
	 */
	gtp1->flags	= 0x30; /* v1, GTP-non-prime. */
	gtp1->type	= GTP_TPDU;
	gtp1->length	= htons(payload_len);
	gtp1->tid	= htonl(pctx->u.v1.o_tei);

	/* TODO: Support for extension header, sequence number and N-PDU.
	 *	 Update the length field if any of them is available.
	 */
}
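
/* Worked example: the GTPv1 flags octet 0x30 decomposes as version = 1
 * (001 in the top three bits), PT = 1, and E = S = PN = 0, i.e.
 * 001'1'0'000 == 0x30, matching the bit diagram above. Setting any of
 * E/S/PN would require appending the optional 4-byte field and growing
 * the length field, which is what the TODO refers to.
 */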
struct gtp_pktinfo {
	struct sock		*sk;
	struct iphdr		*iph;
	struct flowi4		fl4;
	struct rtable		*rt;
	struct pdp_ctx		*pctx;
	struct net_device	*dev;
	__be16			gtph_port;
};
static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
{
	switch (pktinfo->pctx->gtp_version) {
	case GTP_V0:
		pktinfo->gtph_port = htons(GTP0_PORT);
		gtp0_push_header(skb, pktinfo->pctx);
		break;
	case GTP_V1:
		pktinfo->gtph_port = htons(GTP1U_PORT);
		gtp1_push_header(skb, pktinfo->pctx);
		break;
	}
}
static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
					struct sock *sk, struct iphdr *iph,
					struct pdp_ctx *pctx, struct rtable *rt,
					struct flowi4 *fl4,
					struct net_device *dev)
{
	pktinfo->sk	= sk;
	pktinfo->iph	= iph;
	pktinfo->pctx	= pctx;
	pktinfo->rt	= rt;
	pktinfo->fl4	= *fl4;
	pktinfo->dev	= dev;
}
static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
			     struct gtp_pktinfo *pktinfo)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct pdp_ctx *pctx;
	struct rtable *rt;
	struct flowi4 fl4;
	struct iphdr *iph;
	struct sock *sk;
	__be16 df;
	int mtu;

	/* Read the IP destination address and resolve the PDP context.
	 * Prepend PDP header with TEI/TID from PDP ctx.
	 */
	iph = ip_hdr(skb);
	pctx = ipv4_pdp_find(gtp, iph->daddr);
	if (!pctx) {
		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
			   &iph->daddr);
		return -ENOENT;
	}
	netdev_dbg(dev, "found PDP context %p\n", pctx);

	switch (pctx->gtp_version) {
	case GTP_V0:
		if (gtp->sock0)
			sk = gtp->sock0->sk;
		else
			sk = NULL;
		break;
	case GTP_V1:
		if (gtp->sock1u)
			sk = gtp->sock1u->sk;
		else
			sk = NULL;
		break;
	default:
		return -ENOENT;
	}

	if (!sk) {
		netdev_dbg(dev, "no userspace socket is available, skip\n");
		return -ENOENT;
	}

	rt = ip4_route_output_gtp(sock_net(sk), &fl4, gtp->sock0->sk,
				  pctx->sgsn_addr_ip4.s_addr);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to SGSN %pI4\n",
			   &pctx->sgsn_addr_ip4.s_addr);
		dev->stats.tx_carrier_errors++;
		goto err;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to SGSN %pI4\n",
			   &pctx->sgsn_addr_ip4.s_addr);
		dev->stats.collisions++;
		goto err_rt;
	}

	skb_dst_drop(skb);

	/* This is similar to tnl_update_pmtu(). */
	df = iph->frag_off;
	if (df) {
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
			sizeof(struct iphdr) - sizeof(struct udphdr);
		switch (pctx->gtp_version) {
		case GTP_V0:
			mtu -= sizeof(struct gtp0_header);
			break;
		case GTP_V1:
			mtu -= sizeof(struct gtp1_header);
			break;
		}
	} else {
		mtu = dst_mtu(&rt->dst);
	}

	rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);

	if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
	    mtu < ntohs(iph->tot_len)) {
		netdev_dbg(dev, "packet too big, fragmentation needed\n");
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		goto err_rt;
	}

	gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev);
	gtp_push_header(skb, pktinfo);

	return 0;
err_rt:
	ip_rt_put(rt);
err:
	return -EBADMSG;
}
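
/* Worked example (hypothetical numbers): with a 1500-byte route MTU,
 * hard_header_len = 0 and the DF path taken, the usable inner MTU is
 * 1500 - 20 (outer IPv4) - 8 (UDP) - 20 (GTPv0 header) = 1452 bytes for
 * GTPv0, or 1500 - 20 - 8 - 8 = 1464 bytes for GTPv1-U, which is why
 * oversized DF packets trigger the ICMP_FRAG_NEEDED above.
 */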
static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int proto = ntohs(skb->protocol);
	struct gtp_pktinfo pktinfo;
	int err;

	/* Ensure there is sufficient headroom. */
	if (skb_cow_head(skb, dev->needed_headroom))
		goto tx_err;

	skb_reset_inner_headers(skb);

	/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
	rcu_read_lock();
	switch (proto) {
	case ETH_P_IP:
		err = gtp_build_skb_ip4(skb, dev, &pktinfo);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	rcu_read_unlock();

	if (err < 0)
		goto tx_err;

	switch (proto) {
	case ETH_P_IP:
		netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
			   &pktinfo.iph->saddr, &pktinfo.iph->daddr);
		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
				    pktinfo.iph->tos,
				    ip4_dst_hoplimit(&pktinfo.rt->dst),
				    0,
				    pktinfo.gtph_port, pktinfo.gtph_port,
				    true, false);
		break;
	}

	return NETDEV_TX_OK;
tx_err:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static const struct net_device_ops gtp_netdev_ops = {
	.ndo_init		= gtp_dev_init,
	.ndo_uninit		= gtp_dev_uninit,
	.ndo_start_xmit		= gtp_dev_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
};
static void gtp_link_setup(struct net_device *dev)
{
	dev->netdev_ops		= &gtp_netdev_ops;
	dev->needs_free_netdev	= true;

	dev->hard_header_len = 0;
	dev->addr_len = 0;

	/* Zero header length. */
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;

	dev->priv_flags	|= IFF_NO_QUEUE;
	dev->features	|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	/* Assume largest header, ie. GTPv0. */
	dev->needed_headroom	= LL_MAX_HEADER +
				  sizeof(struct iphdr) +
				  sizeof(struct udphdr) +
				  sizeof(struct gtp0_header);
}
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static void gtp_hashtable_free(struct gtp_dev *gtp);
static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
			    int fd_gtp0, int fd_gtp1);
static int gtp_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	int hashsize, err, fd0, fd1;
	struct gtp_dev *gtp;
	struct gtp_net *gn;

	if (!data[IFLA_GTP_FD0] || !data[IFLA_GTP_FD1])
		return -EINVAL;

	gtp = netdev_priv(dev);

	fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
	fd1 = nla_get_u32(data[IFLA_GTP_FD1]);

	err = gtp_encap_enable(dev, gtp, fd0, fd1);
	if (err < 0)
		return err;

	if (!data[IFLA_GTP_PDP_HASHSIZE])
		hashsize = 1024;
	else
		hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);

	err = gtp_hashtable_new(gtp, hashsize);
	if (err < 0)
		goto out_encap;

	err = register_netdevice(dev);
	if (err < 0) {
		netdev_dbg(dev, "failed to register new netdev %d\n", err);
		goto out_hashtable;
	}

	gn = net_generic(dev_net(dev), gtp_net_id);
	list_add_rcu(&gtp->list, &gn->gtp_dev_list);

	netdev_dbg(dev, "registered new GTP interface\n");

	return 0;

out_hashtable:
	gtp_hashtable_free(gtp);
out_encap:
	gtp_encap_disable(gtp);
	return err;
}
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp_encap_disable(gtp);
	gtp_hashtable_free(gtp);
	list_del_rcu(&gtp->list);
	unregister_netdevice_queue(dev, head);
}
static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
	[IFLA_GTP_FD0]			= { .type = NLA_U32 },
	[IFLA_GTP_FD1]			= { .type = NLA_U32 },
	[IFLA_GTP_PDP_HASHSIZE]		= { .type = NLA_U32 },
};
static int gtp_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (!data)
		return -EINVAL;

	return 0;
}
static size_t gtp_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32));	/* IFLA_GTP_PDP_HASHSIZE */
}
static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops gtp_link_ops __read_mostly = {
	.kind		= "gtp",
	.maxtype	= IFLA_GTP_MAX,
	.policy		= gtp_policy,
	.priv_size	= sizeof(struct gtp_dev),
	.setup		= gtp_link_setup,
	.validate	= gtp_validate,
	.newlink	= gtp_newlink,
	.dellink	= gtp_dellink,
	.get_size	= gtp_get_size,
	.fill_info	= gtp_fill_info,
};
static struct net *gtp_genl_get_net(struct net *src_net, struct nlattr *tb[])
{
	struct net *net;

	/* Examine the link attributes and figure out which network namespace
	 * we are talking about.
	 */
	if (tb[GTPA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(tb[GTPA_NET_NS_FD]));
	else
		net = get_net(src_net);

	return net;
}
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
	int i;

	gtp->addr_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
	if (gtp->addr_hash == NULL)
		return -ENOMEM;

	gtp->tid_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
	if (gtp->tid_hash == NULL)
		goto err1;

	gtp->hash_size = hsize;

	for (i = 0; i < hsize; i++) {
		INIT_HLIST_HEAD(&gtp->addr_hash[i]);
		INIT_HLIST_HEAD(&gtp->tid_hash[i]);
	}
	return 0;
err1:
	kfree(gtp->addr_hash);
	return -ENOMEM;
}
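
/* Illustrative sketch: for the default hashsize of 1024 chosen in
 * gtp_newlink(), this allocates two arrays of 1024 hlist heads, i.e.
 * 2 * 1024 * sizeof(struct hlist_head) = 16 KiB on a 64-bit build,
 * before any PDP contexts are added.
 */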
static void gtp_hashtable_free(struct gtp_dev *gtp)
{
	struct pdp_ctx *pctx;
	int i;

	for (i = 0; i < gtp->hash_size; i++) {
		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
			hlist_del_rcu(&pctx->hlist_tid);
			hlist_del_rcu(&pctx->hlist_addr);
			kfree_rcu(pctx, rcu_head);
		}
	}
	synchronize_rcu();
	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
}
static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
			    int fd_gtp0, int fd_gtp1)
{
	struct udp_tunnel_sock_cfg tuncfg = {NULL};
	struct socket *sock0, *sock1u;
	int err;

	netdev_dbg(dev, "enable gtp on %d, %d\n", fd_gtp0, fd_gtp1);

	sock0 = sockfd_lookup(fd_gtp0, &err);
	if (sock0 == NULL) {
		netdev_dbg(dev, "socket fd=%d not found (gtp0)\n", fd_gtp0);
		return -ENOENT;
	}

	if (sock0->sk->sk_protocol != IPPROTO_UDP) {
		netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0);
		err = -EINVAL;
		goto err1;
	}

	sock1u = sockfd_lookup(fd_gtp1, &err);
	if (sock1u == NULL) {
		netdev_dbg(dev, "socket fd=%d not found (gtp1u)\n", fd_gtp1);
		err = -ENOENT;
		goto err1;
	}

	if (sock1u->sk->sk_protocol != IPPROTO_UDP) {
		netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp1);
		err = -EINVAL;
		goto err2;
	}

	netdev_dbg(dev, "enable gtp on %p, %p\n", sock0, sock1u);

	gtp->sock0 = sock0;
	gtp->sock1u = sock1u;

	tuncfg.sk_user_data = gtp;
	tuncfg.encap_rcv = gtp_encap_recv;
	tuncfg.encap_destroy = gtp_encap_destroy;

	tuncfg.encap_type = UDP_ENCAP_GTP0;
	setup_udp_tunnel_sock(sock_net(gtp->sock0->sk), gtp->sock0, &tuncfg);

	tuncfg.encap_type = UDP_ENCAP_GTP1U;
	setup_udp_tunnel_sock(sock_net(gtp->sock1u->sk), gtp->sock1u, &tuncfg);

	err = 0;
err2:
	sockfd_put(sock1u);
err1:
	sockfd_put(sock0);
	return err;
}
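
/* Illustrative sketch (userspace side, not part of the driver): the two
 * descriptors handed in via IFLA_GTP_FD0/IFLA_GTP_FD1 are plain UDP
 * sockets that the control-plane daemon binds to the well-known GTP
 * ports before creating the link, roughly:
 *
 *	int fd0 = socket(AF_INET, SOCK_DGRAM, 0);   // GTPv0, port 3386
 *	int fd1 = socket(AF_INET, SOCK_DGRAM, 0);   // GTPv1-U, port 2152
 *	// bind() each socket to its port, then pass fd0/fd1 as
 *	// IFLA_GTP_FD0/IFLA_GTP_FD1 in the RTM_NEWLINK request
 *	// (see gtp_newlink() above).
 */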
static struct net_device *gtp_find_dev(struct net *net, int ifindex)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);
	struct gtp_dev *gtp;

	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
		if (ifindex == gtp->dev->ifindex)
			return gtp->dev;
	}
	return NULL;
}
static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
	pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
	pctx->af = AF_INET;
	pctx->sgsn_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_SGSN_ADDRESS]);
	pctx->ms_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
		 * label needs to be the same for uplink and downlink packets,
		 * so let's annotate this.
		 */
		pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
		pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
		break;
	case GTP_V1:
		pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
		pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
		break;
	default:
		break;
	}
}
static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	u32 hash_ms, hash_tid = 0;
	struct pdp_ctx *pctx;
	bool found = false;
	__be32 ms_addr;

	ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
	hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;

	hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
		if (pctx->ms_addr_ip4.s_addr == ms_addr) {
			found = true;
			break;
		}
	}

	if (found) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			return -EEXIST;
		if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
			return -EOPNOTSUPP;

		ipv4_pdp_fill(pctx, info);

		if (pctx->gtp_version == GTP_V0)
			netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
				   pctx->u.v0.tid, pctx);
		else if (pctx->gtp_version == GTP_V1)
			netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
				   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

		return 0;
	}

	pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
	if (pctx == NULL)
		return -ENOMEM;

	ipv4_pdp_fill(pctx, info);
	atomic_set(&pctx->tx_seq, 0);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* TS 09.60: "The flow label identifies unambiguously a GTP
		 * flow.". We use the tid for this instead, I cannot find a
		 * situation in which this doesn't unambiguously identify the
		 * PDP context.
		 */
		hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
		break;
	case GTP_V1:
		hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
		break;
	}

	hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
	hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v0.tid, &pctx->sgsn_addr_ip4,
			   &pctx->ms_addr_ip4, pctx);
		break;
	case GTP_V1:
		netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei,
			   &pctx->sgsn_addr_ip4, &pctx->ms_addr_ip4, pctx);
		break;
	}

	return 0;
}
static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct net *net;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK] ||
	    !info->attrs[GTPA_SGSN_ADDRESS] ||
	    !info->attrs[GTPA_MS_ADDRESS])
		return -EINVAL;

	switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
	case GTP_V0:
		if (!info->attrs[GTPA_TID] ||
		    !info->attrs[GTPA_FLOW])
			return -EINVAL;
		break;
	case GTP_V1:
		if (!info->attrs[GTPA_I_TEI] ||
		    !info->attrs[GTPA_O_TEI])
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
	if (IS_ERR(net))
		return PTR_ERR(net);

	/* Check if there's an existing gtpX device to configure */
	dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
	if (dev == NULL) {
		put_net(net);
		return -ENODEV;
	}
	put_net(net);

	return ipv4_pdp_add(dev, info);
}
static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct pdp_ctx *pctx;
	struct gtp_dev *gtp;
	struct net *net;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK])
		return -EINVAL;

	net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
	if (IS_ERR(net))
		return PTR_ERR(net);

	/* Check if there's an existing gtpX device to configure */
	dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
	if (dev == NULL) {
		put_net(net);
		return -ENODEV;
	}
	put_net(net);

	gtp = netdev_priv(dev);

	switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
	case GTP_V0:
		if (!info->attrs[GTPA_TID])
			return -EINVAL;
		pctx = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID]));
		break;
	case GTP_V1:
		if (!info->attrs[GTPA_I_TEI])
			return -EINVAL;
		pctx = gtp1_pdp_find(gtp, nla_get_u32(info->attrs[GTPA_I_TEI]));
		break;
	default:
		return -EINVAL;
	}

	if (pctx == NULL)
		return -ENOENT;

	if (pctx->gtp_version == GTP_V0)
		netdev_dbg(dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
			   pctx->u.v0.tid, pctx);
	else if (pctx->gtp_version == GTP_V1)
		netdev_dbg(dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

	hlist_del_rcu(&pctx->hlist_tid);
	hlist_del_rcu(&pctx->hlist_addr);
	kfree_rcu(pctx, rcu_head);

	return 0;
}
static struct genl_family gtp_genl_family;
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
			      u32 type, struct pdp_ctx *pctx)
{
	void *genlh;

	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
			    type);
	if (genlh == NULL)
		goto nlmsg_failure;

	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
	    nla_put_be32(skb, GTPA_SGSN_ADDRESS, pctx->sgsn_addr_ip4.s_addr) ||
	    nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
		goto nla_put_failure;

	switch (pctx->gtp_version) {
	case GTP_V0:
		if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
		    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
			goto nla_put_failure;
		break;
	case GTP_V1:
		if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
		    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(skb, genlh);
	return 0;

nlmsg_failure:
nla_put_failure:
	genlmsg_cancel(skb, genlh);
	return -EMSGSIZE;
}
static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx = NULL;
	struct net_device *dev;
	struct sk_buff *skb2;
	struct gtp_dev *gtp;
	u32 gtp_version;
	struct net *net;
	int err;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK])
		return -EINVAL;

	gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
	switch (gtp_version) {
	case GTP_V0:
	case GTP_V1:
		break;
	default:
		return -EINVAL;
	}

	net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
	if (IS_ERR(net))
		return PTR_ERR(net);

	/* Check if there's an existing gtpX device to configure */
	dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
	if (dev == NULL) {
		put_net(net);
		return -ENODEV;
	}
	put_net(net);

	gtp = netdev_priv(dev);

	rcu_read_lock();
	if (gtp_version == GTP_V0 &&
	    info->attrs[GTPA_TID]) {
		u64 tid = nla_get_u64(info->attrs[GTPA_TID]);

		pctx = gtp0_pdp_find(gtp, tid);
	} else if (gtp_version == GTP_V1 &&
		   info->attrs[GTPA_I_TEI]) {
		u32 tid = nla_get_u32(info->attrs[GTPA_I_TEI]);

		pctx = gtp1_pdp_find(gtp, tid);
	} else if (info->attrs[GTPA_MS_ADDRESS]) {
		__be32 ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

		pctx = ipv4_pdp_find(gtp, ip);
	}

	if (pctx == NULL) {
		err = -ENOENT;
		goto err_unlock;
	}

	skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb2 == NULL) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
				 info->snd_seq, info->nlhdr->nlmsg_type, pctx);
	if (err < 0)
		goto err_unlock_free;

	rcu_read_unlock();
	return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);

err_unlock_free:
	kfree_skb(skb2);
err_unlock:
	rcu_read_unlock();
	return err;
}
static int gtp_genl_dump_pdp(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
	struct net *net = sock_net(skb->sk);
	struct gtp_net *gn = net_generic(net, gtp_net_id);
	unsigned long tid = cb->args[1];
	int i, k = cb->args[0], ret;
	struct pdp_ctx *pctx;

	if (cb->args[4])
		return 0;

	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
		if (last_gtp && last_gtp != gtp)
			continue;
		else
			last_gtp = NULL;

		for (i = k; i < gtp->hash_size; i++) {
			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
				if (tid && tid != pctx->u.tid)
					continue;
				else
					tid = 0;

				ret = gtp_genl_fill_info(skb,
							 NETLINK_CB(cb->skb).portid,
							 cb->nlh->nlmsg_seq,
							 cb->nlh->nlmsg_type, pctx);
				if (ret < 0) {
					cb->args[0] = i;
					cb->args[1] = pctx->u.tid;
					cb->args[2] = (unsigned long)gtp;
					goto out;
				}
			}
		}
	}
	cb->args[4] = 1;
out:
	return skb->len;
}
static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
	[GTPA_LINK]		= { .type = NLA_U32, },
	[GTPA_VERSION]		= { .type = NLA_U32, },
	[GTPA_TID]		= { .type = NLA_U64, },
	[GTPA_SGSN_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_MS_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_FLOW]		= { .type = NLA_U16, },
	[GTPA_NET_NS_FD]	= { .type = NLA_U32, },
	[GTPA_I_TEI]		= { .type = NLA_U32, },
	[GTPA_O_TEI]		= { .type = NLA_U32, },
};
static const struct genl_ops gtp_genl_ops[] = {
	{
		.cmd = GTP_CMD_NEWPDP,
		.doit = gtp_genl_new_pdp,
		.policy = gtp_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_DELPDP,
		.doit = gtp_genl_del_pdp,
		.policy = gtp_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_GETPDP,
		.doit = gtp_genl_get_pdp,
		.dumpit = gtp_genl_dump_pdp,
		.policy = gtp_genl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
static struct genl_family gtp_genl_family __ro_after_init = {
	.name		= "gtp",
	.version	= 0,
	.hdrsize	= 0,
	.maxattr	= GTPA_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= gtp_genl_ops,
	.n_ops		= ARRAY_SIZE(gtp_genl_ops),
};
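
/* Illustrative sketch (userspace side, not part of the driver): a PDP
 * context for GTPv1-U is installed over this generic netlink family with
 * a GTP_CMD_NEWPDP request carrying roughly these attributes:
 *
 *	GTPA_VERSION      = GTP_V1
 *	GTPA_LINK         = <ifindex of the gtp device>
 *	GTPA_SGSN_ADDRESS = <peer IPv4 address>
 *	GTPA_MS_ADDRESS   = <mobile subscriber IPv4 address>
 *	GTPA_I_TEI        = <ingress TEI>
 *	GTPA_O_TEI        = <egress TEI>
 *
 * gtp_genl_new_pdp() validates exactly this attribute set; libgtpnl's
 * gtp-tunnel tool is one existing client.
 */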
static int __net_init gtp_net_init(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);

	INIT_LIST_HEAD(&gn->gtp_dev_list);
	return 0;
}
static void __net_exit gtp_net_exit(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);
	struct gtp_dev *gtp;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(gtp, &gn->gtp_dev_list, list)
		gtp_dellink(gtp->dev, &list);

	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations gtp_net_ops = {
	.init	= gtp_net_init,
	.exit	= gtp_net_exit,
	.id	= &gtp_net_id,
	.size	= sizeof(struct gtp_net),
};
static int __init gtp_init(void)
{
	int err;

	get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));

	err = rtnl_link_register(&gtp_link_ops);
	if (err < 0)
		goto error_out;

	err = genl_register_family(&gtp_genl_family);
	if (err < 0)
		goto unreg_rtnl_link;

	err = register_pernet_subsys(&gtp_net_ops);
	if (err < 0)
		goto unreg_genl_family;

	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
		sizeof(struct pdp_ctx));
	return 0;

unreg_genl_family:
	genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
	rtnl_link_unregister(&gtp_link_ops);
error_out:
	pr_err("error loading GTP module\n");
	return err;
}
late_initcall(gtp_init);
static void __exit gtp_fini(void)
{
	unregister_pernet_subsys(&gtp_net_ops);
	genl_unregister_family(&gtp_genl_family);
	rtnl_link_unregister(&gtp_link_ops);

	pr_info("GTP module unloaded\n");
}
module_exit(gtp_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
MODULE_ALIAS_GENL_FAMILY("gtp");