// SPDX-License-Identifier: GPL-2.0-or-later
/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
 *
 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
 * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Author: Harald Welte <hwelte@sysmocom.de>
 *	   Pablo Neira Ayuso <pablo@netfilter.org>
 *	   Andreas Schultz <aschultz@travelping.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/file.h>
#include <linux/gtp.h>

#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <net/gtp.h>
/* An active session for the subscriber. */
struct pdp_ctx {
	struct hlist_node	hlist_tid;
	struct hlist_node	hlist_addr;

	union {
		struct {
			u64	tid;
			u16	flow;
		} v0;
		struct {
			u32	i_tei;
			u32	o_tei;
		} v1;
	} u;
	u8			gtp_version;
	u16			af;

	struct in_addr		ms_addr_ip4;
	struct in_addr		peer_addr_ip4;

	struct sock		*sk;
	struct net_device	*dev;

	atomic_t		tx_seq;
	struct rcu_head		rcu_head;
};
/* One instance of the GTP device. */
struct gtp_dev {
	struct list_head	list;

	struct sock		*sk0;
	struct sock		*sk1u;

	struct net_device	*dev;

	unsigned int		role;
	unsigned int		hash_size;
	struct hlist_head	*tid_hash;
	struct hlist_head	*addr_hash;
};
static unsigned int gtp_net_id __read_mostly;

struct gtp_net {
	struct list_head gtp_dev_list;
};

static u32 gtp_h_initval;
static void pdp_context_delete(struct pdp_ctx *pctx);
static inline u32 gtp0_hashfn(u64 tid)
{
	u32 *tid32 = (u32 *) &tid;
	return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
}

static inline u32 gtp1u_hashfn(u32 tid)
{
	return jhash_1word(tid, gtp_h_initval);
}

static inline u32 ipv4_hashfn(__be32 ip)
{
	return jhash_1word((__force u32)ip, gtp_h_initval);
}
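/* All three hash functions above are salted with gtp_h_initval, which is
 * filled with random bytes in gtp_init(). Bucket selection is therefore not
 * predictable across module loads; e.g. with the default hash size of 1024
 * (see gtp_newlink()), a GTPv0 lookup walks the chain at
 * tid_hash[gtp0_hashfn(tid) % 1024].
 */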
/* Resolve a PDP context structure based on the 64bit TID. */
static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V0 &&
		    pdp->u.v0.tid == tid)
			return pdp;
	}
	return NULL;
}
/* Resolve a PDP context structure based on the 32bit TEI. */
static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V1 &&
		    pdp->u.v1.i_tei == tid)
			return pdp;
	}
	return NULL;
}
/* Resolve a PDP context based on IPv4 address of MS. */
static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
		if (pdp->af == AF_INET &&
		    pdp->ms_addr_ip4.s_addr == ms_addr)
			return pdp;
	}
	return NULL;
}
static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
			      unsigned int hdrlen, unsigned int role)
{
	struct iphdr *iph;

	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
		return false;

	iph = (struct iphdr *)(skb->data + hdrlen);

	if (role == GTP_ROLE_SGSN)
		return iph->daddr == pctx->ms_addr_ip4.s_addr;

	return iph->saddr == pctx->ms_addr_ip4.s_addr;
}
/* Check if the inner IP address in this packet is assigned to any
 * existing mobile subscriber.
 */
static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
			 unsigned int hdrlen, unsigned int role)
{
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
	}
	return false;
}
static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
		  unsigned int hdrlen, unsigned int role)
{
	if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
		netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
		return 1;
	}

	/* Get rid of the GTP + UDP headers. */
	if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
				 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev))))
		return -1;

	netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");

	/* Now that the UDP and the GTP header have been removed, set up the
	 * new network header. This is required by the upper layer to
	 * calculate the transport header.
	 */
	skb_reset_network_header(skb);

	skb->dev = pctx->dev;

	dev_sw_netstats_rx_add(pctx->dev, skb->len);

	netif_rx(skb);
	return 0;
}
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp0_header);
	struct gtp0_header *gtp0;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp0->flags >> 5) != GTP_V0)
		return 1;

	if (gtp0->type != GTP_TPDU)
		return 1;

	pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	return gtp_rx(pctx, skb, hdrlen, gtp->role);
}
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp1_header);
	struct gtp1_header *gtp1;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp1->flags >> 5) != GTP_V1)
		return 1;

	if (gtp1->type != GTP_TPDU)
		return 1;

	/* From 29.060: "This field shall be present if and only if any one or
	 * more of the S, PN and E flags are set.".
	 *
	 * If any of these bits is set, the optional 4-octet tail (sequence
	 * number, N-PDU number, next extension header type) is present, so
	 * account for it in the header length.
	 */
	if (gtp1->flags & GTP1_F_MASK)
		hdrlen += 4;

	/* Make sure the header is large enough, including extensions. */
	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	return gtp_rx(pctx, skb, hdrlen, gtp->role);
}
static void __gtp_encap_destroy(struct sock *sk)
{
	struct gtp_dev *gtp;

	lock_sock(sk);
	gtp = sk->sk_user_data;
	if (gtp) {
		if (gtp->sk0 == sk)
			gtp->sk0 = NULL;
		else
			gtp->sk1u = NULL;
		udp_sk(sk)->encap_type = 0;
		rcu_assign_sk_user_data(sk, NULL);
		sock_put(sk);
	}
	release_sock(sk);
}

static void gtp_encap_destroy(struct sock *sk)
{
	rtnl_lock();
	__gtp_encap_destroy(sk);
	rtnl_unlock();
}

static void gtp_encap_disable_sock(struct sock *sk)
{
	if (!sk)
		return;

	__gtp_encap_destroy(sk);
}

static void gtp_encap_disable(struct gtp_dev *gtp)
{
	gtp_encap_disable_sock(gtp->sk0);
	gtp_encap_disable_sock(gtp->sk1u);
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
 */
static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct gtp_dev *gtp;
	int ret = 0;

	gtp = rcu_dereference_sk_user_data(sk);
	if (!gtp)
		return 1;

	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);

	switch (udp_sk(sk)->encap_type) {
	case UDP_ENCAP_GTP0:
		netdev_dbg(gtp->dev, "received GTP0 packet\n");
		ret = gtp0_udp_encap_recv(gtp, skb);
		break;
	case UDP_ENCAP_GTP1U:
		netdev_dbg(gtp->dev, "received GTP1U packet\n");
		ret = gtp1u_udp_encap_recv(gtp, skb);
		break;
	default:
		ret = -1; /* Shouldn't happen. */
	}

	switch (ret) {
	case 1:
		netdev_dbg(gtp->dev, "pass up to the process\n");
		break;
	case 0:
		break;
	case -1:
		netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
		kfree_skb(skb);
		ret = 0;
		break;
	}

	return ret;
}
static int gtp_dev_init(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp->dev = dev;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void gtp_dev_uninit(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp_encap_disable(gtp);
	free_percpu(dev->tstats);
}
static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
					   const struct sock *sk,
					   __be32 daddr)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif		= sk->sk_bound_dev_if;
	fl4->daddr		= daddr;
	fl4->saddr		= inet_sk(sk)->inet_saddr;
	fl4->flowi4_tos		= RT_CONN_FLAGS(sk);
	fl4->flowi4_proto	= sk->sk_protocol;

	return ip_route_output_key(sock_net(sk), fl4);
}
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp0_header *gtp0;

	gtp0 = skb_push(skb, sizeof(*gtp0));

	gtp0->flags	= 0x1e; /* v0, GTP-non-prime. */
	gtp0->type	= GTP_TPDU;
	gtp0->length	= htons(payload_len);
	gtp0->seq	= htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
	gtp0->flow	= htons(pctx->u.v0.flow);
	gtp0->number	= 0xff;
	gtp0->spare[0]	= gtp0->spare[1] = gtp0->spare[2] = 0xff;
	gtp0->tid	= cpu_to_be64(pctx->u.v0.tid);
}
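/* For reference (a sketch of the on-wire layout, per GSM TS 09.60): the
 * fixed GTPv0 header built above is 20 bytes: flags(1), type(1), length(2),
 * seq(2), flow(2), number(1), spare(3) and the 64-bit tid(8). Note that
 * payload_len is read before skb_push(), so the length field covers the
 * payload only, not the GTP header itself.
 */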
static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp1_header *gtp1;

	gtp1 = skb_push(skb, sizeof(*gtp1));

	/* Bits    8  7  6  5  4  3  2  1
	 *	  +--+--+--+--+--+--+--+--+
	 *	  |version |PT| 0| E| S|PN|
	 *	  +--+--+--+--+--+--+--+--+
	 *	    0  0  1| 1| 0| 0| 0| 0
	 */
	gtp1->flags	= 0x30; /* v1, GTP-non-prime. */
	gtp1->type	= GTP_TPDU;
	gtp1->length	= htons(payload_len);
	gtp1->tid	= htonl(pctx->u.v1.o_tei);

	/* TODO: Support for extension header, sequence number and N-PDU.
	 *	 Update the length field if any of them is available.
	 */
}
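/* The GTPv1-U header built above is only the 8-byte mandatory part:
 * flags(1), type(1), length(2), tid(4). Because E, S and PN are all cleared
 * in flags, the optional 4-octet tail (sequence number, N-PDU number, next
 * extension header type) is never emitted on transmit; see the TODO above.
 */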
struct gtp_pktinfo {
	struct sock		*sk;
	struct iphdr		*iph;
	struct flowi4		fl4;
	struct rtable		*rt;
	struct pdp_ctx		*pctx;
	struct net_device	*dev;
	__be16			gtph_port;
};

static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
{
	switch (pktinfo->pctx->gtp_version) {
	case GTP_V0:
		pktinfo->gtph_port = htons(GTP0_PORT);
		gtp0_push_header(skb, pktinfo->pctx);
		break;
	case GTP_V1:
		pktinfo->gtph_port = htons(GTP1U_PORT);
		gtp1_push_header(skb, pktinfo->pctx);
		break;
	}
}
static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
					struct sock *sk, struct iphdr *iph,
					struct pdp_ctx *pctx, struct rtable *rt,
					struct flowi4 *fl4,
					struct net_device *dev)
{
	pktinfo->sk	= sk;
	pktinfo->iph	= iph;
	pktinfo->pctx	= pctx;
	pktinfo->rt	= rt;
	pktinfo->fl4	= *fl4;
	pktinfo->dev	= dev;
}
static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
			     struct gtp_pktinfo *pktinfo)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct pdp_ctx *pctx;
	struct rtable *rt;
	struct flowi4 fl4;
	struct iphdr *iph;
	__be16 df;
	int mtu;

	/* Read the IP destination address and resolve the PDP context.
	 * Prepend PDP header with TEI/TID from PDP ctx.
	 */
	iph = ip_hdr(skb);
	if (gtp->role == GTP_ROLE_SGSN)
		pctx = ipv4_pdp_find(gtp, iph->saddr);
	else
		pctx = ipv4_pdp_find(gtp, iph->daddr);

	if (!pctx) {
		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
			   &iph->daddr);
		return -ENOENT;
	}
	netdev_dbg(dev, "found PDP context %p\n", pctx);

	rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to SGSN %pI4\n",
			   &pctx->peer_addr_ip4.s_addr);
		dev->stats.tx_carrier_errors++;
		goto err;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to SGSN %pI4\n",
			   &pctx->peer_addr_ip4.s_addr);
		dev->stats.collisions++;
		goto err_rt;
	}

	/* This is similar to tnl_update_pmtu(). */
	df = iph->frag_off;
	if (df) {
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
			sizeof(struct iphdr) - sizeof(struct udphdr);
		switch (pctx->gtp_version) {
		case GTP_V0:
			mtu -= sizeof(struct gtp0_header);
			break;
		case GTP_V1:
			mtu -= sizeof(struct gtp1_header);
			break;
		}
	} else {
		mtu = dst_mtu(&rt->dst);
	}

	rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
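	/* Worked example for the DF case, assuming a 1500-byte route MTU:
	 * with hard_header_len being 0 for GTP devices, the GTPv1 payload MTU
	 * computed above is 1500 - 20 (outer IPv4) - 8 (UDP) - 8 (GTPv1)
	 * = 1464 bytes; GTPv0 loses 12 bytes more due to its 20-byte header.
	 */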
	if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
	    mtu < ntohs(iph->tot_len)) {
		netdev_dbg(dev, "packet too big, fragmentation needed\n");
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			      htonl(mtu));
		goto err_rt;
	}

	gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
	gtp_push_header(skb, pktinfo);

	return 0;
err_rt:
	ip_rt_put(rt);
err:
	return -EBADMSG;
}
static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int proto = ntohs(skb->protocol);
	struct gtp_pktinfo pktinfo;
	int err;

	/* Ensure there is sufficient headroom. */
	if (skb_cow_head(skb, dev->needed_headroom))
		goto tx_err;

	skb_reset_inner_headers(skb);

	/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
	rcu_read_lock();
	switch (proto) {
	case ETH_P_IP:
		err = gtp_build_skb_ip4(skb, dev, &pktinfo);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	rcu_read_unlock();

	if (err < 0)
		goto tx_err;

	switch (proto) {
	case ETH_P_IP:
		netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
			   &pktinfo.iph->saddr, &pktinfo.iph->daddr);
		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
				    pktinfo.iph->tos,
				    ip4_dst_hoplimit(&pktinfo.rt->dst),
				    0,
				    pktinfo.gtph_port, pktinfo.gtph_port,
				    !net_eq(sock_net(pktinfo.pctx->sk),
					    dev_net(dev)),
				    false);
		break;
	}

	return NETDEV_TX_OK;
tx_err:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static const struct net_device_ops gtp_netdev_ops = {
	.ndo_init		= gtp_dev_init,
	.ndo_uninit		= gtp_dev_uninit,
	.ndo_start_xmit		= gtp_dev_xmit,
	.ndo_get_stats64	= dev_get_tstats64,
};
static void gtp_link_setup(struct net_device *dev)
{
	dev->netdev_ops		= &gtp_netdev_ops;
	dev->needs_free_netdev	= true;

	dev->hard_header_len = 0;
	dev->addr_len = 0;

	/* Zero header length. */
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;

	dev->priv_flags	|= IFF_NO_QUEUE;
	dev->features	|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	/* Assume largest header, i.e. GTPv0. */
	dev->needed_headroom	= LL_MAX_HEADER +
				  sizeof(struct iphdr) +
				  sizeof(struct udphdr) +
				  sizeof(struct gtp0_header);
}
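/* Worked example: with the 20-byte GTPv0 header, the device reserves
 * LL_MAX_HEADER + 20 (outer IPv4) + 8 (UDP) + 20 (GTPv0) bytes of headroom,
 * which also covers GTPv1 and its 8-byte header, so gtp_dev_xmit() normally
 * never has to reallocate in skb_cow_head().
 */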
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
static void gtp_destructor(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
}
static int gtp_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct gtp_dev *gtp;
	struct gtp_net *gn;
	int hashsize, err;

	if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
		return -EINVAL;

	gtp = netdev_priv(dev);

	if (!data[IFLA_GTP_PDP_HASHSIZE]) {
		hashsize = 1024;
	} else {
		hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
		if (!hashsize)
			hashsize = 1024;
	}

	err = gtp_hashtable_new(gtp, hashsize);
	if (err < 0)
		return err;

	err = gtp_encap_enable(gtp, data);
	if (err < 0)
		goto out_hashtable;

	err = register_netdevice(dev);
	if (err < 0) {
		netdev_dbg(dev, "failed to register new netdev %d\n", err);
		goto out_encap;
	}

	gn = net_generic(dev_net(dev), gtp_net_id);
	list_add_rcu(&gtp->list, &gn->gtp_dev_list);
	dev->priv_destructor = gtp_destructor;

	netdev_dbg(dev, "registered new GTP interface\n");

	return 0;

out_encap:
	gtp_encap_disable(gtp);
out_hashtable:
	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
	return err;
}
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct pdp_ctx *pctx;
	int i;

	for (i = 0; i < gtp->hash_size; i++)
		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
			pdp_context_delete(pctx);

	list_del_rcu(&gtp->list);
	unregister_netdevice_queue(dev, head);
}
static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
	[IFLA_GTP_FD0]			= { .type = NLA_U32 },
	[IFLA_GTP_FD1]			= { .type = NLA_U32 },
	[IFLA_GTP_PDP_HASHSIZE]		= { .type = NLA_U32 },
	[IFLA_GTP_ROLE]			= { .type = NLA_U32 },
};
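/* A minimal sketch of the rtnetlink usage, assuming a libgtpnl-style
 * userspace (attribute values are illustrative): the creator opens the UDP
 * sockets itself and hands over the file descriptors, e.g.
 *
 *	IFLA_GTP_FD0		= fd of a UDP socket bound to port 3386 (GTPv0)
 *	IFLA_GTP_FD1		= fd of a UDP socket bound to port 2152 (GTPv1-U)
 *	IFLA_GTP_PDP_HASHSIZE	= 1024
 *	IFLA_GTP_ROLE		= GTP_ROLE_GGSN
 *
 * At least one of FD0/FD1 is mandatory; see gtp_newlink().
 */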
static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (!data)
		return -EINVAL;

	return 0;
}
static size_t gtp_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)); /* IFLA_GTP_PDP_HASHSIZE */
}
static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops gtp_link_ops __read_mostly = {
	.kind		= "gtp",
	.maxtype	= IFLA_GTP_MAX,
	.policy		= gtp_policy,
	.priv_size	= sizeof(struct gtp_dev),
	.setup		= gtp_link_setup,
	.validate	= gtp_validate,
	.newlink	= gtp_newlink,
	.dellink	= gtp_dellink,
	.get_size	= gtp_get_size,
	.fill_info	= gtp_fill_info,
};
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
	int i;

	gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
				       GFP_KERNEL | __GFP_NOWARN);
	if (gtp->addr_hash == NULL)
		return -ENOMEM;

	gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
				      GFP_KERNEL | __GFP_NOWARN);
	if (gtp->tid_hash == NULL)
		goto err1;

	gtp->hash_size = hsize;

	for (i = 0; i < hsize; i++) {
		INIT_HLIST_HEAD(&gtp->addr_hash[i]);
		INIT_HLIST_HEAD(&gtp->tid_hash[i]);
	}
	return 0;
err1:
	kfree(gtp->addr_hash);
	return -ENOMEM;
}
static struct sock *gtp_encap_enable_socket(int fd, int type,
					    struct gtp_dev *gtp)
{
	struct udp_tunnel_sock_cfg tuncfg = {NULL};
	struct socket *sock;
	struct sock *sk;
	int err;

	pr_debug("enable gtp on %d, %d\n", fd, type);

	sock = sockfd_lookup(fd, &err);
	if (!sock) {
		pr_debug("gtp socket fd=%d not found\n", fd);
		return NULL;
	}

	sk = sock->sk;
	if (sk->sk_protocol != IPPROTO_UDP ||
	    sk->sk_type != SOCK_DGRAM ||
	    (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
		pr_debug("socket fd=%d not UDP\n", fd);
		sk = ERR_PTR(-EINVAL);
		goto out_sock;
	}

	lock_sock(sock->sk);
	if (sk->sk_user_data) {
		sk = ERR_PTR(-EBUSY);
		goto out_rel_sock;
	}

	sock_hold(sk);

	tuncfg.sk_user_data = gtp;
	tuncfg.encap_type = type;
	tuncfg.encap_rcv = gtp_encap_recv;
	tuncfg.encap_destroy = gtp_encap_destroy;

	setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);

out_rel_sock:
	release_sock(sock->sk);
out_sock:
	sockfd_put(sock);
	return sk;
}
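/* Illustrative userspace counterpart (a sketch, not part of this driver):
 * the fd handed to this function is expected to be an ordinary bound UDP
 * socket, e.g.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(2152),	// GTP1U_PORT
 *	};
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * which setup_udp_tunnel_sock() above then converts into a kernel UDP
 * tunnel socket.
 */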
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
{
	struct sock *sk1u = NULL;
	struct sock *sk0 = NULL;
	unsigned int role = GTP_ROLE_GGSN;

	if (data[IFLA_GTP_FD0]) {
		u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);

		sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
		if (IS_ERR(sk0))
			return PTR_ERR(sk0);
	}

	if (data[IFLA_GTP_FD1]) {
		u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);

		sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
		if (IS_ERR(sk1u)) {
			gtp_encap_disable_sock(sk0);
			return PTR_ERR(sk1u);
		}
	}

	if (data[IFLA_GTP_ROLE]) {
		role = nla_get_u32(data[IFLA_GTP_ROLE]);
		if (role > GTP_ROLE_SGSN) {
			gtp_encap_disable_sock(sk0);
			gtp_encap_disable_sock(sk1u);
			return -EINVAL;
		}
	}

	gtp->sk0 = sk0;
	gtp->sk1u = sk1u;
	gtp->role = role;

	return 0;
}
*gtp_find_dev(struct net
*src_net
, struct nlattr
*nla
[])
874 struct gtp_dev
*gtp
= NULL
;
875 struct net_device
*dev
;
878 /* Examine the link attributes and figure out which network namespace
879 * we are talking about.
881 if (nla
[GTPA_NET_NS_FD
])
882 net
= get_net_ns_by_fd(nla_get_u32(nla
[GTPA_NET_NS_FD
]));
884 net
= get_net(src_net
);
889 /* Check if there's an existing gtpX device to configure */
890 dev
= dev_get_by_index_rcu(net
, nla_get_u32(nla
[GTPA_LINK
]));
891 if (dev
&& dev
->netdev_ops
== >p_netdev_ops
)
892 gtp
= netdev_priv(dev
);
static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
	pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
	pctx->af = AF_INET;
	pctx->peer_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
	pctx->ms_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
		 * label needs to be the same for uplink and downlink packets,
		 * so let's annotate this.
		 */
		pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
		pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
		break;
	case GTP_V1:
		pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
		pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
		break;
	default:
		break;
	}
}
static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
				   struct genl_info *info)
{
	struct pdp_ctx *pctx, *pctx_tid = NULL;
	struct net_device *dev = gtp->dev;
	u32 hash_ms, hash_tid = 0;
	unsigned int version;
	bool found = false;
	__be32 ms_addr;

	ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
	hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
	version = nla_get_u32(info->attrs[GTPA_VERSION]);

	pctx = ipv4_pdp_find(gtp, ms_addr);
	if (pctx)
		found = true;
	if (version == GTP_V0)
		pctx_tid = gtp0_pdp_find(gtp,
					 nla_get_u64(info->attrs[GTPA_TID]));
	else if (version == GTP_V1)
		pctx_tid = gtp1_pdp_find(gtp,
					 nla_get_u32(info->attrs[GTPA_I_TEI]));
	if (pctx_tid)
		found = true;

	if (found) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			return ERR_PTR(-EEXIST);
		if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
			return ERR_PTR(-EOPNOTSUPP);

		if (pctx && pctx_tid)
			return ERR_PTR(-EEXIST);
		if (!pctx)
			pctx = pctx_tid;

		ipv4_pdp_fill(pctx, info);

		if (pctx->gtp_version == GTP_V0)
			netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
				   pctx->u.v0.tid, pctx);
		else if (pctx->gtp_version == GTP_V1)
			netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
				   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

		return pctx;
	}

	pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
	if (pctx == NULL)
		return ERR_PTR(-ENOMEM);

	sock_hold(sk);
	pctx->sk = sk;
	pctx->dev = gtp->dev;
	ipv4_pdp_fill(pctx, info);
	atomic_set(&pctx->tx_seq, 0);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* TS 09.60: "The flow label identifies unambiguously a GTP
		 * flow.". We use the tid for this instead; I cannot find a
		 * situation in which this doesn't unambiguously identify the
		 * PDP context.
		 */
		hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
		break;
	case GTP_V1:
		hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
		break;
	}

	hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
	hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v0.tid, &pctx->peer_addr_ip4,
			   &pctx->ms_addr_ip4, pctx);
		break;
	case GTP_V1:
		netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei,
			   &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx);
		break;
	}

	return pctx;
}
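/* Each new context is linked into both tables: addr_hash serves the
 * transmit path (ipv4_pdp_find() on the inner IP address) and tid_hash
 * serves the receive path (gtp0_pdp_find()/gtp1_pdp_find() on the tunnel
 * id), keeping both directions O(1) on average.
 */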
static void pdp_context_free(struct rcu_head *head)
{
	struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);

	sock_put(pctx->sk);
	kfree(pctx);
}

static void pdp_context_delete(struct pdp_ctx *pctx)
{
	hlist_del_rcu(&pctx->hlist_tid);
	hlist_del_rcu(&pctx->hlist_addr);
	call_rcu(&pctx->rcu_head, pdp_context_free);
}
static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);

static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
	unsigned int version;
	struct pdp_ctx *pctx;
	struct gtp_dev *gtp;
	struct sock *sk;
	int err;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK] ||
	    !info->attrs[GTPA_PEER_ADDRESS] ||
	    !info->attrs[GTPA_MS_ADDRESS])
		return -EINVAL;

	version = nla_get_u32(info->attrs[GTPA_VERSION]);

	switch (version) {
	case GTP_V0:
		if (!info->attrs[GTPA_TID] ||
		    !info->attrs[GTPA_FLOW])
			return -EINVAL;
		break;
	case GTP_V1:
		if (!info->attrs[GTPA_I_TEI] ||
		    !info->attrs[GTPA_O_TEI])
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	rtnl_lock();

	gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
	if (!gtp) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (version == GTP_V0)
		sk = gtp->sk0;
	else if (version == GTP_V1)
		sk = gtp->sk1u;
	else
		sk = NULL;

	if (!sk) {
		err = -ENODEV;
		goto out_unlock;
	}

	pctx = gtp_pdp_add(gtp, sk, info);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
	} else {
		gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL);
		err = 0;
	}

out_unlock:
	rtnl_unlock();
	return err;
}
static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
					    struct nlattr *nla[])
{
	struct gtp_dev *gtp;

	gtp = gtp_find_dev(net, nla);
	if (!gtp)
		return ERR_PTR(-ENODEV);

	if (nla[GTPA_MS_ADDRESS]) {
		__be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);

		return ipv4_pdp_find(gtp, ip);
	} else if (nla[GTPA_VERSION]) {
		u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);

		if (gtp_version == GTP_V0 && nla[GTPA_TID])
			return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]));
		else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI])
			return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]));
	}

	return ERR_PTR(-EINVAL);
}
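/* Lookup precedence: a GTPA_MS_ADDRESS attribute always wins; the
 * version-specific keys (GTPA_TID for v0, GTPA_I_TEI for v1) are only
 * consulted when no MS address was supplied.
 */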
static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
{
	struct pdp_ctx *pctx;

	if (nla[GTPA_LINK])
		pctx = gtp_find_pdp_by_link(net, nla);
	else
		pctx = ERR_PTR(-EINVAL);

	if (!pctx)
		pctx = ERR_PTR(-ENOENT);

	return pctx;
}
static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx;
	int err = 0;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto out_unlock;
	}

	if (pctx->gtp_version == GTP_V0)
		netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
			   pctx->u.v0.tid, pctx);
	else if (pctx->gtp_version == GTP_V1)
		netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

	gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
	pdp_context_delete(pctx);

out_unlock:
	rcu_read_unlock();
	return err;
}
static struct genl_family gtp_genl_family;

enum gtp_multicast_groups {
	GTP_GENL_MCGRP,
};

static const struct genl_multicast_group gtp_genl_mcgrps[] = {
	[GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
};
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
			      int flags, u32 type, struct pdp_ctx *pctx)
{
	void *genlh;

	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
			    type);
	if (genlh == NULL)
		goto nlmsg_failure;

	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
	    nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
	    nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
	    nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
		goto nla_put_failure;

	switch (pctx->gtp_version) {
	case GTP_V0:
		if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
		    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
			goto nla_put_failure;
		break;
	case GTP_V1:
		if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
		    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
			goto nla_put_failure;
		break;
	}
	genlmsg_end(skb, genlh);
	return 0;

nlmsg_failure:
nla_put_failure:
	genlmsg_cancel(skb, genlh);
	return -EMSGSIZE;
}
static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
{
	struct sk_buff *msg;
	int ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
	if (!msg)
		return -ENOMEM;

	ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
	if (ret < 0) {
		nlmsg_free(msg);
		return ret;
	}

	ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
				      0, GTP_GENL_MCGRP, GFP_ATOMIC);
	return ret;
}
static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx = NULL;
	struct sk_buff *skb2;
	int err;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto err_unlock;
	}

	skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb2 == NULL) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
				 0, info->nlhdr->nlmsg_type, pctx);
	if (err < 0)
		goto err_unlock_free;

	rcu_read_unlock();
	return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);

err_unlock_free:
	kfree_skb(skb2);
err_unlock:
	rcu_read_unlock();
	return err;
}
static int gtp_genl_dump_pdp(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
	int i, j, bucket = cb->args[0], skip = cb->args[1];
	struct net *net = sock_net(skb->sk);
	struct pdp_ctx *pctx;
	struct gtp_net *gn;

	gn = net_generic(net, gtp_net_id);

	if (cb->args[4])
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
		if (last_gtp && last_gtp != gtp)
			continue;
		else
			last_gtp = NULL;

		for (i = bucket; i < gtp->hash_size; i++) {
			j = 0;
			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
						 hlist_tid) {
				if (j >= skip &&
				    gtp_genl_fill_info(skb,
					    NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    NLM_F_MULTI,
					    cb->nlh->nlmsg_type, pctx)) {
					cb->args[0] = i;
					cb->args[1] = j;
					cb->args[2] = (unsigned long)gtp;
					goto out;
				}
				j++;
			}
			skip = 0;
		}
		bucket = 0;
	}
	cb->args[4] = 1;
out:
	rcu_read_unlock();
	return skb->len;
}
static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
	[GTPA_LINK]		= { .type = NLA_U32, },
	[GTPA_VERSION]		= { .type = NLA_U32, },
	[GTPA_TID]		= { .type = NLA_U64, },
	[GTPA_PEER_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_MS_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_FLOW]		= { .type = NLA_U16, },
	[GTPA_NET_NS_FD]	= { .type = NLA_U32, },
	[GTPA_I_TEI]		= { .type = NLA_U32, },
	[GTPA_O_TEI]		= { .type = NLA_U32, },
};
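/* A minimal sketch of a GTP_CMD_NEWPDP request under this policy, assuming
 * a GTPv1-U tunnel (values are illustrative):
 *
 *	GTPA_VERSION		= GTP_V1
 *	GTPA_LINK		= ifindex of the gtp device
 *	GTPA_PEER_ADDRESS	= IPv4 address of the remote SGSN/peer
 *	GTPA_MS_ADDRESS		= IPv4 address of the mobile subscriber
 *	GTPA_I_TEI		= local (incoming) TEI
 *	GTPA_O_TEI		= remote (outgoing) TEI
 *
 * gtp_genl_new_pdp() rejects the request if any of these is missing.
 */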
static const struct genl_small_ops gtp_genl_ops[] = {
	{
		.cmd = GTP_CMD_NEWPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_new_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_DELPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_del_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_GETPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_get_pdp,
		.dumpit = gtp_genl_dump_pdp,
		.flags = GENL_ADMIN_PERM,
	},
};
static struct genl_family gtp_genl_family __ro_after_init = {
	.name		= "gtp",
	.version	= 0,
	.hdrsize	= 0,
	.maxattr	= GTPA_MAX,
	.policy		= gtp_genl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.small_ops	= gtp_genl_ops,
	.n_small_ops	= ARRAY_SIZE(gtp_genl_ops),
	.mcgrps		= gtp_genl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(gtp_genl_mcgrps),
};
static int __net_init gtp_net_init(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);

	INIT_LIST_HEAD(&gn->gtp_dev_list);
	return 0;
}

static void __net_exit gtp_net_exit(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);
	struct gtp_dev *gtp;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(gtp, &gn->gtp_dev_list, list)
		gtp_dellink(gtp->dev, &list);

	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations gtp_net_ops = {
	.init	= gtp_net_init,
	.exit	= gtp_net_exit,
	.id	= &gtp_net_id,
	.size	= sizeof(struct gtp_net),
};
static int __init gtp_init(void)
{
	int err;

	get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));

	err = rtnl_link_register(&gtp_link_ops);
	if (err < 0)
		goto error_out;

	err = genl_register_family(&gtp_genl_family);
	if (err < 0)
		goto unreg_rtnl_link;

	err = register_pernet_subsys(&gtp_net_ops);
	if (err < 0)
		goto unreg_genl_family;

	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
		sizeof(struct pdp_ctx));
	return 0;

unreg_genl_family:
	genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
	rtnl_link_unregister(&gtp_link_ops);
error_out:
	pr_err("error loading GTP module\n");
	return err;
}
late_initcall(gtp_init);
static void __exit gtp_fini(void)
{
	genl_unregister_family(&gtp_genl_family);
	rtnl_link_unregister(&gtp_link_ops);
	unregister_pernet_subsys(&gtp_net_ops);

	pr_info("GTP module unloaded\n");
}
module_exit(gtp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
MODULE_ALIAS_GENL_FAMILY("gtp");