/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"
static bool udp6_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if defined(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_udp_l3mdev_accept &&
	    skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
		return true;
#endif
	return false;
}
static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp_ipv6_hash_secret + net_hash_mix(net));
}
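/* Example of the hash above: for a v4-mapped local address such as
 * ::ffff:192.0.2.1 only the low 32 bits (192.0.2.1) feed lhash, while the
 * foreign address is jhashed in full with a boot-time random secret.
 * Folding net_hash_mix(net) into the final fold keeps bucket distribution
 * independent across network namespaces, which hardens the table against
 * hash-flooding from within one namespace.
 */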
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
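/* Two secondary hashes are handed to udp_lib_get_port(): hash2_nulladdr
 * covers the wildcard (in6addr_any, port) bucket, and the precomputed
 * partial udp_portaddr_hash covers the (bound address, port) bucket once
 * the chosen port is mixed in.  Bind conflicts can then be detected by
 * scanning two short hash2 chains instead of the whole per-port chain.
 */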
static void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}
static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif, bool exact_dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	if (sk->sk_bound_dev_if || exact_dif) {
		bool dev_match = (sk->sk_bound_dev_if == dif ||
				  sk->sk_bound_dev_if == sdif);

		if (!dev_match)
			return -1;
		if (sk->sk_bound_dev_if)
			score++;
	}

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}
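/* Scoring example: for a packet to daddr:hnum from saddr:sport, a fully
 * connected socket bound to a matching device can score up to 5 (dport,
 * rcv_saddr, daddr, bound_dev_if and incoming CPU: +1 each), while a bare
 * wildcard listener scores 0.  Any mismatch returns -1 and disqualifies
 * the socket entirely.
 */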
/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, bool exact_dif,
		struct udp_hslot *hslot2, struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	u32 hash = 0;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif, exact_dif);
		if (score > badness) {
			if (sk->sk_reuseport &&
			    sk->sk_state != TCP_ESTABLISHED) {
				hash = udp6_ehashfn(net, daddr, hnum,
						    saddr, sport);

				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result && !reuseport_has_conns(sk, false))
					return result;
			}
			result = sk;
			badness = score;
		}
	}
	return result;
}
/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	bool exact_dif = udp6_lib_exact_dif_match(net, skb);
	int score, badness;
	u32 hash = 0;

	if (hslot->count > 10) {
		hash2 = ipv6_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp6_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif, sdif, exact_dif,
					  hslot2, skb);
		if (!result) {
			unsigned int old_slot2 = slot2;
			hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
			slot2 = hash2 & udptable->mask;
			/* avoid searching the same slot again. */
			if (unlikely(slot2 == old_slot2))
				goto begin;

			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp6_lib_lookup2(net, saddr, sport,
						  daddr, hnum, dif, sdif,
						  exact_dif, hslot2,
						  skb);
		}
		if (unlikely(IS_ERR(result)))
			return NULL;
		return result;
	}
begin:
	result = NULL;
	badness = -1;
	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net, saddr, sport, daddr, hnum, dif,
				      sdif, exact_dif);
		if (score > badness) {
			if (sk->sk_reuseport) {
				hash = udp6_ehashfn(net, daddr, hnum,
						    saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (unlikely(IS_ERR(result)))
					return NULL;
				if (result)
					return result;
			}
			result = sk;
			badness = score;
		}
	}
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
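/* Lookup strategy: when the port-only (hash) chain holds more than 10
 * sockets, the search moves to the hash2 tables keyed on (daddr, port) and
 * then (in6addr_any, port), each of which is typically much shorter.  The
 * hslot->count comparisons ensure the fallback never scans a chain longer
 * than the one it is trying to avoid.
 */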
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}
struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), &udp_table, NULL);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif
/* do not use the scratch area len for jumbogram: their length exceeds the
 * scratch area space; note that the IP6CB flags is still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}
/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked, peeking, off;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	peeking = flags & MSG_PEEK;
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
			if (is_udp4)
				UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					      is_udplite);
			else
				UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					       is_udplite);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeked) {
		if (is_udp4)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
				      is_udplite);
		else
			UDP6_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
				       is_udplite);
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		if (cgroup_bpf_enabled)
			BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						(struct sockaddr *)sin6);
	}

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		if (is_udp4) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_CSUMERRORS, is_udplite);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		} else {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_CSUMERRORS, is_udplite);
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_INERRORS, is_udplite);
		}
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
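/* Receive-path note: the checksum is normally verified while copying to
 * user space; only truncated reads, MSG_PEEK and partial-coverage UDP-Lite
 * force a separate pass beforehand.  On a checksum failure the datagram is
 * dropped and the function restarts at try_again, so the caller never sees
 * a corrupt payload.
 */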
void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info,
		    struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), 0, udptable, NULL);
	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		ip6_sk_redirect(skb, sk);
		goto out;
	}

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return;
}
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		return -1;
	}

	return 0;
}
static __inline__ void udpv6_err(struct sk_buff *skb,
				 struct inet6_skb_parm *opt, u8 type,
				 u8 code, int offset, __be32 info)
{
	__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}
static DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_enable(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);
static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}
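/* Encapsulation note: tunnels such as L2TP install up->encap_rcv (e.g. via
 * setup_udp_tunnel_sock()), and the contract documented above lets a
 * handler consume the skb (<= 0) or hand it back to plain UDP (> 0).  The
 * checksum is verified before the handler runs, so encap code never sees a
 * corrupt packet.
 */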
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}
static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}
/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}
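/* Delivery note: every matching socket except the first receives a clone,
 * and the original skb is queued to the socket remembered in 'first'.  In
 * the common single-listener case this avoids any cloning at all; if no
 * listener matches, the skb is freed and counted under
 * UDP_MIB_IGNOREDMULTI.
 */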
static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					 ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}
static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}
static void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}
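/* Early demux caches both the established socket and its validated rx_dst,
 * so the regular input path can skip the socket lookup and the routing
 * decision entirely.  The rx_dst_cookie check above guards against routes
 * that changed after the dst was cached.
 */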
static __inline__ int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}
static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (__ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
}
/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
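/* Checksum-offload note: in the single-fragment case the UDP checksum
 * field is primed with the complemented pseudo-header sum so the hardware
 * can finish the one's-complement fold from csum_start/csum_offset.  With
 * a frag_list that trick is impossible, hence the software fold over every
 * fragment above.
 */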
/*
 *	Sending
 */

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_sk(sk)->no_check6_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}
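/* GSO sizing example: with a 1280-byte cork->fragsize and a plain IPv6
 * header, hlen is 40 + 8 = 48, so any cork->gso_size above
 * 1280 - 48 = 1232 bytes fails the hlen + gso_size > fragsize check and is
 * rejected with -EINVAL before an oversized segment can reach the device.
 */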
static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock  *up = udp_sk(sk);
	struct flowi6 fl6;
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	/* ip6_finish_skb will release the cork, so make a copy of
	 * fl6 here.
	 */
	fl6 = inet_sk(sk)->cork.fl.u.ip6;

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct flowi6 fl6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = up->gso_size;
	ipc6.sockc.tsflags = sk->sk_tsflags;

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			if (__ipv6_only_sock(sk))
				return -ENETUNREACH;
			return udp_sendmsg(sk, msg, len);
		}
	}

	if (up->pending == AF_INET)
		return udp_sendmsg(sk, msg, len);

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	   */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(&fl6, 0, sizeof(fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6.fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (!flowlabel)
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6.flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6.fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6.flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6.flowi6_mark = sk->sk_mark;
	fl6.flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0)
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
						    &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = false;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6.flowi6_proto = sk->sk_protocol;
	fl6.daddr = *daddr;
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;
	fl6.fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6, &fl6.saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote IPv6-only by IPv4-mapped
				 * IPv6. It's currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6.fl6_dport = sin6->sin6_port;
			fl6.daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6.daddr))
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(&fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
		fl6.flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct inet_cork_full cork;
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   &fl6, (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, &fl6, &cork.base);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, &fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6.daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
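/* Send-path note: the uncorked case above is the fast path; the whole
 * datagram is built by ip6_make_skb() and pushed without taking the socket
 * lock.  Corked sends (MSG_MORE/UDP_CORK) instead append under lock_sock()
 * and are flushed by udp_v6_push_pending_frames().
 */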
void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	lock_sock(sk);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		void (*encap_destroy)(struct sock *sk);
		encap_destroy = READ_ONCE(up->encap_destroy);
		if (encap_destroy)
			encap_destroy(sk);
	}

	inet6_destroy_sock(sk);
}
/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol udpv6_protocol = {
	.early_demux	=	udp_v6_early_demux,
	.early_demux_handler =	udp_v6_early_demux,
	.handler	=	udpv6_rcv,
	.err_handler	=	udpv6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		struct inet_sock *inet = inet_sk(v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}
const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);
static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= &udp_table,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */
struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.h.udp_table		= &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_udpv6_setsockopt,
	.compat_getsockopt	= compat_udpv6_getsockopt,
#endif
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type =      SOCK_DGRAM,
	.protocol =  IPPROTO_UDP,
	.prot =      &udpv6_prot,
	.ops =       &inet6_dgram_ops,
	.flags =     INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}